blob: 858f70610edaa8370a6b79b2367147cf3d69dba9 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * sata_nv.c - NVIDIA nForce SATA
3 *
4 * Copyright 2004 NVIDIA Corp. All rights reserved.
5 * Copyright 2004 Andrew Chew
6 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 *
Jeff Garzikaa7e16d2005-08-29 15:12:56 -04008 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
Linus Torvalds1da177e2005-04-16 15:20:36 -070021 *
Jeff Garzikaf36d7f2005-08-28 20:18:39 -040022 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 * No hardware documentation available outside of NVIDIA.
27 * This driver programs the NVIDIA SATA controller in a similar
28 * fashion as with other PCI IDE BMDMA controllers, with a few
29 * NV-specific details such as register offsets, SATA phy location,
30 * hotplug info, etc.
31 *
Robert Hancockfbbb2622006-10-27 19:08:41 -070032 * CK804/MCP04 controllers support an alternate programming interface
33 * similar to the ADMA specification (with some modifications).
34 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35 * sent through the legacy interface.
36 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070037 */
38
Linus Torvalds1da177e2005-04-16 15:20:36 -070039#include <linux/kernel.h>
40#include <linux/module.h>
41#include <linux/pci.h>
42#include <linux/init.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/interrupt.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050046#include <linux/device.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070047#include <scsi/scsi_host.h>
Robert Hancockfbbb2622006-10-27 19:08:41 -070048#include <scsi/scsi_device.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070049#include <linux/libata.h>
50
51#define DRV_NAME "sata_nv"
Jeff Garzik2a3103c2007-08-31 04:54:06 -040052#define DRV_VERSION "3.5"
Robert Hancockfbbb2622006-10-27 19:08:41 -070053
54#define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
Linus Torvalds1da177e2005-04-16 15:20:36 -070055
/* Hardware constants.  Offsets are relative to BAR5 (the MMIO window)
 * unless noted as PCI config space.  One block of legacy registers per
 * port, plus the ADMA and MCP55/SWNCQ extensions. */
enum {
	NV_MMIO_BAR			= 5,

	NV_PORTS			= 2,
	NV_PIO_MASK			= 0x1f,
	NV_MWDMA_MASK			= 0x07,
	NV_UDMA_MASK			= 0x7f,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	/* ADMA queue geometry: 32 CPBs of 128 bytes, each followed by an
	   external scatter/gather table of 16-byte APRD entries */
	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags (nv_adma_port_priv.flags) */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),

	/* MCP55 reg offset */
	NV_CTL_MCP55			= 0x400,
	NV_INT_STATUS_MCP55		= 0x440,
	NV_INT_ENABLE_MCP55		= 0x444,
	NV_NCQ_REG_MCP55		= 0x448,

	/* MCP55 */
	NV_INT_ALL_MCP55		= 0xffff,
	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,

	/* SWNCQ ENABLE BITS*/
	NV_CTL_PRI_SWNCQ		= 0x02,
	NV_CTL_SEC_SWNCQ		= 0x04,

	/* SW NCQ status bits*/
	NV_SWNCQ_IRQ_DEV		= (1 << 0),
	NV_SWNCQ_IRQ_PM			= (1 << 1),
	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),

	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),

	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
					  NV_SWNCQ_IRQ_REMOVED,

};
Linus Torvalds1da177e2005-04-16 15:20:36 -0700202
/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;		/* bus address of the segment */
	__le32			len;		/* segment length in bytes */
	u8			flags;		/* NV_APRD_* bits */
	u8			packet_len;
	__le16			reserved;
};
211
/* Control bits carried in the upper byte of each CPB taskfile word */
enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};
221
/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;	/* 0: NV_CPB_RESP_* status */
	u8			reserved1;	/* 1 */
	u8			ctl_flags;	/* 2: NV_CPB_CTL_* control */
	/* len is length of taskfile in 64 bit words */
	u8			len;		/* 3 */
	u8			tag;		/* 4 */
	u8			next_cpb_idx;	/* 5 */
	__le16			reserved2;	/* 6-7 */
	__le16			tf[12];		/* 8-31 */
	struct nv_adma_prd	aprd[5];	/* 32-111: inline SG segments */
	__le64			next_aprd;	/* 112-119: overflow SG table */
	__le64			reserved3;	/* 120-127 */
};
240
241
/* Per-port private data for the ADMA programming interface */
struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;		/* CPB array */
	dma_addr_t		cpb_dma;	/* its DMA address */
	struct nv_adma_prd	*aprd;		/* external APRD SG tables */
	dma_addr_t		aprd_dma;	/* their DMA address */
	void __iomem		*ctl_block;	/* ADMA port registers */
	void __iomem		*gen_block;	/* ADMA general registers */
	void __iomem		*notifier_clear_block;
	u64			adma_dma_mask;	/* mask to restore when no ATAPI device forces 32-bit DMA */
	u8			flags;		/* NV_ADMA_PORT_REGISTER_MODE / NV_ADMA_ATAPI_SETUP_COMPLETE */
	int			last_issue_ncq;	/* NOTE(review): presumably whether last issued cmd was NCQ */
};
254
/* Per-host private data */
struct nv_host_priv {
	unsigned long		type;	/* controller flavor; presumably an enum nv_host_type value — confirm at probe site */
};
258
/* FIFO circular queue of deferred SWNCQ command tags */
struct defer_queue {
	u32		defer_bits;		/* bitmap of tags currently queued */
	unsigned int	head;			/* insert position */
	unsigned int	tail;			/* remove position */
	unsigned int	tag[ATA_MAX_QUEUE];	/* tags, in deferral order */
};
265
/* Flags recording which FIS types have been observed during
 * SWNCQ interrupt analysis (stored in nv_swncq_port_priv.ncq_flags) */
enum ncq_saw_flag_list {
	ncq_saw_d2h	= (1U << 0),
	ncq_saw_dmas	= (1U << 1),
	ncq_saw_sdb	= (1U << 2),
	ncq_saw_backout	= (1U << 3),
};
272
/* Per-port private data for the software-NCQ (MCP51/MCP55) interface */
struct nv_swncq_port_priv {
	struct ata_prd	*prd;	 /* our SG list */
	dma_addr_t	prd_dma; /* and its DMA mapping */
	void __iomem	*sactive_block;		/* SActive register */
	void __iomem	*irq_block;		/* interrupt status/enable */
	void __iomem	*tag_block;		/* tag register */
	u32		qc_active;		/* tags currently in flight */

	unsigned int	last_issue_tag;		/* tag of last issued command */

	/* fifo circular queue to store deferral command */
	struct defer_queue defer_queue;

	/* for NCQ interrupt analysis: per-tag bitmaps of FIS types seen */
	u32		dhfis_bits;
	u32		dmafis_bits;
	u32		sdbfis_bits;

	unsigned int	ncq_flags;		/* enum ncq_saw_flag_list bits */
};
293
294
/* Extract the ADMA interrupt bit for port PORT from the GEN_CTL value GCTL:
   port 0 uses bit 19, port 1 uses bit 31 (19 + 12). */
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
Robert Hancockfbbb2622006-10-27 19:08:41 -0700296
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400297static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
Tejun Heo438ac6d2007-03-02 17:31:26 +0900298#ifdef CONFIG_PM
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600299static int nv_pci_device_resume(struct pci_dev *pdev);
Tejun Heo438ac6d2007-03-02 17:31:26 +0900300#endif
Jeff Garzikcca39742006-08-24 03:19:22 -0400301static void nv_ck804_host_stop(struct ata_host *host);
David Howells7d12e782006-10-05 14:55:46 +0100302static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
303static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
304static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400305static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
306static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700307
Tejun Heo39f87582006-06-17 15:49:56 +0900308static void nv_nf2_freeze(struct ata_port *ap);
309static void nv_nf2_thaw(struct ata_port *ap);
310static void nv_ck804_freeze(struct ata_port *ap);
311static void nv_ck804_thaw(struct ata_port *ap);
Tejun Heoa1efdab2008-03-25 12:22:50 +0900312static int nv_hardreset(struct ata_link *link, unsigned int *class,
313 unsigned long deadline);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700314static int nv_adma_slave_config(struct scsi_device *sdev);
Robert Hancock2dec7552006-11-26 14:20:19 -0600315static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700316static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
317static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
318static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
319static void nv_adma_irq_clear(struct ata_port *ap);
320static int nv_adma_port_start(struct ata_port *ap);
321static void nv_adma_port_stop(struct ata_port *ap);
Tejun Heo438ac6d2007-03-02 17:31:26 +0900322#ifdef CONFIG_PM
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600323static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
324static int nv_adma_port_resume(struct ata_port *ap);
Tejun Heo438ac6d2007-03-02 17:31:26 +0900325#endif
Robert Hancock53014e22007-05-05 15:36:36 -0600326static void nv_adma_freeze(struct ata_port *ap);
327static void nv_adma_thaw(struct ata_port *ap);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700328static void nv_adma_error_handler(struct ata_port *ap);
329static void nv_adma_host_stop(struct ata_host *host);
Robert Hancockf5ecac22007-02-20 21:49:10 -0600330static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
Robert Hancockf2fb3442007-03-26 21:43:36 -0800331static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
Tejun Heo39f87582006-06-17 15:49:56 +0900332
Kuan Luof140f0f2007-10-15 15:16:53 -0400333static void nv_mcp55_thaw(struct ata_port *ap);
334static void nv_mcp55_freeze(struct ata_port *ap);
335static void nv_swncq_error_handler(struct ata_port *ap);
336static int nv_swncq_slave_config(struct scsi_device *sdev);
337static int nv_swncq_port_start(struct ata_port *ap);
338static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
339static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
340static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
341static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
342static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
343#ifdef CONFIG_PM
344static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
345static int nv_swncq_port_resume(struct ata_port *ap);
346#endif
347
/* Controller flavors; also index into nv_port_info[] and
 * driver_data in nv_pci_tbl[] */
enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA,
	SWNCQ,
};
357
/* PCI IDs handled by this driver; driver_data selects the nv_host_type */
static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

	{ } /* terminate list */
};
376
/* PCI driver glue */
static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};
387
/* SCSI host template for the legacy (non-NCQ) interface */
static struct scsi_host_template nv_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};
391
/* SCSI host template for the ADMA interface (NCQ capable) */
static struct scsi_host_template nv_adma_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= NV_ADMA_MAX_CPBS,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
};
399
/* SCSI host template for the software-NCQ interface */
static struct scsi_host_template nv_swncq_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= ATA_MAX_QUEUE,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= nv_swncq_slave_config,
};
407
/* Base port operations; the other nv_*_ops below inherit from this */
static struct ata_port_operations nv_generic_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.hardreset		= nv_hardreset,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
};
414
/* nForce2/3: generic ops plus chip-specific freeze/thaw */
static struct ata_port_operations nv_nf2_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
};
420
/* CK804/MCP04 in legacy mode: chip-specific freeze/thaw and host_stop */
static struct ata_port_operations nv_ck804_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.host_stop		= nv_ck804_host_stop,
};
427
/* CK804/MCP04 in ADMA mode: full command path override for the
 * ADMA programming interface */
static struct ata_port_operations nv_adma_ops = {
	.inherits		= &nv_generic_ops,

	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.sff_tf_read		= nv_adma_tf_read,
	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.sff_irq_clear		= nv_adma_irq_clear,

	.freeze			= nv_adma_freeze,
	.thaw			= nv_adma_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,

	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
#ifdef CONFIG_PM
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
#endif
	.host_stop		= nv_adma_host_stop,
};
451
/* MCP51/MCP55 software-NCQ mode */
static struct ata_port_operations nv_swncq_ops = {
	.inherits		= &nv_generic_ops,

	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_swncq_qc_prep,
	.qc_issue		= nv_swncq_qc_issue,

	.freeze			= nv_mcp55_freeze,
	.thaw			= nv_mcp55_thaw,
	.error_handler		= nv_swncq_error_handler,

#ifdef CONFIG_PM
	.port_suspend		= nv_swncq_port_suspend,
	.port_resume		= nv_swncq_port_resume,
#endif
	.port_start		= nv_swncq_port_start,
};
469
/* Per-flavor probe data: interrupt handler + SCSI host template,
 * carried in ata_port_info.private_data */
struct nv_pi_priv {
	irq_handler_t			irq_handler;
	struct scsi_host_template	*sht;
};

/* Build an anonymous, statically-allocated nv_pi_priv (compound literal) */
#define NV_PI_PRIV(_irq_handler, _sht) \
	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
477
/* Port configuration per controller flavor, indexed by enum nv_host_type */
static const struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* nforce2/3 */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
	},
	/* ck804 */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
	},
	/* ADMA */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
	},
	/* SWNCQ */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_swncq_ops,
		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
	},
};
527
MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

/* Module-level feature switches: ADMA is off by default,
 * software NCQ is on by default. */
static int adma_enabled;
static int swncq_enabled = 1;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700536
Robert Hancock2dec7552006-11-26 14:20:19 -0600537static void nv_adma_register_mode(struct ata_port *ap)
538{
Robert Hancock2dec7552006-11-26 14:20:19 -0600539 struct nv_adma_port_priv *pp = ap->private_data;
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600540 void __iomem *mmio = pp->ctl_block;
Robert Hancocka2cfe812007-02-05 16:26:03 -0800541 u16 tmp, status;
542 int count = 0;
Robert Hancock2dec7552006-11-26 14:20:19 -0600543
544 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
545 return;
546
Robert Hancocka2cfe812007-02-05 16:26:03 -0800547 status = readw(mmio + NV_ADMA_STAT);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400548 while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
Robert Hancocka2cfe812007-02-05 16:26:03 -0800549 ndelay(50);
550 status = readw(mmio + NV_ADMA_STAT);
551 count++;
552 }
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400553 if (count == 20)
Robert Hancocka2cfe812007-02-05 16:26:03 -0800554 ata_port_printk(ap, KERN_WARNING,
555 "timeout waiting for ADMA IDLE, stat=0x%hx\n",
556 status);
557
Robert Hancock2dec7552006-11-26 14:20:19 -0600558 tmp = readw(mmio + NV_ADMA_CTL);
559 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
560
Robert Hancocka2cfe812007-02-05 16:26:03 -0800561 count = 0;
562 status = readw(mmio + NV_ADMA_STAT);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400563 while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
Robert Hancocka2cfe812007-02-05 16:26:03 -0800564 ndelay(50);
565 status = readw(mmio + NV_ADMA_STAT);
566 count++;
567 }
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400568 if (count == 20)
Robert Hancocka2cfe812007-02-05 16:26:03 -0800569 ata_port_printk(ap, KERN_WARNING,
570 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
571 status);
572
Robert Hancock2dec7552006-11-26 14:20:19 -0600573 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
574}
575
576static void nv_adma_mode(struct ata_port *ap)
577{
Robert Hancock2dec7552006-11-26 14:20:19 -0600578 struct nv_adma_port_priv *pp = ap->private_data;
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600579 void __iomem *mmio = pp->ctl_block;
Robert Hancocka2cfe812007-02-05 16:26:03 -0800580 u16 tmp, status;
581 int count = 0;
Robert Hancock2dec7552006-11-26 14:20:19 -0600582
583 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
584 return;
Jeff Garzikf20b16f2006-12-11 11:14:06 -0500585
Robert Hancock2dec7552006-11-26 14:20:19 -0600586 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
587
588 tmp = readw(mmio + NV_ADMA_CTL);
589 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
590
Robert Hancocka2cfe812007-02-05 16:26:03 -0800591 status = readw(mmio + NV_ADMA_STAT);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400592 while (((status & NV_ADMA_STAT_LEGACY) ||
Robert Hancocka2cfe812007-02-05 16:26:03 -0800593 !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
594 ndelay(50);
595 status = readw(mmio + NV_ADMA_STAT);
596 count++;
597 }
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400598 if (count == 20)
Robert Hancocka2cfe812007-02-05 16:26:03 -0800599 ata_port_printk(ap, KERN_WARNING,
600 "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
601 status);
602
Robert Hancock2dec7552006-11-26 14:20:19 -0600603 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
604}
605
/* SCSI slave_configure hook for ADMA ports.
 *
 * ATAPI devices cannot use ADMA, so when one is attached this drops the
 * port into legacy register mode, disables ADMA in PCI config space, and
 * restricts the DMA mask / bounce limit to 32-bit across BOTH ports
 * (they share one PCI function).  For ATA devices it (re)enables ADMA
 * and restores the wider adma_dma_mask.  Returns the result of
 * ata_scsi_slave_config().  Runs under ap->lock with IRQs disabled. */
static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct nv_adma_port_priv *port0, *port1;
	struct scsi_device *sdev0, *sdev1;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	unsigned long segment_boundary, flags;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	spin_lock_irqsave(ap->lock, flags);

	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	} else {
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	/* Each port has its own enable + PWB-enable bits in config reg 20. */
	if (ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if (adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	} else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	/* Avoid a config-space write when nothing changed. */
	if (current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	port0 = ap->host->ports[0]->private_data;
	port1 = ap->host->ports[1]->private_data;
	sdev0 = ap->host->ports[0]->link.device[0].sdev;
	sdev1 = ap->host->ports[1]->link.device[0].sdev;
	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		/** We have to set the DMA mask to 32-bit if either port is in
		    ATAPI mode, since they are on the same PCI device which is
		    used for DMA mapping. If we set the mask we also need to set
		    the bounce limit on both ports to ensure that the block
		    layer doesn't feed addresses that cause DMA mapping to
		    choke. If either SCSI device is not allocated yet, it's OK
		    since that port will discover its correct setting when it
		    does get allocated.
		    Note: Setting 32-bit mask should not fail. */
		if (sdev0)
			blk_queue_bounce_limit(sdev0->request_queue,
					       ATA_DMA_MASK);
		if (sdev1)
			blk_queue_bounce_limit(sdev1->request_queue,
					       ATA_DMA_MASK);

		pci_set_dma_mask(pdev, ATA_DMA_MASK);
	} else {
		/** This shouldn't fail as it was set to this value before */
		pci_set_dma_mask(pdev, pp->adma_dma_mask);
		if (sdev0)
			blk_queue_bounce_limit(sdev0->request_queue,
					       pp->adma_dma_mask);
		if (sdev1)
			blk_queue_bounce_limit(sdev1->request_queue,
					       pp->adma_dma_mask);
	}

	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)*ap->host->dev->dma_mask,
		segment_boundary, sg_tablesize);

	spin_unlock_irqrestore(ap->lock, flags);

	return rc;
}
715
Robert Hancock2dec7552006-11-26 14:20:19 -0600716static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
717{
718 struct nv_adma_port_priv *pp = qc->ap->private_data;
719 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
720}
721
/*
 * Read the current taskfile registers.  Forces the port back into ATA
 * register mode first, since the taskfile is only accessible through
 * the legacy register interface.
 */
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	/* Other than when internal or pass-through commands are executed,
	   the only time this function will be called in ADMA mode will be
	   if a command fails. In the failure case we don't care about going
	   into register mode with ADMA commands pending, as the commands will
	   all shortly be aborted anyway. We assume that NCQ commands are not
	   issued via passthrough, which is the only way that switching into
	   ADMA mode could abort outstanding commands. */
	nv_adma_register_mode(ap);

	ata_sff_tf_read(ap, tf);
}
735
Robert Hancock2dec7552006-11-26 14:20:19 -0600736static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
Robert Hancockfbbb2622006-10-27 19:08:41 -0700737{
738 unsigned int idx = 0;
739
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400740 if (tf->flags & ATA_TFLAG_ISADDR) {
Robert Hancockac3d6b82007-02-19 19:02:46 -0600741 if (tf->flags & ATA_TFLAG_LBA48) {
742 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
743 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
744 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
745 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
746 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
747 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
748 } else
749 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);
Jeff Garzika84471f2007-02-26 05:51:33 -0500750
Robert Hancockac3d6b82007-02-19 19:02:46 -0600751 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
752 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
753 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
754 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700755 }
Jeff Garzika84471f2007-02-26 05:51:33 -0500756
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400757 if (tf->flags & ATA_TFLAG_DEVICE)
Robert Hancockac3d6b82007-02-19 19:02:46 -0600758 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700759
760 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
Jeff Garzika84471f2007-02-26 05:51:33 -0500761
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400762 while (idx < 12)
Robert Hancockac3d6b82007-02-19 19:02:46 -0600763 cpb[idx++] = cpu_to_le16(IGN);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700764
765 return idx;
766}
767
/*
 * Inspect one CPB's response flags and complete or fail the associated
 * command.  @force_err indicates the notifier-error register flagged
 * this tag, so an error is raised even if the CPB flags look clean.
 *
 * Return: 1 if error handling was invoked (caller should stop scanning
 * further CPBs), 0 otherwise.
 */
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->link.eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			/* device reported an ATA-level error */
			ata_ehi_push_desc(ehi, "ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, "CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			/* controller-side CPB failure: treat as a host
			   problem and freeze for a full reset */
			ata_ehi_push_desc(ehi, "CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ata_ehi_push_desc(ehi, "unknown");
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands. EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return 1;
	}

	if (likely(flags & NV_CPB_RESP_DONE)) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		if (likely(qc)) {
			DPRINTK("Completing qc from tag %d\n", cpb_num);
			ata_qc_complete(qc);
		} else {
			struct ata_eh_info *ehi = &ap->link.eh_info;
			/* Notifier bits set without a command may indicate the drive
			   is misbehaving. Raise host state machine violation on this
			   condition. */
			ata_port_printk(ap, KERN_ERR,
					"notifier for tag %d with no cmd?\n",
					cpb_num);
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_RESET;
			ata_port_freeze(ap);
			return 1;
		}
	}
	return 0;
}
830
/*
 * Handle a legacy (non-ADMA) interrupt for one port, given the per-port
 * slice of the CK804 interrupt status register.
 *
 * Return: nonzero if the interrupt was handled (or consumed), 0 if it
 * was not ours.
 */
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		/* read status to ack the (spurious) device interrupt */
		ata_sff_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_sff_host_intr(ap, qc);
}
854
/*
 * ADMA interrupt handler for the whole host.  For each port, dispatch
 * to the legacy handler when ADMA is disabled or the port is in
 * register mode, otherwise read and clear the ADMA status, freeze on
 * hotplug/controller errors, and complete any finished CPBs.  Both
 * ports' notifier-clear registers are written together at the end, as
 * required by the hardware.
 */
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = pp->ctl_block;
			u16 status;
			u32 gen_ctl;
			u32 notifier, notifier_error;

			/* if ADMA is disabled, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				handled += nv_host_intr(ap, irq_stat);
				continue;
			}

			/* if in ATA register mode, check for standard interrupts */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				if (ata_tag_valid(ap->link.active_tag))
					/** NV_INT_DEV indication seems unreliable at times
					    at least in ADMA mode. Force it on always when a
					    command is active, to prevent losing interrupts. */
					irq_stat |= NV_INT_DEV;
				handled += nv_host_intr(ap, irq_stat);
			}

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			/* remember which notifier bits to clear at the end */
			notifier_clears[i] = notifier | notifier_error;

			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

			if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status. Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */
			rmb();

			handled++; /* irq handled if we got here */

			/* freeze if hotplugged or controller error */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
					       NV_ADMA_STAT_HOTUNPLUG |
					       NV_ADMA_STAT_TIMEOUT |
					       NV_ADMA_STAT_SERROR))) {
				struct ata_eh_info *ehi = &ap->link.eh_info;

				ata_ehi_clear_desc(ehi);
				__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
				if (status & NV_ADMA_STAT_TIMEOUT) {
					ehi->err_mask |= AC_ERR_SYSTEM;
					ata_ehi_push_desc(ehi, "timeout");
				} else if (status & NV_ADMA_STAT_HOTPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, "hotplug");
				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, "hot unplug");
				} else if (status & NV_ADMA_STAT_SERROR) {
					/* let libata analyze SError and figure out the cause */
					ata_ehi_push_desc(ehi, "SError");
				} else
					ata_ehi_push_desc(ehi, "unknown");
				ata_port_freeze(ap);
				continue;
			}

			if (status & (NV_ADMA_STAT_DONE |
				      NV_ADMA_STAT_CPBERR |
				      NV_ADMA_STAT_CMD_COMPLETE)) {
				u32 check_commands = notifier_clears[i];
				int pos, error = 0;

				if (status & NV_ADMA_STAT_CPBERR) {
					/* Check all active commands */
					if (ata_tag_valid(ap->link.active_tag))
						check_commands = 1 <<
							ap->link.active_tag;
					else
						check_commands = ap->
							link.sactive;
				}

				/** Check CPBs for completed commands */
				while ((pos = ffs(check_commands)) && !error) {
					pos--;
					error = nv_adma_check_cpb(ap, pos,
						notifier_error & (1 << pos));
					check_commands &= ~(1 << pos);
				}
			}
		}
	}

	if (notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
983
/*
 * Freeze the port: mask its interrupts.  On top of the generic CK804
 * freeze, also disable the ADMA interrupt enables unless the port has
 * permanently fallen back to legacy mode.
 */
static void nv_adma_freeze(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_freeze(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* Disable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}
1005
/*
 * Thaw the port: re-enable its interrupts.  Counterpart of
 * nv_adma_freeze(); re-enables the ADMA interrupt enables unless the
 * port has permanently fallen back to legacy mode.
 */
static void nv_adma_thaw(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_thaw(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* Enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}
1023
/*
 * Clear all pending interrupt conditions for the port.  In legacy mode
 * this defers to the SFF helper; in ADMA mode it clears the CK804
 * notifications, the ADMA status, and the notifier registers.  Per the
 * hardware requirement, both ports' notifier-clear registers must be
 * written even though only one port is being cleared.
 */
static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u32 notifier_clears[2];

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
		ata_sff_irq_clear(ap);
		return;
	}

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* clear ADMA status */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* clear notifiers - note both ports need to be written with
	   something even though we are only clearing on one */
	if (ap->port_no == 0) {
		notifier_clears[0] = 0xFFFFFFFF;
		notifier_clears[1] = 0;
	} else {
		notifier_clears[0] = 0;
		notifier_clears[1] = 0xFFFFFFFF;
	}
	pp = ap->host->ports[0]->private_data;
	writel(notifier_clears[0], pp->notifier_clear_block);
	pp = ap->host->ports[1]->private_data;
	writel(notifier_clears[1], pp->notifier_clear_block);
}
1056
Robert Hancockf5ecac22007-02-20 21:49:10 -06001057static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
Robert Hancockfbbb2622006-10-27 19:08:41 -07001058{
Robert Hancockf5ecac22007-02-20 21:49:10 -06001059 struct nv_adma_port_priv *pp = qc->ap->private_data;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001060
Jeff Garzikb4479162007-10-25 20:47:30 -04001061 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
Tejun Heo9363c382008-04-07 22:47:16 +09001062 ata_sff_post_internal_cmd(qc);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001063}
1064
/*
 * Allocate and initialize per-port ADMA state: the CPB array and APRD
 * scatter/gather tables in coherent DMA memory, the MMIO block
 * pointers, and the initial hardware state (register mode, channel
 * reset, interrupts enabled).
 *
 * The DMA mask dance is deliberate: the mask must be 32-bit while the
 * legacy PRD/pad buffers are allocated (by ata_port_start), and is only
 * raised to 64-bit afterwards for the CPB/APRD allocation.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 tmp;

	VPRINTK("ENTER\n");

	/* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
	   pad buffers */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	/* devm allocation: freed automatically on device teardown */
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	/* Now that the legacy PRD and padding buffer are allocated we can
	   safely raise the DMA mask to allocate the CPB/APRD table.
	   These are allowed to fail since we store the value that ends up
	   being used to set as the bounce limit in slave_config later if
	   needed. */
	pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	pp->adma_dma_mask = *dev->dma_mask;

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

	/* program the hardware with the CPB table's bus address */
	writel(mem_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	/* pulse channel reset to bring the engine to a known state */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
1162
1163static void nv_adma_port_stop(struct ata_port *ap)
1164{
Robert Hancockfbbb2622006-10-27 19:08:41 -07001165 struct nv_adma_port_priv *pp = ap->private_data;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001166 void __iomem *mmio = pp->ctl_block;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001167
1168 VPRINTK("ENTER\n");
Robert Hancockfbbb2622006-10-27 19:08:41 -07001169 writew(0, mmio + NV_ADMA_CTL);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001170}
1171
#ifdef CONFIG_PM
/*
 * Suspend hook: quiesce the ADMA engine.  Drops back to register mode
 * (which clears GO), then disables fetches and interrupts.
 *
 * Return: 0 (cannot fail).
 */
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}

/*
 * Resume hook: reprogram the hardware from the saved per-port state —
 * CPB table address, register mode, interrupt enables — and pulse a
 * channel reset, mirroring the init sequence in nv_adma_port_start().
 *
 * Return: 0 (cannot fail).
 */
static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
#endif
Robert Hancockfbbb2622006-10-27 19:08:41 -07001224
Tejun Heo9a829cc2007-04-17 23:44:08 +09001225static void nv_adma_setup_port(struct ata_port *ap)
Robert Hancockfbbb2622006-10-27 19:08:41 -07001226{
Tejun Heo9a829cc2007-04-17 23:44:08 +09001227 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1228 struct ata_ioports *ioport = &ap->ioaddr;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001229
1230 VPRINTK("ENTER\n");
1231
Tejun Heo9a829cc2007-04-17 23:44:08 +09001232 mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001233
Tejun Heo0d5ff562007-02-01 15:06:36 +09001234 ioport->cmd_addr = mmio;
1235 ioport->data_addr = mmio + (ATA_REG_DATA * 4);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001236 ioport->error_addr =
Tejun Heo0d5ff562007-02-01 15:06:36 +09001237 ioport->feature_addr = mmio + (ATA_REG_ERR * 4);
1238 ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4);
1239 ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4);
1240 ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4);
1241 ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4);
1242 ioport->device_addr = mmio + (ATA_REG_DEVICE * 4);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001243 ioport->status_addr =
Tejun Heo0d5ff562007-02-01 15:06:36 +09001244 ioport->command_addr = mmio + (ATA_REG_STATUS * 4);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001245 ioport->altstatus_addr =
Tejun Heo0d5ff562007-02-01 15:06:36 +09001246 ioport->ctl_addr = mmio + 0x20;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001247}
1248
Tejun Heo9a829cc2007-04-17 23:44:08 +09001249static int nv_adma_host_init(struct ata_host *host)
Robert Hancockfbbb2622006-10-27 19:08:41 -07001250{
Tejun Heo9a829cc2007-04-17 23:44:08 +09001251 struct pci_dev *pdev = to_pci_dev(host->dev);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001252 unsigned int i;
1253 u32 tmp32;
1254
1255 VPRINTK("ENTER\n");
1256
1257 /* enable ADMA on the ports */
1258 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1259 tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1260 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1261 NV_MCP_SATA_CFG_20_PORT1_EN |
1262 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1263
1264 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1265
Tejun Heo9a829cc2007-04-17 23:44:08 +09001266 for (i = 0; i < host->n_ports; i++)
1267 nv_adma_setup_port(host->ports[i]);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001268
Robert Hancockfbbb2622006-10-27 19:08:41 -07001269 return 0;
1270}
1271
1272static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1273 struct scatterlist *sg,
1274 int idx,
1275 struct nv_adma_prd *aprd)
1276{
Robert Hancock41949ed2007-02-19 19:02:27 -06001277 u8 flags = 0;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001278 if (qc->tf.flags & ATA_TFLAG_WRITE)
1279 flags |= NV_APRD_WRITE;
1280 if (idx == qc->n_elem - 1)
1281 flags |= NV_APRD_END;
1282 else if (idx != 4)
1283 flags |= NV_APRD_CONT;
1284
1285 aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
1286 aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
Robert Hancock2dec7552006-11-26 14:20:19 -06001287 aprd->flags = flags;
Robert Hancock41949ed2007-02-19 19:02:27 -06001288 aprd->packet_len = 0;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001289}
1290
/*
 * Build the scatter/gather list for a command.  The first five entries
 * live inline in the CPB; any further entries spill into this tag's
 * slice of the per-port APRD table, whose bus address is linked in via
 * next_aprd.
 */
static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;
	unsigned int si;

	VPRINTK("ENTER\n");

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		/* entries 0-4 are inline in the CPB, the rest spill over */
		aprd = (si < 5) ? &cpb->aprd[si] :
			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
		nv_adma_fill_aprd(qc, sg, si, aprd);
	}
	if (si > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}
1310
Robert Hancock382a6652007-02-05 16:26:02 -08001311static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1312{
1313 struct nv_adma_port_priv *pp = qc->ap->private_data;
1314
1315 /* ADMA engine can only be used for non-ATAPI DMA commands,
Robert Hancock3f3debd2007-11-25 16:59:36 -06001316 or interrupt-driven no-data commands. */
Jeff Garzikb4479162007-10-25 20:47:30 -04001317 if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
Robert Hancock3f3debd2007-11-25 16:59:36 -06001318 (qc->tf.flags & ATA_TFLAG_POLLING))
Robert Hancock382a6652007-02-05 16:26:02 -08001319 return 1;
1320
Jeff Garzikb4479162007-10-25 20:47:30 -04001321 if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
Robert Hancock382a6652007-02-05 16:26:02 -08001322 (qc->tf.protocol == ATA_PROT_NODATA))
1323 return 0;
1324
1325 return 1;
1326}
1327
/*
 * Prepare a command for issue.  Register-mode commands are handed to
 * the SFF helper; ADMA commands get their CPB filled in here.  The
 * wmb() pairs around resp_flags/ctl_flags keep the controller from
 * observing a half-built CPB: validity is cleared before the rewrite
 * and only asserted once every field is in place.
 */
static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		/* a DMA-mapped command in register mode is only legal once
		   the port has completed ATAPI setup */
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		ata_sff_qc_prep(qc);
		return;
	}

	/* invalidate the CPB before rewriting it */
	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len		= 3;
	cpb->tag		= qc->tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if (qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
	   until we are finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;
}
1373
/*
 * nv_adma_qc_issue - issue a prepared command
 *
 * Register-mode commands are handed to the SFF issue path; ADMA
 * commands are kicked off by writing the tag to the APPEND register.
 * NCQ commands with ATA_QCFLAG_RESULT_TF are rejected because reading
 * the result taskfile would drop the port out of ADMA mode and abort
 * the other in-flight commands.
 */
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	VPRINTK("ENTER\n");

	/* We can't handle result taskfile with NCQ commands, since
	   retrieving the taskfile switches us out of ADMA mode and would abort
	   existing commands. */
	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
		ata_dev_printk(qc->dev, KERN_ERR,
			"NCQ w/ RESULT_TF not allowed\n");
		return AC_ERR_SYSTEM;
	}

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		/* DMA-mapped commands need the ATAPI DMA setup done first */
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		return ata_sff_qc_issue(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();

	if (curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and
		   non-NCQ commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}
1419
David Howells7d12e782006-10-05 14:55:46 +01001420static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421{
Jeff Garzikcca39742006-08-24 03:19:22 -04001422 struct ata_host *host = dev_instance;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423 unsigned int i;
1424 unsigned int handled = 0;
1425 unsigned long flags;
1426
Jeff Garzikcca39742006-08-24 03:19:22 -04001427 spin_lock_irqsave(&host->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001428
Jeff Garzikcca39742006-08-24 03:19:22 -04001429 for (i = 0; i < host->n_ports; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001430 struct ata_port *ap;
1431
Jeff Garzikcca39742006-08-24 03:19:22 -04001432 ap = host->ports[i];
Tejun Heoc1389502005-08-22 14:59:24 +09001433 if (ap &&
Jeff Garzik029f5462006-04-02 10:30:40 -04001434 !(ap->flags & ATA_FLAG_DISABLED)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001435 struct ata_queued_cmd *qc;
1436
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001437 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Albert Leee50362e2005-09-27 17:39:50 +08001438 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
Tejun Heo9363c382008-04-07 22:47:16 +09001439 handled += ata_sff_host_intr(ap, qc);
Andrew Chewb8870302006-01-04 19:13:04 -08001440 else
1441 // No request pending? Clear interrupt status
1442 // anyway, in case there's one pending.
Tejun Heo5682ed32008-04-07 22:47:16 +09001443 ap->ops->sff_check_status(ap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444 }
1445
1446 }
1447
Jeff Garzikcca39742006-08-24 03:19:22 -04001448 spin_unlock_irqrestore(&host->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449
1450 return IRQ_RETVAL(handled);
1451}
1452
Jeff Garzikcca39742006-08-24 03:19:22 -04001453static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
Tejun Heoada364e2006-06-17 15:49:56 +09001454{
1455 int i, handled = 0;
1456
Jeff Garzikcca39742006-08-24 03:19:22 -04001457 for (i = 0; i < host->n_ports; i++) {
1458 struct ata_port *ap = host->ports[i];
Tejun Heoada364e2006-06-17 15:49:56 +09001459
1460 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1461 handled += nv_host_intr(ap, irq_stat);
1462
1463 irq_stat >>= NV_INT_PORT_SHIFT;
1464 }
1465
1466 return IRQ_RETVAL(handled);
1467}
1468
David Howells7d12e782006-10-05 14:55:46 +01001469static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
Tejun Heoada364e2006-06-17 15:49:56 +09001470{
Jeff Garzikcca39742006-08-24 03:19:22 -04001471 struct ata_host *host = dev_instance;
Tejun Heoada364e2006-06-17 15:49:56 +09001472 u8 irq_stat;
1473 irqreturn_t ret;
1474
Jeff Garzikcca39742006-08-24 03:19:22 -04001475 spin_lock(&host->lock);
Tejun Heo0d5ff562007-02-01 15:06:36 +09001476 irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
Jeff Garzikcca39742006-08-24 03:19:22 -04001477 ret = nv_do_interrupt(host, irq_stat);
1478 spin_unlock(&host->lock);
Tejun Heoada364e2006-06-17 15:49:56 +09001479
1480 return ret;
1481}
1482
David Howells7d12e782006-10-05 14:55:46 +01001483static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
Tejun Heoada364e2006-06-17 15:49:56 +09001484{
Jeff Garzikcca39742006-08-24 03:19:22 -04001485 struct ata_host *host = dev_instance;
Tejun Heoada364e2006-06-17 15:49:56 +09001486 u8 irq_stat;
1487 irqreturn_t ret;
1488
Jeff Garzikcca39742006-08-24 03:19:22 -04001489 spin_lock(&host->lock);
Tejun Heo0d5ff562007-02-01 15:06:36 +09001490 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
Jeff Garzikcca39742006-08-24 03:19:22 -04001491 ret = nv_do_interrupt(host, irq_stat);
1492 spin_unlock(&host->lock);
Tejun Heoada364e2006-06-17 15:49:56 +09001493
1494 return ret;
1495}
1496
Tejun Heoda3dbb12007-07-16 14:29:40 +09001497static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001499 if (sc_reg > SCR_CONTROL)
Tejun Heoda3dbb12007-07-16 14:29:40 +09001500 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501
Tejun Heoda3dbb12007-07-16 14:29:40 +09001502 *val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
1503 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504}
1505
Tejun Heoda3dbb12007-07-16 14:29:40 +09001506static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508 if (sc_reg > SCR_CONTROL)
Tejun Heoda3dbb12007-07-16 14:29:40 +09001509 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001510
Tejun Heo0d5ff562007-02-01 15:06:36 +09001511 iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
Tejun Heoda3dbb12007-07-16 14:29:40 +09001512 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513}
1514
Tejun Heo39f87582006-06-17 15:49:56 +09001515static void nv_nf2_freeze(struct ata_port *ap)
1516{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001517 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
Tejun Heo39f87582006-06-17 15:49:56 +09001518 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1519 u8 mask;
1520
Tejun Heo0d5ff562007-02-01 15:06:36 +09001521 mask = ioread8(scr_addr + NV_INT_ENABLE);
Tejun Heo39f87582006-06-17 15:49:56 +09001522 mask &= ~(NV_INT_ALL << shift);
Tejun Heo0d5ff562007-02-01 15:06:36 +09001523 iowrite8(mask, scr_addr + NV_INT_ENABLE);
Tejun Heo39f87582006-06-17 15:49:56 +09001524}
1525
1526static void nv_nf2_thaw(struct ata_port *ap)
1527{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001528 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
Tejun Heo39f87582006-06-17 15:49:56 +09001529 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1530 u8 mask;
1531
Tejun Heo0d5ff562007-02-01 15:06:36 +09001532 iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
Tejun Heo39f87582006-06-17 15:49:56 +09001533
Tejun Heo0d5ff562007-02-01 15:06:36 +09001534 mask = ioread8(scr_addr + NV_INT_ENABLE);
Tejun Heo39f87582006-06-17 15:49:56 +09001535 mask |= (NV_INT_MASK << shift);
Tejun Heo0d5ff562007-02-01 15:06:36 +09001536 iowrite8(mask, scr_addr + NV_INT_ENABLE);
Tejun Heo39f87582006-06-17 15:49:56 +09001537}
1538
1539static void nv_ck804_freeze(struct ata_port *ap)
1540{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001541 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
Tejun Heo39f87582006-06-17 15:49:56 +09001542 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1543 u8 mask;
1544
1545 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1546 mask &= ~(NV_INT_ALL << shift);
1547 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1548}
1549
1550static void nv_ck804_thaw(struct ata_port *ap)
1551{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001552 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
Tejun Heo39f87582006-06-17 15:49:56 +09001553 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1554 u8 mask;
1555
1556 writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1557
1558 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1559 mask |= (NV_INT_MASK << shift);
1560 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1561}
1562
Kuan Luof140f0f2007-10-15 15:16:53 -04001563static void nv_mcp55_freeze(struct ata_port *ap)
1564{
1565 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1566 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1567 u32 mask;
1568
1569 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1570
1571 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1572 mask &= ~(NV_INT_ALL_MCP55 << shift);
1573 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
Tejun Heo9363c382008-04-07 22:47:16 +09001574 ata_sff_freeze(ap);
Kuan Luof140f0f2007-10-15 15:16:53 -04001575}
1576
1577static void nv_mcp55_thaw(struct ata_port *ap)
1578{
1579 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1580 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1581 u32 mask;
1582
1583 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1584
1585 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1586 mask |= (NV_INT_MASK_MCP55 << shift);
1587 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
Tejun Heo9363c382008-04-07 22:47:16 +09001588 ata_sff_thaw(ap);
Kuan Luof140f0f2007-10-15 15:16:53 -04001589}
1590
Tejun Heocc0680a2007-08-06 18:36:23 +09001591static int nv_hardreset(struct ata_link *link, unsigned int *class,
Tejun Heod4b2bab2007-02-02 16:50:52 +09001592 unsigned long deadline)
Tejun Heo39f87582006-06-17 15:49:56 +09001593{
Tejun Heoa0b9f4b2008-04-23 12:14:05 +09001594 int rc;
Tejun Heo39f87582006-06-17 15:49:56 +09001595
1596 /* SATA hardreset fails to retrieve proper device signature on
Tejun Heoa0b9f4b2008-04-23 12:14:05 +09001597 * some controllers. Request follow up SRST. For more info,
1598 * see http://bugzilla.kernel.org/show_bug.cgi?id=3352
Tejun Heo39f87582006-06-17 15:49:56 +09001599 */
Tejun Heoa0b9f4b2008-04-23 12:14:05 +09001600 rc = sata_sff_hardreset(link, class, deadline);
1601 if (rc)
1602 return rc;
1603 return -EAGAIN;
Tejun Heo39f87582006-06-17 15:49:56 +09001604}
1605
/*
 * nv_adma_error_handler - EH entry for ADMA ports
 *
 * If the port is still in ADMA mode: dump the controller/CPB state for
 * any in-flight commands, drop back to register mode, invalidate every
 * CPB so the controller cannot execute stale ones, and pulse the
 * channel-reset bit.  Finally hand off to the generic SFF error
 * handler in all cases.
 */
static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
			/* commands are in flight -- log the full ADMA
			   engine state for postmortem debugging */
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);
			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

			ata_port_printk(ap, KERN_ERR,
				"EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
				"next cpb count 0x%X next cpb idx 0x%x\n",
				notifier, notifier_error, gen_ctl, status,
				cpb_count, next_cpb_idx);

			/* dump the CPBs of the active and/or queued tags */
			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
				struct nv_adma_cpb *cpb = &pp->cpb[i];
				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
				    ap->link.sactive & (1 << i))
					ata_port_printk(ap, KERN_ERR,
						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
						i, cpb->ctl_flags, cpb->resp_flags);
			}
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from
		   being executed */
		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	}

	ata_sff_error_handler(ap);
}
1661
Kuan Luof140f0f2007-10-15 15:16:53 -04001662static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1663{
1664 struct nv_swncq_port_priv *pp = ap->private_data;
1665 struct defer_queue *dq = &pp->defer_queue;
1666
1667 /* queue is full */
1668 WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1669 dq->defer_bits |= (1 << qc->tag);
1670 dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1671}
1672
1673static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1674{
1675 struct nv_swncq_port_priv *pp = ap->private_data;
1676 struct defer_queue *dq = &pp->defer_queue;
1677 unsigned int tag;
1678
1679 if (dq->head == dq->tail) /* null queue */
1680 return NULL;
1681
1682 tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1683 dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1684 WARN_ON(!(dq->defer_bits & (1 << tag)));
1685 dq->defer_bits &= ~(1 << tag);
1686
1687 return ata_qc_from_tag(ap, tag);
1688}
1689
1690static void nv_swncq_fis_reinit(struct ata_port *ap)
1691{
1692 struct nv_swncq_port_priv *pp = ap->private_data;
1693
1694 pp->dhfis_bits = 0;
1695 pp->dmafis_bits = 0;
1696 pp->sdbfis_bits = 0;
1697 pp->ncq_flags = 0;
1698}
1699
1700static void nv_swncq_pp_reinit(struct ata_port *ap)
1701{
1702 struct nv_swncq_port_priv *pp = ap->private_data;
1703 struct defer_queue *dq = &pp->defer_queue;
1704
1705 dq->head = 0;
1706 dq->tail = 0;
1707 dq->defer_bits = 0;
1708 pp->qc_active = 0;
1709 pp->last_issue_tag = ATA_TAG_POISON;
1710 nv_swncq_fis_reinit(ap);
1711}
1712
1713static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1714{
1715 struct nv_swncq_port_priv *pp = ap->private_data;
1716
1717 writew(fis, pp->irq_block);
1718}
1719
1720static void __ata_bmdma_stop(struct ata_port *ap)
1721{
1722 struct ata_queued_cmd qc;
1723
1724 qc.ap = ap;
1725 ata_bmdma_stop(&qc);
1726}
1727
1728static void nv_swncq_ncq_stop(struct ata_port *ap)
1729{
1730 struct nv_swncq_port_priv *pp = ap->private_data;
1731 unsigned int i;
1732 u32 sactive;
1733 u32 done_mask;
1734
1735 ata_port_printk(ap, KERN_ERR,
1736 "EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n",
1737 ap->qc_active, ap->link.sactive);
1738 ata_port_printk(ap, KERN_ERR,
1739 "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n "
1740 "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1741 pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1742 pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1743
1744 ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
Tejun Heo5682ed32008-04-07 22:47:16 +09001745 ap->ops->sff_check_status(ap),
Kuan Luof140f0f2007-10-15 15:16:53 -04001746 ioread8(ap->ioaddr.error_addr));
1747
1748 sactive = readl(pp->sactive_block);
1749 done_mask = pp->qc_active ^ sactive;
1750
1751 ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sacitve\n");
1752 for (i = 0; i < ATA_MAX_QUEUE; i++) {
1753 u8 err = 0;
1754 if (pp->qc_active & (1 << i))
1755 err = 0;
1756 else if (done_mask & (1 << i))
1757 err = 1;
1758 else
1759 continue;
1760
1761 ata_port_printk(ap, KERN_ERR,
1762 "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1763 (pp->dhfis_bits >> i) & 0x1,
1764 (pp->dmafis_bits >> i) & 0x1,
1765 (pp->sdbfis_bits >> i) & 0x1,
1766 (sactive >> i) & 0x1,
1767 (err ? "error! tag doesn't exit" : " "));
1768 }
1769
1770 nv_swncq_pp_reinit(ap);
Tejun Heo5682ed32008-04-07 22:47:16 +09001771 ap->ops->sff_irq_clear(ap);
Kuan Luof140f0f2007-10-15 15:16:53 -04001772 __ata_bmdma_stop(ap);
1773 nv_swncq_irq_clear(ap, 0xffff);
1774}
1775
1776static void nv_swncq_error_handler(struct ata_port *ap)
1777{
1778 struct ata_eh_context *ehc = &ap->link.eh_context;
1779
1780 if (ap->link.sactive) {
1781 nv_swncq_ncq_stop(ap);
Tejun Heocf480622008-01-24 00:05:14 +09001782 ehc->i.action |= ATA_EH_RESET;
Kuan Luof140f0f2007-10-15 15:16:53 -04001783 }
1784
Tejun Heo9363c382008-04-07 22:47:16 +09001785 ata_sff_error_handler(ap);
Kuan Luof140f0f2007-10-15 15:16:53 -04001786}
1787
1788#ifdef CONFIG_PM
1789static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1790{
1791 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1792 u32 tmp;
1793
1794 /* clear irq */
1795 writel(~0, mmio + NV_INT_STATUS_MCP55);
1796
1797 /* disable irq */
1798 writel(0, mmio + NV_INT_ENABLE_MCP55);
1799
1800 /* disable swncq */
1801 tmp = readl(mmio + NV_CTL_MCP55);
1802 tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1803 writel(tmp, mmio + NV_CTL_MCP55);
1804
1805 return 0;
1806}
1807
1808static int nv_swncq_port_resume(struct ata_port *ap)
1809{
1810 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1811 u32 tmp;
1812
1813 /* clear irq */
1814 writel(~0, mmio + NV_INT_STATUS_MCP55);
1815
1816 /* enable irq */
1817 writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1818
1819 /* enable swncq */
1820 tmp = readl(mmio + NV_CTL_MCP55);
1821 writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1822
1823 return 0;
1824}
1825#endif
1826
/*
 * nv_swncq_host_init - one-time controller setup for SWNCQ operation
 *
 * Clears the ECO 398 bit in PCI config space, enables the SWNCQ mode
 * bits for both channels, unmasks the interrupt sources the SWNCQ
 * engine needs and clears any stale port interrupt status.
 */
static void nv_swncq_host_init(struct ata_host *host)
{
	u32 tmp;
	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable ECO 398 */
	/* NOTE(review): config offset 0x7f is vendor-specific NVIDIA
	   space; bit 7 presumably gates the ECO 398 behavior -- no
	   public documentation to confirm */
	pci_read_config_byte(pdev, 0x7f, &regval);
	regval &= ~(1 << 7);
	pci_write_config_byte(pdev, 0x7f, regval);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	VPRINTK("HOST_CTL:0x%X\n", tmp);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	/* enable irq intr */
	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
	VPRINTK("HOST_ENABLE:0x%X\n", tmp);
	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* clear port irq */
	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}
1852
/*
 * nv_swncq_slave_config - SCSI slave configuration hook for SWNCQ
 *
 * After the generic libata slave configuration, detect combinations
 * known to be broken with SWNCQ (Maxtor drives on MCP51, or on MCP55
 * up to chip revision A2) and force those devices to queue depth 1,
 * effectively disabling NCQ for them.
 */
static int nv_swncq_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct ata_device *dev;
	int rc;
	u8 rev;
	u8 check_maxtor = 0;
	unsigned char model_num[ATA_ID_PROD_LEN + 1];

	rc = ata_scsi_slave_config(sdev);
	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	dev = &ap->link.device[sdev->id];
	/* the quirk only concerns NCQ-capable ATA disks */
	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
		return rc;

	/* if MCP51 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
		check_maxtor = 1;

	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
		/* offset 0x8 is the standard PCI revision ID register */
		pci_read_config_byte(pdev, 0x8, &rev);
		if (rev <= 0xa2)
			check_maxtor = 1;
	}

	if (!check_maxtor)
		return rc;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

	if (strncmp(model_num, "Maxtor", 6) == 0) {
		ata_scsi_change_queue_depth(sdev, 1);
		ata_dev_printk(dev, KERN_NOTICE,
			"Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
	}

	return rc;
}
1898
1899static int nv_swncq_port_start(struct ata_port *ap)
1900{
1901 struct device *dev = ap->host->dev;
1902 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1903 struct nv_swncq_port_priv *pp;
1904 int rc;
1905
1906 rc = ata_port_start(ap);
1907 if (rc)
1908 return rc;
1909
1910 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1911 if (!pp)
1912 return -ENOMEM;
1913
1914 pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1915 &pp->prd_dma, GFP_KERNEL);
1916 if (!pp->prd)
1917 return -ENOMEM;
1918 memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
1919
1920 ap->private_data = pp;
1921 pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1922 pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1923 pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1924
1925 return 0;
1926}
1927
1928static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1929{
1930 if (qc->tf.protocol != ATA_PROT_NCQ) {
Tejun Heo9363c382008-04-07 22:47:16 +09001931 ata_sff_qc_prep(qc);
Kuan Luof140f0f2007-10-15 15:16:53 -04001932 return;
1933 }
1934
1935 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1936 return;
1937
1938 nv_swncq_fill_sg(qc);
1939}
1940
/*
 * nv_swncq_fill_sg - build the BMDMA PRD table for an NCQ command
 *
 * Each queue tag owns its own PRD table inside pp->prd.  Scatterlist
 * entries are split so that no PRD entry crosses a 64KB boundary, as
 * the BMDMA engine requires, and the final entry is marked with
 * ATA_PRD_EOT.
 *
 * NOTE(review): assumes at least one PRD entry is produced (idx > 0);
 * callers only invoke this for DMA-mapped commands, which should
 * guarantee a non-empty scatterlist -- confirm.
 */
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_prd *prd;
	unsigned int si, idx;

	/* select this tag's private PRD table */
	prd = pp->prd + ATA_MAX_PRD * qc->tag;

	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		addr = (u32)sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			/* split at 64KB boundaries; a full 64KB chunk is
			   encoded as a length field of 0 (len & 0xffff) */
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			prd[idx].addr = cpu_to_le32(addr);
			prd[idx].flags_len = cpu_to_le32(len & 0xffff);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* flag the last entry as end-of-table */
	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
1976
1977static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
1978 struct ata_queued_cmd *qc)
1979{
1980 struct nv_swncq_port_priv *pp = ap->private_data;
1981
1982 if (qc == NULL)
1983 return 0;
1984
1985 DPRINTK("Enter\n");
1986
1987 writel((1 << qc->tag), pp->sactive_block);
1988 pp->last_issue_tag = qc->tag;
1989 pp->dhfis_bits &= ~(1 << qc->tag);
1990 pp->dmafis_bits &= ~(1 << qc->tag);
1991 pp->qc_active |= (0x1 << qc->tag);
1992
Tejun Heo5682ed32008-04-07 22:47:16 +09001993 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
1994 ap->ops->sff_exec_command(ap, &qc->tf);
Kuan Luof140f0f2007-10-15 15:16:53 -04001995
1996 DPRINTK("Issued tag %u\n", qc->tag);
1997
1998 return 0;
1999}
2000
2001static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2002{
2003 struct ata_port *ap = qc->ap;
2004 struct nv_swncq_port_priv *pp = ap->private_data;
2005
2006 if (qc->tf.protocol != ATA_PROT_NCQ)
Tejun Heo9363c382008-04-07 22:47:16 +09002007 return ata_sff_qc_issue(qc);
Kuan Luof140f0f2007-10-15 15:16:53 -04002008
2009 DPRINTK("Enter\n");
2010
2011 if (!pp->qc_active)
2012 nv_swncq_issue_atacmd(ap, qc);
2013 else
2014 nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */
2015
2016 return 0;
2017}
2018
/*
 * nv_swncq_hotplug - handle a hot plug/unplug interrupt
 *
 * Clears SError, records a plug or unplug description for EH and
 * freezes the port so EH takes over.
 */
static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
{
	u32 serror;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	/* NOTE(review): sata_scr_read()'s return value is unchecked;
	   on failure @serror would be used uninitialized -- confirm
	   SCR access cannot fail on these controllers */
	sata_scr_read(&ap->link, SCR_ERROR, &serror);
	sata_scr_write(&ap->link, SCR_ERROR, serror);

	/* analyze @irq_stat */
	if (fis & NV_SWNCQ_IRQ_ADDED)
		ata_ehi_push_desc(ehi, "hot plug");
	else if (fis & NV_SWNCQ_IRQ_REMOVED)
		ata_ehi_push_desc(ehi, "hot unplug");

	ata_ehi_hotplugged(ehi);

	/* okay, let's hand over to EH */
	ehi->serror |= serror;

	ata_port_freeze(ap);
}
2043
/*
 * nv_swncq_sdbfis - process a Set Device Bits FIS completion
 *
 * Completes every tag that the SActive register shows as finished,
 * updates the per-tag bookkeeping, and re-issues or un-defers
 * commands as appropriate.  Returns the number of commands completed,
 * or -EINVAL when the port must be frozen (BMDMA error or an
 * impossible SActive transition).
 */
static int nv_swncq_sdbfis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 sactive;
	int nr_done = 0;
	u32 done_mask;
	int i;
	u8 host_stat;
	u8 lack_dhfis = 0;

	host_stat = ap->ops->bmdma_status(ap);
	if (unlikely(host_stat & ATA_DMA_ERR)) {
		/* error when transfering data to/from memory */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
		ehi->err_mask |= AC_ERR_HOST_BUS;
		ehi->action |= ATA_EH_RESET;
		return -EINVAL;
	}

	ap->ops->sff_irq_clear(ap);
	__ata_bmdma_stop(ap);

	/* bits set in qc_active but clear in SActive have completed */
	sactive = readl(pp->sactive_block);
	done_mask = pp->qc_active ^ sactive;

	if (unlikely(done_mask & sactive)) {
		/* a tag turned active that the driver never issued */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition"
				  "(%08x->%08x)", pp->qc_active, sactive);
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		return -EINVAL;
	}
	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		if (!(done_mask & (1 << i)))
			continue;

		qc = ata_qc_from_tag(ap, i);
		if (qc) {
			ata_qc_complete(qc);
			pp->qc_active &= ~(1 << i);
			pp->dhfis_bits &= ~(1 << i);
			pp->dmafis_bits &= ~(1 << i);
			pp->sdbfis_bits |= (1 << i);
			nr_done++;
		}
	}

	if (!ap->qc_active) {
		/* everything retired; reset the per-port state */
		DPRINTK("over\n");
		nv_swncq_pp_reinit(ap);
		return nr_done;
	}

	/* some outstanding command already got its D2H FIS; nothing
	   further to kick off here */
	if (pp->qc_active & pp->dhfis_bits)
		return nr_done;

	if ((pp->ncq_flags & ncq_saw_backout) ||
	    (pp->qc_active ^ pp->dhfis_bits))
		/* if the controller can't deliver a device-to-host
		 * register FIS, the driver needs to reissue the command.
		 */
		lack_dhfis = 1;

	DPRINTK("id 0x%x QC: qc_active 0x%x,"
		"SWNCQ:qc_active 0x%X defer_bits %X "
		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
		ap->print_id, ap->qc_active, pp->qc_active,
		pp->defer_queue.defer_bits, pp->dhfis_bits,
		pp->dmafis_bits, pp->last_issue_tag);

	nv_swncq_fis_reinit(ap);

	if (lack_dhfis) {
		/* re-issue the most recently issued command */
		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
		nv_swncq_issue_atacmd(ap, qc);
		return nr_done;
	}

	if (pp->defer_queue.defer_bits) {
		/* send deferral queue command */
		qc = nv_swncq_qc_from_dq(ap);
		WARN_ON(qc == NULL);
		nv_swncq_issue_atacmd(ap, qc);
	}

	return nr_done;
}
2135
2136static inline u32 nv_swncq_tag(struct ata_port *ap)
2137{
2138 struct nv_swncq_port_priv *pp = ap->private_data;
2139 u32 tag;
2140
2141 tag = readb(pp->tag_block) >> 2;
2142 return (tag & 0x1f);
2143}
2144
/*
 * nv_swncq_dmafis - respond to a DMA Setup FIS
 *
 * Reads the tag the controller reported, programs the BMDMA engine
 * with the PRD table belonging to that tag and starts the transfer in
 * the direction the queued taskfile requests.  Returns 1 if DMA was
 * started, 0 if no command matches the reported tag.
 */
static int nv_swncq_dmafis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned int rw;
	u8 dmactl;
	u32 tag;
	struct nv_swncq_port_priv *pp = ap->private_data;

	__ata_bmdma_stop(ap);
	tag = nv_swncq_tag(ap);

	DPRINTK("dma setup tag 0x%x\n", tag);
	qc = ata_qc_from_tag(ap, tag);

	if (unlikely(!qc))
		return 0;

	rw = qc->tf.flags & ATA_TFLAG_WRITE;

	/* load PRD table addr. */
	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	/* ATA_DMA_WR means the DMA engine writes to memory, so it is
	   set for device-to-host (read) commands -- hence the !rw */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~ATA_DMA_WR;
	if (!rw)
		dmactl |= ATA_DMA_WR;

	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	return 1;
}
2178
/*
 * nv_swncq_host_interrupt - per-port SWNCQ interrupt processing
 *
 * @fis encodes which FIS-level events fired (hotplug, backout, SDB,
 * D2H register FIS, DMA setup).  Drives the SWNCQ state machine:
 * completes finished tags on an SDB FIS, marks issued commands as
 * accepted on a D2H FIS (possibly issuing the next deferred command),
 * and programs/starts BMDMA on a DMA Setup FIS.  Any inconsistent
 * transition freezes the port for EH.
 */
static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 serror;
	u8 ata_stat;
	int rc = 0;

	/* reading status also acks the device interrupt */
	ata_stat = ap->ops->sff_check_status(ap);
	nv_swncq_irq_clear(ap, fis);
	if (!fis)
		return;

	/* a frozen port is EH's business; ignore everything */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
		nv_swncq_hotplug(ap, fis);
		return;
	}

	if (!pp->qc_active)
		return;

	if (ap->ops->scr_read(ap, SCR_ERROR, &serror))
		return;
	ap->ops->scr_write(ap, SCR_ERROR, serror);

	if (ata_stat & ATA_ERR) {
		/* device reported an error: hand everything to EH */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
		ehi->err_mask |= AC_ERR_DEV;
		ehi->serror |= serror;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
		return;
	}

	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
		/* If the IRQ is backout, driver must issue
		 * the new command again some time later.
		 */
		pp->ncq_flags |= ncq_saw_backout;
	}

	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
		pp->ncq_flags |= ncq_saw_sdb;
		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
			ap->print_id, pp->qc_active, pp->dhfis_bits,
			pp->dmafis_bits, readl(pp->sactive_block));
		rc = nv_swncq_sdbfis(ap);
		if (rc < 0)
			goto irq_error;
	}

	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
		/* The interrupt indicates the new command
		 * was transmitted correctly to the drive.
		 */
		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
		pp->ncq_flags |= ncq_saw_d2h;
		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
			/* D2H after SDB/backout in the same window is
			   an illegal ordering -- freeze for EH */
			ata_ehi_push_desc(ehi, "illegal fis transaction");
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_RESET;
			goto irq_error;
		}

		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
		    !(pp->ncq_flags & ncq_saw_dmas)) {
			/* drive accepted the command and no DMA is in
			   flight: try to issue the next deferred one */
			ata_stat = ap->ops->sff_check_status(ap);
			if (ata_stat & ATA_BUSY)
				goto irq_exit;

			if (pp->defer_queue.defer_bits) {
				DPRINTK("send next command\n");
				qc = nv_swncq_qc_from_dq(ap);
				nv_swncq_issue_atacmd(ap, qc);
			}
		}
	}

	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
		/* program the dma controller with appropriate PRD buffers
		 * and start the DMA transfer for requested command.
		 */
		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
		pp->ncq_flags |= ncq_saw_dmas;
		rc = nv_swncq_dmafis(ap);
	}

irq_exit:
	return;
irq_error:
	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
	ata_port_freeze(ap);
	return;
}
2279
2280static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2281{
2282 struct ata_host *host = dev_instance;
2283 unsigned int i;
2284 unsigned int handled = 0;
2285 unsigned long flags;
2286 u32 irq_stat;
2287
2288 spin_lock_irqsave(&host->lock, flags);
2289
2290 irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2291
2292 for (i = 0; i < host->n_ports; i++) {
2293 struct ata_port *ap = host->ports[i];
2294
2295 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
2296 if (ap->link.sactive) {
2297 nv_swncq_host_interrupt(ap, (u16)irq_stat);
2298 handled = 1;
2299 } else {
2300 if (irq_stat) /* reserve Hotplug */
2301 nv_swncq_irq_clear(ap, 0xfff0);
2302
2303 handled += nv_host_intr(ap, (u8)irq_stat);
2304 }
2305 }
2306 irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2307 }
2308
2309 spin_unlock_irqrestore(&host->lock, flags);
2310
2311 return IRQ_RETVAL(handled);
2312}
2313
/*
 * nv_init_one - PCI probe entry point
 * @pdev: PCI device being probed
 * @ent: matching entry from the driver's PCI ID table; ->driver_data
 *       carries the controller type (GENERIC/NFORCE2/NFORCE3/CK804/
 *       MCP5x variants)
 *
 * Brings the controller up: verifies it is the SATA (not IDE) function,
 * selects the operating mode (ADMA/SWNCQ/legacy, subject to module
 * parameters), maps the MMIO BAR, wires up SCR access for both ports,
 * enables the SATA register space on CK804+ parts, and activates the
 * host with the mode-specific interrupt handler.
 *
 * Returns 0 on success or a negative errno.
 */
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	const struct ata_port_info *ppi[] = { NULL, NULL };
	struct nv_pi_priv *ipriv;
	struct ata_host *host;
	struct nv_host_priv *hpriv;
	int rc;
	u32 bar;
	void __iomem *base;
	unsigned long type = ent->driver_data;

	// Make sure this is a SATA controller by counting the number of bars
	// (NVIDIA SATA controllers will always have six bars).  Otherwise,
	// it's an IDE controller and we ignore it.
	for (bar = 0; bar < 6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	/* managed enable: resources auto-released on detach/failure */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* determine type and allocate host */
	if (type == CK804 && adma_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
		type = ADMA;
	}

	/* SWNCQ can be disabled by module parameter; fall back to legacy */
	if (type == SWNCQ) {
		if (swncq_enabled)
			dev_printk(KERN_NOTICE, &pdev->dev,
				   "Using SWNCQ mode\n");
		else
			type = GENERIC;
	}

	ppi[0] = &nv_port_info[type];
	ipriv = ppi[0]->private_data;
	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;

	/* remember the chosen type for suspend/resume handling */
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	hpriv->type = type;
	host->private_data = hpriv;

	/* request and iomap NV_MMIO_BAR */
	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
	if (rc)
		return rc;

	/* configure SCR access */
	base = host->iomap[NV_MMIO_BAR];
	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	/* init ADMA */
	if (type == ADMA) {
		rc = nv_adma_host_init(host);
		if (rc)
			return rc;
	} else if (type == SWNCQ)
		nv_swncq_host_init(host);

	pci_set_master(pdev);
	/* irq handler and scsi_host_template are per-mode */
	return ata_host_activate(host, pdev->irq, ipriv->irq_handler,
				 IRQF_SHARED, ipriv->sht);
}
2397
#ifdef CONFIG_PM
/*
 * nv_pci_device_resume - restore controller state after suspend
 * @pdev: PCI device being resumed
 *
 * After the generic libata PCI resume, chip-specific config-space
 * state lost across a real suspend (PM_EVENT_SUSPEND) is rebuilt:
 * the SATA register space enable on CK804+ and, in ADMA mode, the
 * per-port ADMA enable bits (kept off for ports that completed ATAPI
 * setup, since ADMA is not used for those).  Returns 0 on success or
 * a negative errno from the generic resume.
 */
static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		/* re-enable the SATA register space (mirrors probe) */
		if (hpriv->type >= CK804) {
			u8 regval;

			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if (hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;
			/* enable/disable ADMA on the ports appropriately */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			pp = host->ports[0]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
					  NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			pp = host->ports[1]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
					  NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);

	return 0;
}
#endif
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002447
Jeff Garzikcca39742006-08-24 03:19:22 -04002448static void nv_ck804_host_stop(struct ata_host *host)
Tejun Heoada364e2006-06-17 15:49:56 +09002449{
Jeff Garzikcca39742006-08-24 03:19:22 -04002450 struct pci_dev *pdev = to_pci_dev(host->dev);
Tejun Heoada364e2006-06-17 15:49:56 +09002451 u8 regval;
2452
2453 /* disable SATA space for CK804 */
2454 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2455 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2456 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
Tejun Heoada364e2006-06-17 15:49:56 +09002457}
2458
Robert Hancockfbbb2622006-10-27 19:08:41 -07002459static void nv_adma_host_stop(struct ata_host *host)
2460{
2461 struct pci_dev *pdev = to_pci_dev(host->dev);
Robert Hancockfbbb2622006-10-27 19:08:41 -07002462 u32 tmp32;
2463
Robert Hancockfbbb2622006-10-27 19:08:41 -07002464 /* disable ADMA on the ports */
2465 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2466 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2467 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2468 NV_MCP_SATA_CFG_20_PORT1_EN |
2469 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2470
2471 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2472
2473 nv_ck804_host_stop(host);
2474}
2475
/* Module load: register the PCI driver with the PCI core. */
static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}
2480
/* Module unload: unregister the PCI driver, detaching all devices. */
static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}
2485
module_init(nv_init);
module_exit(nv_exit);
/* adma=0 disables the CK804/MCP04 ADMA interface (checked in nv_init_one) */
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
/* swncq=0 falls MCP5x controllers back to the legacy (GENERIC) path */
module_param_named(swncq, swncq_enabled, bool, 0444);
MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
Kuan Luof140f0f2007-10-15 15:16:53 -04002492