blob: bd5b6c35ee5d8c5ac318964db4236db7432be7fb [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * sata_nv.c - NVIDIA nForce SATA
3 *
4 * Copyright 2004 NVIDIA Corp. All rights reserved.
5 * Copyright 2004 Andrew Chew
6 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 *
Jeff Garzikaa7e16d2005-08-29 15:12:56 -04008 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
Linus Torvalds1da177e2005-04-16 15:20:36 -070021 *
Jeff Garzikaf36d7f2005-08-28 20:18:39 -040022 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 * No hardware documentation available outside of NVIDIA.
27 * This driver programs the NVIDIA SATA controller in a similar
28 * fashion as with other PCI IDE BMDMA controllers, with a few
29 * NV-specific details such as register offsets, SATA phy location,
30 * hotplug info, etc.
31 *
Robert Hancockfbbb2622006-10-27 19:08:41 -070032 * CK804/MCP04 controllers support an alternate programming interface
33 * similar to the ADMA specification (with some modifications).
34 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35 * sent through the legacy interface.
36 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070037 */
38
Linus Torvalds1da177e2005-04-16 15:20:36 -070039#include <linux/kernel.h>
40#include <linux/module.h>
41#include <linux/pci.h>
42#include <linux/init.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/interrupt.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050046#include <linux/device.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070047#include <scsi/scsi_host.h>
Robert Hancockfbbb2622006-10-27 19:08:41 -070048#include <scsi/scsi_device.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070049#include <linux/libata.h>
50
51#define DRV_NAME "sata_nv"
Jeff Garzik2a3103c2007-08-31 04:54:06 -040052#define DRV_VERSION "3.5"
Robert Hancockfbbb2622006-10-27 19:08:41 -070053
54#define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
Linus Torvalds1da177e2005-04-16 15:20:36 -070055
Jeff Garzik10ad05d2006-03-22 23:50:50 -050056enum {
Tejun Heo0d5ff562007-02-01 15:06:36 +090057 NV_MMIO_BAR = 5,
58
Jeff Garzik10ad05d2006-03-22 23:50:50 -050059 NV_PORTS = 2,
60 NV_PIO_MASK = 0x1f,
61 NV_MWDMA_MASK = 0x07,
62 NV_UDMA_MASK = 0x7f,
63 NV_PORT0_SCR_REG_OFFSET = 0x00,
64 NV_PORT1_SCR_REG_OFFSET = 0x40,
Linus Torvalds1da177e2005-04-16 15:20:36 -070065
Tejun Heo27e4b272006-06-17 15:49:55 +090066 /* INT_STATUS/ENABLE */
Jeff Garzik10ad05d2006-03-22 23:50:50 -050067 NV_INT_STATUS = 0x10,
Jeff Garzik10ad05d2006-03-22 23:50:50 -050068 NV_INT_ENABLE = 0x11,
Tejun Heo27e4b272006-06-17 15:49:55 +090069 NV_INT_STATUS_CK804 = 0x440,
Jeff Garzik10ad05d2006-03-22 23:50:50 -050070 NV_INT_ENABLE_CK804 = 0x441,
Linus Torvalds1da177e2005-04-16 15:20:36 -070071
Tejun Heo27e4b272006-06-17 15:49:55 +090072 /* INT_STATUS/ENABLE bits */
73 NV_INT_DEV = 0x01,
74 NV_INT_PM = 0x02,
75 NV_INT_ADDED = 0x04,
76 NV_INT_REMOVED = 0x08,
77
78 NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */
79
Tejun Heo39f87582006-06-17 15:49:56 +090080 NV_INT_ALL = 0x0f,
Tejun Heo5a44eff2006-06-17 15:49:56 +090081 NV_INT_MASK = NV_INT_DEV |
82 NV_INT_ADDED | NV_INT_REMOVED,
Tejun Heo39f87582006-06-17 15:49:56 +090083
Tejun Heo27e4b272006-06-17 15:49:55 +090084 /* INT_CONFIG */
Jeff Garzik10ad05d2006-03-22 23:50:50 -050085 NV_INT_CONFIG = 0x12,
86 NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI
Linus Torvalds1da177e2005-04-16 15:20:36 -070087
Jeff Garzik10ad05d2006-03-22 23:50:50 -050088 // For PCI config register 20
89 NV_MCP_SATA_CFG_20 = 0x50,
90 NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
Robert Hancockfbbb2622006-10-27 19:08:41 -070091 NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17),
92 NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16),
93 NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
94 NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),
95
96 NV_ADMA_MAX_CPBS = 32,
97 NV_ADMA_CPB_SZ = 128,
98 NV_ADMA_APRD_SZ = 16,
99 NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
100 NV_ADMA_APRD_SZ,
101 NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5,
102 NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
103 NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
104 (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
105
106 /* BAR5 offset to ADMA general registers */
107 NV_ADMA_GEN = 0x400,
108 NV_ADMA_GEN_CTL = 0x00,
109 NV_ADMA_NOTIFIER_CLEAR = 0x30,
110
111 /* BAR5 offset to ADMA ports */
112 NV_ADMA_PORT = 0x480,
113
114 /* size of ADMA port register space */
115 NV_ADMA_PORT_SIZE = 0x100,
116
117 /* ADMA port registers */
118 NV_ADMA_CTL = 0x40,
119 NV_ADMA_CPB_COUNT = 0x42,
120 NV_ADMA_NEXT_CPB_IDX = 0x43,
121 NV_ADMA_STAT = 0x44,
122 NV_ADMA_CPB_BASE_LOW = 0x48,
123 NV_ADMA_CPB_BASE_HIGH = 0x4C,
124 NV_ADMA_APPEND = 0x50,
125 NV_ADMA_NOTIFIER = 0x68,
126 NV_ADMA_NOTIFIER_ERROR = 0x6C,
127
128 /* NV_ADMA_CTL register bits */
129 NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0),
130 NV_ADMA_CTL_CHANNEL_RESET = (1 << 5),
131 NV_ADMA_CTL_GO = (1 << 7),
132 NV_ADMA_CTL_AIEN = (1 << 8),
133 NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11),
134 NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12),
135
136 /* CPB response flag bits */
137 NV_CPB_RESP_DONE = (1 << 0),
138 NV_CPB_RESP_ATA_ERR = (1 << 3),
139 NV_CPB_RESP_CMD_ERR = (1 << 4),
140 NV_CPB_RESP_CPB_ERR = (1 << 7),
141
142 /* CPB control flag bits */
143 NV_CPB_CTL_CPB_VALID = (1 << 0),
144 NV_CPB_CTL_QUEUE = (1 << 1),
145 NV_CPB_CTL_APRD_VALID = (1 << 2),
146 NV_CPB_CTL_IEN = (1 << 3),
147 NV_CPB_CTL_FPDMA = (1 << 4),
148
149 /* APRD flags */
150 NV_APRD_WRITE = (1 << 1),
151 NV_APRD_END = (1 << 2),
152 NV_APRD_CONT = (1 << 3),
153
154 /* NV_ADMA_STAT flags */
155 NV_ADMA_STAT_TIMEOUT = (1 << 0),
156 NV_ADMA_STAT_HOTUNPLUG = (1 << 1),
157 NV_ADMA_STAT_HOTPLUG = (1 << 2),
158 NV_ADMA_STAT_CPBERR = (1 << 4),
159 NV_ADMA_STAT_SERROR = (1 << 5),
160 NV_ADMA_STAT_CMD_COMPLETE = (1 << 6),
161 NV_ADMA_STAT_IDLE = (1 << 8),
162 NV_ADMA_STAT_LEGACY = (1 << 9),
163 NV_ADMA_STAT_STOPPED = (1 << 10),
164 NV_ADMA_STAT_DONE = (1 << 12),
165 NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR |
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400166 NV_ADMA_STAT_TIMEOUT,
Robert Hancockfbbb2622006-10-27 19:08:41 -0700167
168 /* port flags */
169 NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
Robert Hancock2dec7552006-11-26 14:20:19 -0600170 NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),
Robert Hancockfbbb2622006-10-27 19:08:41 -0700171
Kuan Luof140f0f2007-10-15 15:16:53 -0400172 /* MCP55 reg offset */
173 NV_CTL_MCP55 = 0x400,
174 NV_INT_STATUS_MCP55 = 0x440,
175 NV_INT_ENABLE_MCP55 = 0x444,
176 NV_NCQ_REG_MCP55 = 0x448,
177
178 /* MCP55 */
179 NV_INT_ALL_MCP55 = 0xffff,
180 NV_INT_PORT_SHIFT_MCP55 = 16, /* each port occupies 16 bits */
181 NV_INT_MASK_MCP55 = NV_INT_ALL_MCP55 & 0xfffd,
182
183 /* SWNCQ ENABLE BITS*/
184 NV_CTL_PRI_SWNCQ = 0x02,
185 NV_CTL_SEC_SWNCQ = 0x04,
186
187 /* SW NCQ status bits*/
188 NV_SWNCQ_IRQ_DEV = (1 << 0),
189 NV_SWNCQ_IRQ_PM = (1 << 1),
190 NV_SWNCQ_IRQ_ADDED = (1 << 2),
191 NV_SWNCQ_IRQ_REMOVED = (1 << 3),
192
193 NV_SWNCQ_IRQ_BACKOUT = (1 << 4),
194 NV_SWNCQ_IRQ_SDBFIS = (1 << 5),
195 NV_SWNCQ_IRQ_DHREGFIS = (1 << 6),
196 NV_SWNCQ_IRQ_DMASETUP = (1 << 7),
197
198 NV_SWNCQ_IRQ_HOTPLUG = NV_SWNCQ_IRQ_ADDED |
199 NV_SWNCQ_IRQ_REMOVED,
200
Jeff Garzik10ad05d2006-03-22 23:50:50 -0500201};
Linus Torvalds1da177e2005-04-16 15:20:36 -0700202
Robert Hancockfbbb2622006-10-27 19:08:41 -0700203/* ADMA Physical Region Descriptor - one SG segment */
204struct nv_adma_prd {
205 __le64 addr;
206 __le32 len;
207 u8 flags;
208 u8 packet_len;
209 __le16 reserved;
210};
211
212enum nv_adma_regbits {
213 CMDEND = (1 << 15), /* end of command list */
214 WNB = (1 << 14), /* wait-not-BSY */
215 IGN = (1 << 13), /* ignore this entry */
216 CS1n = (1 << (4 + 8)), /* std. PATA signals follow... */
217 DA2 = (1 << (2 + 8)),
218 DA1 = (1 << (1 + 8)),
219 DA0 = (1 << (0 + 8)),
220};
221
222/* ADMA Command Parameter Block
223 The first 5 SG segments are stored inside the Command Parameter Block itself.
224 If there are more than 5 segments the remainder are stored in a separate
225 memory area indicated by next_aprd. */
226struct nv_adma_cpb {
227 u8 resp_flags; /* 0 */
228 u8 reserved1; /* 1 */
229 u8 ctl_flags; /* 2 */
230 /* len is length of taskfile in 64 bit words */
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400231 u8 len; /* 3 */
Robert Hancockfbbb2622006-10-27 19:08:41 -0700232 u8 tag; /* 4 */
233 u8 next_cpb_idx; /* 5 */
234 __le16 reserved2; /* 6-7 */
235 __le16 tf[12]; /* 8-31 */
236 struct nv_adma_prd aprd[5]; /* 32-111 */
237 __le64 next_aprd; /* 112-119 */
238 __le64 reserved3; /* 120-127 */
239};
240
241
242struct nv_adma_port_priv {
243 struct nv_adma_cpb *cpb;
244 dma_addr_t cpb_dma;
245 struct nv_adma_prd *aprd;
246 dma_addr_t aprd_dma;
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400247 void __iomem *ctl_block;
248 void __iomem *gen_block;
249 void __iomem *notifier_clear_block;
Robert Hancock8959d302008-02-04 19:39:02 -0600250 u64 adma_dma_mask;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700251 u8 flags;
Robert Hancock5e5c74a2007-02-19 18:42:30 -0600252 int last_issue_ncq;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700253};
254
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600255struct nv_host_priv {
256 unsigned long type;
257};
258
Kuan Luof140f0f2007-10-15 15:16:53 -0400259struct defer_queue {
260 u32 defer_bits;
261 unsigned int head;
262 unsigned int tail;
263 unsigned int tag[ATA_MAX_QUEUE];
264};
265
266enum ncq_saw_flag_list {
267 ncq_saw_d2h = (1U << 0),
268 ncq_saw_dmas = (1U << 1),
269 ncq_saw_sdb = (1U << 2),
270 ncq_saw_backout = (1U << 3),
271};
272
273struct nv_swncq_port_priv {
274 struct ata_prd *prd; /* our SG list */
275 dma_addr_t prd_dma; /* and its DMA mapping */
276 void __iomem *sactive_block;
277 void __iomem *irq_block;
278 void __iomem *tag_block;
279 u32 qc_active;
280
281 unsigned int last_issue_tag;
282
283 /* fifo circular queue to store deferral command */
284 struct defer_queue defer_queue;
285
286 /* for NCQ interrupt analysis */
287 u32 dhfis_bits;
288 u32 dmafis_bits;
289 u32 sdbfis_bits;
290
291 unsigned int ncq_flags;
292};
293
294
Jeff Garzik5796d1c2007-10-26 00:03:37 -0400295#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
Robert Hancockfbbb2622006-10-27 19:08:41 -0700296
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400297static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
Tejun Heo438ac6d2007-03-02 17:31:26 +0900298#ifdef CONFIG_PM
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600299static int nv_pci_device_resume(struct pci_dev *pdev);
Tejun Heo438ac6d2007-03-02 17:31:26 +0900300#endif
Jeff Garzikcca39742006-08-24 03:19:22 -0400301static void nv_ck804_host_stop(struct ata_host *host);
David Howells7d12e782006-10-05 14:55:46 +0100302static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
303static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
304static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400305static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
306static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700307
Tejun Heo39f87582006-06-17 15:49:56 +0900308static void nv_nf2_freeze(struct ata_port *ap);
309static void nv_nf2_thaw(struct ata_port *ap);
310static void nv_ck804_freeze(struct ata_port *ap);
311static void nv_ck804_thaw(struct ata_port *ap);
312static void nv_error_handler(struct ata_port *ap);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700313static int nv_adma_slave_config(struct scsi_device *sdev);
Robert Hancock2dec7552006-11-26 14:20:19 -0600314static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700315static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
316static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
317static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
318static void nv_adma_irq_clear(struct ata_port *ap);
319static int nv_adma_port_start(struct ata_port *ap);
320static void nv_adma_port_stop(struct ata_port *ap);
Tejun Heo438ac6d2007-03-02 17:31:26 +0900321#ifdef CONFIG_PM
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600322static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
323static int nv_adma_port_resume(struct ata_port *ap);
Tejun Heo438ac6d2007-03-02 17:31:26 +0900324#endif
Robert Hancock53014e22007-05-05 15:36:36 -0600325static void nv_adma_freeze(struct ata_port *ap);
326static void nv_adma_thaw(struct ata_port *ap);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700327static void nv_adma_error_handler(struct ata_port *ap);
328static void nv_adma_host_stop(struct ata_host *host);
Robert Hancockf5ecac22007-02-20 21:49:10 -0600329static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
Robert Hancockf2fb3442007-03-26 21:43:36 -0800330static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
Tejun Heo39f87582006-06-17 15:49:56 +0900331
Kuan Luof140f0f2007-10-15 15:16:53 -0400332static void nv_mcp55_thaw(struct ata_port *ap);
333static void nv_mcp55_freeze(struct ata_port *ap);
334static void nv_swncq_error_handler(struct ata_port *ap);
335static int nv_swncq_slave_config(struct scsi_device *sdev);
336static int nv_swncq_port_start(struct ata_port *ap);
337static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
338static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
339static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
340static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
341static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
342#ifdef CONFIG_PM
343static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
344static int nv_swncq_port_resume(struct ata_port *ap);
345#endif
346
Linus Torvalds1da177e2005-04-16 15:20:36 -0700347enum nv_host_type
348{
349 GENERIC,
350 NFORCE2,
Tejun Heo27e4b272006-06-17 15:49:55 +0900351 NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */
Robert Hancockfbbb2622006-10-27 19:08:41 -0700352 CK804,
Kuan Luof140f0f2007-10-15 15:16:53 -0400353 ADMA,
354 SWNCQ,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700355};
356
Jeff Garzik3b7d6972005-11-10 11:04:11 -0500357static const struct pci_device_id nv_pci_tbl[] = {
Jeff Garzik54bb3a92006-09-27 22:20:11 -0400358 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
359 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
360 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
361 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
362 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
363 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
364 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
Kuan Luof140f0f2007-10-15 15:16:53 -0400365 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), SWNCQ },
366 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), SWNCQ },
367 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), SWNCQ },
368 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), SWNCQ },
Kuan Luoe2e031e2007-10-25 02:14:17 -0400369 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
370 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
371 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400372
373 { } /* terminate list */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700374};
375
Linus Torvalds1da177e2005-04-16 15:20:36 -0700376static struct pci_driver nv_pci_driver = {
377 .name = DRV_NAME,
378 .id_table = nv_pci_tbl,
379 .probe = nv_init_one,
Tejun Heo438ac6d2007-03-02 17:31:26 +0900380#ifdef CONFIG_PM
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600381 .suspend = ata_pci_device_suspend,
382 .resume = nv_pci_device_resume,
Tejun Heo438ac6d2007-03-02 17:31:26 +0900383#endif
Tejun Heo1daf9ce2007-05-17 13:13:57 +0200384 .remove = ata_pci_remove_one,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700385};
386
Jeff Garzik193515d2005-11-07 00:59:37 -0500387static struct scsi_host_template nv_sht = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700388 .module = THIS_MODULE,
389 .name = DRV_NAME,
390 .ioctl = ata_scsi_ioctl,
391 .queuecommand = ata_scsi_queuecmd,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700392 .can_queue = ATA_DEF_QUEUE,
393 .this_id = ATA_SHT_THIS_ID,
394 .sg_tablesize = LIBATA_MAX_PRD,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700395 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
396 .emulated = ATA_SHT_EMULATED,
397 .use_clustering = ATA_SHT_USE_CLUSTERING,
398 .proc_name = DRV_NAME,
399 .dma_boundary = ATA_DMA_BOUNDARY,
400 .slave_configure = ata_scsi_slave_config,
Tejun Heoccf68c32006-05-31 18:28:09 +0900401 .slave_destroy = ata_scsi_slave_destroy,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700402 .bios_param = ata_std_bios_param,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700403};
404
Robert Hancockfbbb2622006-10-27 19:08:41 -0700405static struct scsi_host_template nv_adma_sht = {
406 .module = THIS_MODULE,
407 .name = DRV_NAME,
408 .ioctl = ata_scsi_ioctl,
409 .queuecommand = ata_scsi_queuecmd,
Robert Hancock1e0b5ab2007-06-28 18:52:24 -0600410 .change_queue_depth = ata_scsi_change_queue_depth,
Robert Hancockfbbb2622006-10-27 19:08:41 -0700411 .can_queue = NV_ADMA_MAX_CPBS,
412 .this_id = ATA_SHT_THIS_ID,
413 .sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
Robert Hancockfbbb2622006-10-27 19:08:41 -0700414 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
415 .emulated = ATA_SHT_EMULATED,
416 .use_clustering = ATA_SHT_USE_CLUSTERING,
417 .proc_name = DRV_NAME,
418 .dma_boundary = NV_ADMA_DMA_BOUNDARY,
419 .slave_configure = nv_adma_slave_config,
420 .slave_destroy = ata_scsi_slave_destroy,
421 .bios_param = ata_std_bios_param,
422};
423
Kuan Luof140f0f2007-10-15 15:16:53 -0400424static struct scsi_host_template nv_swncq_sht = {
425 .module = THIS_MODULE,
426 .name = DRV_NAME,
427 .ioctl = ata_scsi_ioctl,
428 .queuecommand = ata_scsi_queuecmd,
429 .change_queue_depth = ata_scsi_change_queue_depth,
430 .can_queue = ATA_MAX_QUEUE,
431 .this_id = ATA_SHT_THIS_ID,
432 .sg_tablesize = LIBATA_MAX_PRD,
433 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
434 .emulated = ATA_SHT_EMULATED,
435 .use_clustering = ATA_SHT_USE_CLUSTERING,
436 .proc_name = DRV_NAME,
437 .dma_boundary = ATA_DMA_BOUNDARY,
438 .slave_configure = nv_swncq_slave_config,
439 .slave_destroy = ata_scsi_slave_destroy,
440 .bios_param = ata_std_bios_param,
441};
442
Tejun Heoada364e2006-06-17 15:49:56 +0900443static const struct ata_port_operations nv_generic_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700444 .tf_load = ata_tf_load,
445 .tf_read = ata_tf_read,
446 .exec_command = ata_exec_command,
447 .check_status = ata_check_status,
448 .dev_select = ata_std_dev_select,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700449 .bmdma_setup = ata_bmdma_setup,
450 .bmdma_start = ata_bmdma_start,
451 .bmdma_stop = ata_bmdma_stop,
452 .bmdma_status = ata_bmdma_status,
453 .qc_prep = ata_qc_prep,
454 .qc_issue = ata_qc_issue_prot,
Tejun Heo6bd99b42008-03-25 12:22:48 +0900455 .mode_filter = ata_pci_default_filter,
Tejun Heo39f87582006-06-17 15:49:56 +0900456 .freeze = ata_bmdma_freeze,
457 .thaw = ata_bmdma_thaw,
458 .error_handler = nv_error_handler,
459 .post_internal_cmd = ata_bmdma_post_internal_cmd,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900460 .data_xfer = ata_data_xfer,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700461 .irq_clear = ata_bmdma_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900462 .irq_on = ata_irq_on,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700463 .scr_read = nv_scr_read,
464 .scr_write = nv_scr_write,
Tejun Heo6bd99b42008-03-25 12:22:48 +0900465 .port_start = ata_sff_port_start,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700466};
467
Tejun Heoada364e2006-06-17 15:49:56 +0900468static const struct ata_port_operations nv_nf2_ops = {
Tejun Heoada364e2006-06-17 15:49:56 +0900469 .tf_load = ata_tf_load,
470 .tf_read = ata_tf_read,
471 .exec_command = ata_exec_command,
472 .check_status = ata_check_status,
473 .dev_select = ata_std_dev_select,
Tejun Heoada364e2006-06-17 15:49:56 +0900474 .bmdma_setup = ata_bmdma_setup,
475 .bmdma_start = ata_bmdma_start,
476 .bmdma_stop = ata_bmdma_stop,
477 .bmdma_status = ata_bmdma_status,
478 .qc_prep = ata_qc_prep,
479 .qc_issue = ata_qc_issue_prot,
Tejun Heo6bd99b42008-03-25 12:22:48 +0900480 .mode_filter = ata_pci_default_filter,
Tejun Heo39f87582006-06-17 15:49:56 +0900481 .freeze = nv_nf2_freeze,
482 .thaw = nv_nf2_thaw,
483 .error_handler = nv_error_handler,
484 .post_internal_cmd = ata_bmdma_post_internal_cmd,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900485 .data_xfer = ata_data_xfer,
Tejun Heoada364e2006-06-17 15:49:56 +0900486 .irq_clear = ata_bmdma_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900487 .irq_on = ata_irq_on,
Tejun Heoada364e2006-06-17 15:49:56 +0900488 .scr_read = nv_scr_read,
489 .scr_write = nv_scr_write,
Tejun Heo6bd99b42008-03-25 12:22:48 +0900490 .port_start = ata_sff_port_start,
Tejun Heoada364e2006-06-17 15:49:56 +0900491};
492
493static const struct ata_port_operations nv_ck804_ops = {
Tejun Heoada364e2006-06-17 15:49:56 +0900494 .tf_load = ata_tf_load,
495 .tf_read = ata_tf_read,
496 .exec_command = ata_exec_command,
497 .check_status = ata_check_status,
498 .dev_select = ata_std_dev_select,
Tejun Heoada364e2006-06-17 15:49:56 +0900499 .bmdma_setup = ata_bmdma_setup,
500 .bmdma_start = ata_bmdma_start,
501 .bmdma_stop = ata_bmdma_stop,
502 .bmdma_status = ata_bmdma_status,
503 .qc_prep = ata_qc_prep,
504 .qc_issue = ata_qc_issue_prot,
Tejun Heo6bd99b42008-03-25 12:22:48 +0900505 .mode_filter = ata_pci_default_filter,
Tejun Heo39f87582006-06-17 15:49:56 +0900506 .freeze = nv_ck804_freeze,
507 .thaw = nv_ck804_thaw,
508 .error_handler = nv_error_handler,
509 .post_internal_cmd = ata_bmdma_post_internal_cmd,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900510 .data_xfer = ata_data_xfer,
Tejun Heoada364e2006-06-17 15:49:56 +0900511 .irq_clear = ata_bmdma_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900512 .irq_on = ata_irq_on,
Tejun Heoada364e2006-06-17 15:49:56 +0900513 .scr_read = nv_scr_read,
514 .scr_write = nv_scr_write,
Tejun Heo6bd99b42008-03-25 12:22:48 +0900515 .port_start = ata_sff_port_start,
Tejun Heoada364e2006-06-17 15:49:56 +0900516 .host_stop = nv_ck804_host_stop,
517};
518
Robert Hancockfbbb2622006-10-27 19:08:41 -0700519static const struct ata_port_operations nv_adma_ops = {
Robert Hancockfbbb2622006-10-27 19:08:41 -0700520 .tf_load = ata_tf_load,
Robert Hancockf2fb3442007-03-26 21:43:36 -0800521 .tf_read = nv_adma_tf_read,
Robert Hancock2dec7552006-11-26 14:20:19 -0600522 .check_atapi_dma = nv_adma_check_atapi_dma,
Robert Hancockfbbb2622006-10-27 19:08:41 -0700523 .exec_command = ata_exec_command,
524 .check_status = ata_check_status,
525 .dev_select = ata_std_dev_select,
Robert Hancockf5ecac22007-02-20 21:49:10 -0600526 .bmdma_setup = ata_bmdma_setup,
527 .bmdma_start = ata_bmdma_start,
528 .bmdma_stop = ata_bmdma_stop,
529 .bmdma_status = ata_bmdma_status,
Tejun Heo31cc23b2007-09-23 13:14:12 +0900530 .qc_defer = ata_std_qc_defer,
Robert Hancockfbbb2622006-10-27 19:08:41 -0700531 .qc_prep = nv_adma_qc_prep,
532 .qc_issue = nv_adma_qc_issue,
Tejun Heo6bd99b42008-03-25 12:22:48 +0900533 .mode_filter = ata_pci_default_filter,
Robert Hancock53014e22007-05-05 15:36:36 -0600534 .freeze = nv_adma_freeze,
535 .thaw = nv_adma_thaw,
Robert Hancockfbbb2622006-10-27 19:08:41 -0700536 .error_handler = nv_adma_error_handler,
Robert Hancockf5ecac22007-02-20 21:49:10 -0600537 .post_internal_cmd = nv_adma_post_internal_cmd,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900538 .data_xfer = ata_data_xfer,
Robert Hancockfbbb2622006-10-27 19:08:41 -0700539 .irq_clear = nv_adma_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900540 .irq_on = ata_irq_on,
Robert Hancockfbbb2622006-10-27 19:08:41 -0700541 .scr_read = nv_scr_read,
542 .scr_write = nv_scr_write,
543 .port_start = nv_adma_port_start,
544 .port_stop = nv_adma_port_stop,
Tejun Heo438ac6d2007-03-02 17:31:26 +0900545#ifdef CONFIG_PM
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600546 .port_suspend = nv_adma_port_suspend,
547 .port_resume = nv_adma_port_resume,
Tejun Heo438ac6d2007-03-02 17:31:26 +0900548#endif
Robert Hancockfbbb2622006-10-27 19:08:41 -0700549 .host_stop = nv_adma_host_stop,
550};
551
Kuan Luof140f0f2007-10-15 15:16:53 -0400552static const struct ata_port_operations nv_swncq_ops = {
553 .tf_load = ata_tf_load,
554 .tf_read = ata_tf_read,
555 .exec_command = ata_exec_command,
556 .check_status = ata_check_status,
557 .dev_select = ata_std_dev_select,
558 .bmdma_setup = ata_bmdma_setup,
559 .bmdma_start = ata_bmdma_start,
560 .bmdma_stop = ata_bmdma_stop,
561 .bmdma_status = ata_bmdma_status,
562 .qc_defer = ata_std_qc_defer,
563 .qc_prep = nv_swncq_qc_prep,
564 .qc_issue = nv_swncq_qc_issue,
Tejun Heo6bd99b42008-03-25 12:22:48 +0900565 .mode_filter = ata_pci_default_filter,
Kuan Luof140f0f2007-10-15 15:16:53 -0400566 .freeze = nv_mcp55_freeze,
567 .thaw = nv_mcp55_thaw,
568 .error_handler = nv_swncq_error_handler,
569 .post_internal_cmd = ata_bmdma_post_internal_cmd,
570 .data_xfer = ata_data_xfer,
571 .irq_clear = ata_bmdma_irq_clear,
572 .irq_on = ata_irq_on,
573 .scr_read = nv_scr_read,
574 .scr_write = nv_scr_write,
575#ifdef CONFIG_PM
576 .port_suspend = nv_swncq_port_suspend,
577 .port_resume = nv_swncq_port_resume,
578#endif
579 .port_start = nv_swncq_port_start,
580};
581
Tejun Heo1626aeb2007-05-04 12:43:58 +0200582static const struct ata_port_info nv_port_info[] = {
Tejun Heoada364e2006-06-17 15:49:56 +0900583 /* generic */
584 {
585 .sht = &nv_sht,
Tejun Heo0c887582007-08-06 18:36:23 +0900586 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
Tejun Heoada364e2006-06-17 15:49:56 +0900587 .pio_mask = NV_PIO_MASK,
588 .mwdma_mask = NV_MWDMA_MASK,
589 .udma_mask = NV_UDMA_MASK,
590 .port_ops = &nv_generic_ops,
Tejun Heo9a829cc2007-04-17 23:44:08 +0900591 .irq_handler = nv_generic_interrupt,
Tejun Heoada364e2006-06-17 15:49:56 +0900592 },
593 /* nforce2/3 */
594 {
595 .sht = &nv_sht,
Tejun Heo0c887582007-08-06 18:36:23 +0900596 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
Tejun Heoada364e2006-06-17 15:49:56 +0900597 .pio_mask = NV_PIO_MASK,
598 .mwdma_mask = NV_MWDMA_MASK,
599 .udma_mask = NV_UDMA_MASK,
600 .port_ops = &nv_nf2_ops,
Tejun Heo9a829cc2007-04-17 23:44:08 +0900601 .irq_handler = nv_nf2_interrupt,
Tejun Heoada364e2006-06-17 15:49:56 +0900602 },
603 /* ck804 */
604 {
605 .sht = &nv_sht,
Tejun Heo0c887582007-08-06 18:36:23 +0900606 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
Tejun Heoada364e2006-06-17 15:49:56 +0900607 .pio_mask = NV_PIO_MASK,
608 .mwdma_mask = NV_MWDMA_MASK,
609 .udma_mask = NV_UDMA_MASK,
610 .port_ops = &nv_ck804_ops,
Tejun Heo9a829cc2007-04-17 23:44:08 +0900611 .irq_handler = nv_ck804_interrupt,
Tejun Heoada364e2006-06-17 15:49:56 +0900612 },
Robert Hancockfbbb2622006-10-27 19:08:41 -0700613 /* ADMA */
614 {
615 .sht = &nv_adma_sht,
616 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
617 ATA_FLAG_MMIO | ATA_FLAG_NCQ,
618 .pio_mask = NV_PIO_MASK,
619 .mwdma_mask = NV_MWDMA_MASK,
620 .udma_mask = NV_UDMA_MASK,
621 .port_ops = &nv_adma_ops,
Tejun Heo9a829cc2007-04-17 23:44:08 +0900622 .irq_handler = nv_adma_interrupt,
Robert Hancockfbbb2622006-10-27 19:08:41 -0700623 },
Kuan Luof140f0f2007-10-15 15:16:53 -0400624 /* SWNCQ */
625 {
626 .sht = &nv_swncq_sht,
627 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
628 ATA_FLAG_NCQ,
Kuan Luof140f0f2007-10-15 15:16:53 -0400629 .pio_mask = NV_PIO_MASK,
630 .mwdma_mask = NV_MWDMA_MASK,
631 .udma_mask = NV_UDMA_MASK,
632 .port_ops = &nv_swncq_ops,
633 .irq_handler = nv_swncq_interrupt,
634 },
Linus Torvalds1da177e2005-04-16 15:20:36 -0700635};
636
637MODULE_AUTHOR("NVIDIA");
638MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
639MODULE_LICENSE("GPL");
640MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
641MODULE_VERSION(DRV_VERSION);
642
Robert Hancockfbbb2622006-10-27 19:08:41 -0700643static int adma_enabled = 1;
Kuan Luof140f0f2007-10-15 15:16:53 -0400644static int swncq_enabled;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700645
Robert Hancock2dec7552006-11-26 14:20:19 -0600646static void nv_adma_register_mode(struct ata_port *ap)
647{
Robert Hancock2dec7552006-11-26 14:20:19 -0600648 struct nv_adma_port_priv *pp = ap->private_data;
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600649 void __iomem *mmio = pp->ctl_block;
Robert Hancocka2cfe812007-02-05 16:26:03 -0800650 u16 tmp, status;
651 int count = 0;
Robert Hancock2dec7552006-11-26 14:20:19 -0600652
653 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
654 return;
655
Robert Hancocka2cfe812007-02-05 16:26:03 -0800656 status = readw(mmio + NV_ADMA_STAT);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400657 while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
Robert Hancocka2cfe812007-02-05 16:26:03 -0800658 ndelay(50);
659 status = readw(mmio + NV_ADMA_STAT);
660 count++;
661 }
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400662 if (count == 20)
Robert Hancocka2cfe812007-02-05 16:26:03 -0800663 ata_port_printk(ap, KERN_WARNING,
664 "timeout waiting for ADMA IDLE, stat=0x%hx\n",
665 status);
666
Robert Hancock2dec7552006-11-26 14:20:19 -0600667 tmp = readw(mmio + NV_ADMA_CTL);
668 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
669
Robert Hancocka2cfe812007-02-05 16:26:03 -0800670 count = 0;
671 status = readw(mmio + NV_ADMA_STAT);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400672 while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
Robert Hancocka2cfe812007-02-05 16:26:03 -0800673 ndelay(50);
674 status = readw(mmio + NV_ADMA_STAT);
675 count++;
676 }
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400677 if (count == 20)
Robert Hancocka2cfe812007-02-05 16:26:03 -0800678 ata_port_printk(ap, KERN_WARNING,
679 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
680 status);
681
Robert Hancock2dec7552006-11-26 14:20:19 -0600682 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
683}
684
/*
 * Switch the port from register (legacy) mode into ADMA mode.
 *
 * Sets the GO bit in the ADMA control register and then polls the status
 * register (up to 20 x 50ns = 1us) until the LEGACY bit clears and IDLE is
 * set, warning if the controller does not transition in time.  Clears
 * NV_ADMA_PORT_REGISTER_MODE in pp->flags on completion.  No-op if the
 * port is not currently in register mode.  Must not be called once the
 * port is committed to ATAPI/legacy operation (see the WARN_ON).
 */
static void nv_adma_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	/* already in ADMA mode - nothing to do */
	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	/* ATAPI setup forces legacy-only operation; ADMA mode is invalid then */
	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	status = readw(mmio + NV_ADMA_STAT);
	while (((status & NV_ADMA_STAT_LEGACY) ||
	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
			status);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}
714
Robert Hancockfbbb2622006-10-27 19:08:41 -0700715static int nv_adma_slave_config(struct scsi_device *sdev)
716{
717 struct ata_port *ap = ata_shost_to_port(sdev->host);
Robert Hancock2dec7552006-11-26 14:20:19 -0600718 struct nv_adma_port_priv *pp = ap->private_data;
Robert Hancock8959d302008-02-04 19:39:02 -0600719 struct nv_adma_port_priv *port0, *port1;
720 struct scsi_device *sdev0, *sdev1;
Robert Hancock2dec7552006-11-26 14:20:19 -0600721 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
Robert Hancock8959d302008-02-04 19:39:02 -0600722 unsigned long segment_boundary, flags;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700723 unsigned short sg_tablesize;
724 int rc;
Robert Hancock2dec7552006-11-26 14:20:19 -0600725 int adma_enable;
726 u32 current_reg, new_reg, config_mask;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700727
728 rc = ata_scsi_slave_config(sdev);
729
730 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
731 /* Not a proper libata device, ignore */
732 return rc;
733
Robert Hancock8959d302008-02-04 19:39:02 -0600734 spin_lock_irqsave(ap->lock, flags);
735
Tejun Heo9af5c9c2007-08-06 18:36:22 +0900736 if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
Robert Hancockfbbb2622006-10-27 19:08:41 -0700737 /*
738 * NVIDIA reports that ADMA mode does not support ATAPI commands.
739 * Therefore ATAPI commands are sent through the legacy interface.
740 * However, the legacy interface only supports 32-bit DMA.
741 * Restrict DMA parameters as required by the legacy interface
742 * when an ATAPI device is connected.
743 */
Robert Hancockfbbb2622006-10-27 19:08:41 -0700744 segment_boundary = ATA_DMA_BOUNDARY;
745 /* Subtract 1 since an extra entry may be needed for padding, see
746 libata-scsi.c */
747 sg_tablesize = LIBATA_MAX_PRD - 1;
Jeff Garzikf20b16f2006-12-11 11:14:06 -0500748
Robert Hancock2dec7552006-11-26 14:20:19 -0600749 /* Since the legacy DMA engine is in use, we need to disable ADMA
750 on the port. */
751 adma_enable = 0;
752 nv_adma_register_mode(ap);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400753 } else {
Robert Hancockfbbb2622006-10-27 19:08:41 -0700754 segment_boundary = NV_ADMA_DMA_BOUNDARY;
755 sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
Robert Hancock2dec7552006-11-26 14:20:19 -0600756 adma_enable = 1;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700757 }
Jeff Garzikf20b16f2006-12-11 11:14:06 -0500758
Robert Hancock2dec7552006-11-26 14:20:19 -0600759 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700760
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400761 if (ap->port_no == 1)
Robert Hancock2dec7552006-11-26 14:20:19 -0600762 config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
763 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
764 else
765 config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
766 NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
Jeff Garzikf20b16f2006-12-11 11:14:06 -0500767
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400768 if (adma_enable) {
Robert Hancock2dec7552006-11-26 14:20:19 -0600769 new_reg = current_reg | config_mask;
770 pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400771 } else {
Robert Hancock2dec7552006-11-26 14:20:19 -0600772 new_reg = current_reg & ~config_mask;
773 pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
774 }
Jeff Garzikf20b16f2006-12-11 11:14:06 -0500775
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400776 if (current_reg != new_reg)
Robert Hancock2dec7552006-11-26 14:20:19 -0600777 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
Jeff Garzikf20b16f2006-12-11 11:14:06 -0500778
Robert Hancock8959d302008-02-04 19:39:02 -0600779 port0 = ap->host->ports[0]->private_data;
780 port1 = ap->host->ports[1]->private_data;
781 sdev0 = ap->host->ports[0]->link.device[0].sdev;
782 sdev1 = ap->host->ports[1]->link.device[0].sdev;
783 if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
784 (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
785 /** We have to set the DMA mask to 32-bit if either port is in
786 ATAPI mode, since they are on the same PCI device which is
787 used for DMA mapping. If we set the mask we also need to set
788 the bounce limit on both ports to ensure that the block
789 layer doesn't feed addresses that cause DMA mapping to
790 choke. If either SCSI device is not allocated yet, it's OK
791 since that port will discover its correct setting when it
792 does get allocated.
793 Note: Setting 32-bit mask should not fail. */
794 if (sdev0)
795 blk_queue_bounce_limit(sdev0->request_queue,
796 ATA_DMA_MASK);
797 if (sdev1)
798 blk_queue_bounce_limit(sdev1->request_queue,
799 ATA_DMA_MASK);
800
801 pci_set_dma_mask(pdev, ATA_DMA_MASK);
802 } else {
803 /** This shouldn't fail as it was set to this value before */
804 pci_set_dma_mask(pdev, pp->adma_dma_mask);
805 if (sdev0)
806 blk_queue_bounce_limit(sdev0->request_queue,
807 pp->adma_dma_mask);
808 if (sdev1)
809 blk_queue_bounce_limit(sdev1->request_queue,
810 pp->adma_dma_mask);
811 }
812
Robert Hancockfbbb2622006-10-27 19:08:41 -0700813 blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
814 blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
815 ata_port_printk(ap, KERN_INFO,
Robert Hancock8959d302008-02-04 19:39:02 -0600816 "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
817 (unsigned long long)*ap->host->dev->dma_mask,
818 segment_boundary, sg_tablesize);
819
820 spin_unlock_irqrestore(ap->lock, flags);
821
Robert Hancockfbbb2622006-10-27 19:08:41 -0700822 return rc;
823}
824
Robert Hancock2dec7552006-11-26 14:20:19 -0600825static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
826{
827 struct nv_adma_port_priv *pp = qc->ap->private_data;
828 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
829}
830
Robert Hancockf2fb3442007-03-26 21:43:36 -0800831static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
832{
Robert Hancock3f3debd2007-11-25 16:59:36 -0600833 /* Other than when internal or pass-through commands are executed,
834 the only time this function will be called in ADMA mode will be
835 if a command fails. In the failure case we don't care about going
836 into register mode with ADMA commands pending, as the commands will
837 all shortly be aborted anyway. We assume that NCQ commands are not
838 issued via passthrough, which is the only way that switching into
839 ADMA mode could abort outstanding commands. */
Robert Hancockf2fb3442007-03-26 21:43:36 -0800840 nv_adma_register_mode(ap);
841
842 ata_tf_read(ap, tf);
843}
844
Robert Hancock2dec7552006-11-26 14:20:19 -0600845static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
Robert Hancockfbbb2622006-10-27 19:08:41 -0700846{
847 unsigned int idx = 0;
848
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400849 if (tf->flags & ATA_TFLAG_ISADDR) {
Robert Hancockac3d6b82007-02-19 19:02:46 -0600850 if (tf->flags & ATA_TFLAG_LBA48) {
851 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
852 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
853 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
854 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
855 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
856 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
857 } else
858 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);
Jeff Garzika84471f2007-02-26 05:51:33 -0500859
Robert Hancockac3d6b82007-02-19 19:02:46 -0600860 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
861 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
862 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
863 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700864 }
Jeff Garzika84471f2007-02-26 05:51:33 -0500865
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400866 if (tf->flags & ATA_TFLAG_DEVICE)
Robert Hancockac3d6b82007-02-19 19:02:46 -0600867 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700868
869 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
Jeff Garzika84471f2007-02-26 05:51:33 -0500870
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400871 while (idx < 12)
Robert Hancockac3d6b82007-02-19 19:02:46 -0600872 cpb[idx++] = cpu_to_le16(IGN);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700873
874 return idx;
875}
876
Robert Hancock5bd28a42007-02-05 16:26:01 -0800877static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
Robert Hancockfbbb2622006-10-27 19:08:41 -0700878{
879 struct nv_adma_port_priv *pp = ap->private_data;
Robert Hancock2dec7552006-11-26 14:20:19 -0600880 u8 flags = pp->cpb[cpb_num].resp_flags;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700881
882 VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
883
Robert Hancock5bd28a42007-02-05 16:26:01 -0800884 if (unlikely((force_err ||
885 flags & (NV_CPB_RESP_ATA_ERR |
886 NV_CPB_RESP_CMD_ERR |
887 NV_CPB_RESP_CPB_ERR)))) {
Tejun Heo9af5c9c2007-08-06 18:36:22 +0900888 struct ata_eh_info *ehi = &ap->link.eh_info;
Robert Hancock5bd28a42007-02-05 16:26:01 -0800889 int freeze = 0;
890
891 ata_ehi_clear_desc(ehi);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400892 __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
Robert Hancock5bd28a42007-02-05 16:26:01 -0800893 if (flags & NV_CPB_RESP_ATA_ERR) {
Tejun Heob64bbc32007-07-16 14:29:39 +0900894 ata_ehi_push_desc(ehi, "ATA error");
Robert Hancock5bd28a42007-02-05 16:26:01 -0800895 ehi->err_mask |= AC_ERR_DEV;
896 } else if (flags & NV_CPB_RESP_CMD_ERR) {
Tejun Heob64bbc32007-07-16 14:29:39 +0900897 ata_ehi_push_desc(ehi, "CMD error");
Robert Hancock5bd28a42007-02-05 16:26:01 -0800898 ehi->err_mask |= AC_ERR_DEV;
899 } else if (flags & NV_CPB_RESP_CPB_ERR) {
Tejun Heob64bbc32007-07-16 14:29:39 +0900900 ata_ehi_push_desc(ehi, "CPB error");
Robert Hancock5bd28a42007-02-05 16:26:01 -0800901 ehi->err_mask |= AC_ERR_SYSTEM;
902 freeze = 1;
903 } else {
904 /* notifier error, but no error in CPB flags? */
Tejun Heob64bbc32007-07-16 14:29:39 +0900905 ata_ehi_push_desc(ehi, "unknown");
Robert Hancock5bd28a42007-02-05 16:26:01 -0800906 ehi->err_mask |= AC_ERR_OTHER;
907 freeze = 1;
908 }
909 /* Kill all commands. EH will determine what actually failed. */
910 if (freeze)
911 ata_port_freeze(ap);
912 else
913 ata_port_abort(ap);
914 return 1;
915 }
916
Robert Hancockf2fb3442007-03-26 21:43:36 -0800917 if (likely(flags & NV_CPB_RESP_DONE)) {
Robert Hancockfbbb2622006-10-27 19:08:41 -0700918 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
Robert Hancock5bd28a42007-02-05 16:26:01 -0800919 VPRINTK("CPB flags done, flags=0x%x\n", flags);
920 if (likely(qc)) {
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400921 DPRINTK("Completing qc from tag %d\n", cpb_num);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700922 ata_qc_complete(qc);
Robert Hancock2a54cf72007-02-21 23:53:03 -0600923 } else {
Tejun Heo9af5c9c2007-08-06 18:36:22 +0900924 struct ata_eh_info *ehi = &ap->link.eh_info;
Robert Hancock2a54cf72007-02-21 23:53:03 -0600925 /* Notifier bits set without a command may indicate the drive
926 is misbehaving. Raise host state machine violation on this
927 condition. */
Jeff Garzik5796d1c2007-10-26 00:03:37 -0400928 ata_port_printk(ap, KERN_ERR,
929 "notifier for tag %d with no cmd?\n",
930 cpb_num);
Robert Hancock2a54cf72007-02-21 23:53:03 -0600931 ehi->err_mask |= AC_ERR_HSM;
Tejun Heocf480622008-01-24 00:05:14 +0900932 ehi->action |= ATA_EH_RESET;
Robert Hancock2a54cf72007-02-21 23:53:03 -0600933 ata_port_freeze(ap);
934 return 1;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700935 }
936 }
Robert Hancock5bd28a42007-02-05 16:26:01 -0800937 return 0;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700938}
939
Robert Hancock2dec7552006-11-26 14:20:19 -0600940static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
941{
Tejun Heo9af5c9c2007-08-06 18:36:22 +0900942 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
Robert Hancock2dec7552006-11-26 14:20:19 -0600943
944 /* freeze if hotplugged */
945 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
946 ata_port_freeze(ap);
947 return 1;
948 }
949
950 /* bail out if not our interrupt */
951 if (!(irq_stat & NV_INT_DEV))
952 return 0;
953
954 /* DEV interrupt w/ no active qc? */
955 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
956 ata_check_status(ap);
957 return 1;
958 }
959
960 /* handle interrupt */
Robert Hancockf740d162007-01-23 20:09:02 -0600961 return ata_host_intr(ap, qc);
Robert Hancock2dec7552006-11-26 14:20:19 -0600962}
963
Robert Hancockfbbb2622006-10-27 19:08:41 -0700964static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
965{
966 struct ata_host *host = dev_instance;
967 int i, handled = 0;
Robert Hancock2dec7552006-11-26 14:20:19 -0600968 u32 notifier_clears[2];
Robert Hancockfbbb2622006-10-27 19:08:41 -0700969
970 spin_lock(&host->lock);
971
972 for (i = 0; i < host->n_ports; i++) {
973 struct ata_port *ap = host->ports[i];
Robert Hancock2dec7552006-11-26 14:20:19 -0600974 notifier_clears[i] = 0;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700975
976 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
977 struct nv_adma_port_priv *pp = ap->private_data;
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600978 void __iomem *mmio = pp->ctl_block;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700979 u16 status;
980 u32 gen_ctl;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700981 u32 notifier, notifier_error;
Jeff Garzika617c092007-05-21 20:14:23 -0400982
Robert Hancock53014e22007-05-05 15:36:36 -0600983 /* if ADMA is disabled, use standard ata interrupt handler */
984 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
985 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
986 >> (NV_INT_PORT_SHIFT * i);
987 handled += nv_host_intr(ap, irq_stat);
988 continue;
989 }
Robert Hancockfbbb2622006-10-27 19:08:41 -0700990
Robert Hancock53014e22007-05-05 15:36:36 -0600991 /* if in ATA register mode, check for standard interrupts */
Robert Hancockfbbb2622006-10-27 19:08:41 -0700992 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
Tejun Heo0d5ff562007-02-01 15:06:36 +0900993 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
Robert Hancock2dec7552006-11-26 14:20:19 -0600994 >> (NV_INT_PORT_SHIFT * i);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400995 if (ata_tag_valid(ap->link.active_tag))
Robert Hancockf740d162007-01-23 20:09:02 -0600996 /** NV_INT_DEV indication seems unreliable at times
997 at least in ADMA mode. Force it on always when a
998 command is active, to prevent losing interrupts. */
999 irq_stat |= NV_INT_DEV;
Robert Hancock2dec7552006-11-26 14:20:19 -06001000 handled += nv_host_intr(ap, irq_stat);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001001 }
1002
1003 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1004 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
Robert Hancock2dec7552006-11-26 14:20:19 -06001005 notifier_clears[i] = notifier | notifier_error;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001006
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001007 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001008
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001009 if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
Robert Hancockfbbb2622006-10-27 19:08:41 -07001010 !notifier_error)
1011 /* Nothing to do */
1012 continue;
1013
1014 status = readw(mmio + NV_ADMA_STAT);
1015
1016 /* Clear status. Ensure the controller sees the clearing before we start
1017 looking at any of the CPB statuses, so that any CPB completions after
1018 this point in the handler will raise another interrupt. */
1019 writew(status, mmio + NV_ADMA_STAT);
1020 readw(mmio + NV_ADMA_STAT); /* flush posted write */
1021 rmb();
1022
Robert Hancock5bd28a42007-02-05 16:26:01 -08001023 handled++; /* irq handled if we got here */
1024
1025 /* freeze if hotplugged or controller error */
1026 if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
1027 NV_ADMA_STAT_HOTUNPLUG |
Robert Hancock5278b502007-02-11 18:36:56 -06001028 NV_ADMA_STAT_TIMEOUT |
1029 NV_ADMA_STAT_SERROR))) {
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001030 struct ata_eh_info *ehi = &ap->link.eh_info;
Robert Hancock5bd28a42007-02-05 16:26:01 -08001031
1032 ata_ehi_clear_desc(ehi);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001033 __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
Robert Hancock5bd28a42007-02-05 16:26:01 -08001034 if (status & NV_ADMA_STAT_TIMEOUT) {
1035 ehi->err_mask |= AC_ERR_SYSTEM;
Tejun Heob64bbc32007-07-16 14:29:39 +09001036 ata_ehi_push_desc(ehi, "timeout");
Robert Hancock5bd28a42007-02-05 16:26:01 -08001037 } else if (status & NV_ADMA_STAT_HOTPLUG) {
1038 ata_ehi_hotplugged(ehi);
Tejun Heob64bbc32007-07-16 14:29:39 +09001039 ata_ehi_push_desc(ehi, "hotplug");
Robert Hancock5bd28a42007-02-05 16:26:01 -08001040 } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
1041 ata_ehi_hotplugged(ehi);
Tejun Heob64bbc32007-07-16 14:29:39 +09001042 ata_ehi_push_desc(ehi, "hot unplug");
Robert Hancock5278b502007-02-11 18:36:56 -06001043 } else if (status & NV_ADMA_STAT_SERROR) {
1044 /* let libata analyze SError and figure out the cause */
Tejun Heob64bbc32007-07-16 14:29:39 +09001045 ata_ehi_push_desc(ehi, "SError");
1046 } else
1047 ata_ehi_push_desc(ehi, "unknown");
Robert Hancockfbbb2622006-10-27 19:08:41 -07001048 ata_port_freeze(ap);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001049 continue;
1050 }
1051
Robert Hancock5bd28a42007-02-05 16:26:01 -08001052 if (status & (NV_ADMA_STAT_DONE |
Robert Hancocka1fe7822008-01-29 19:53:19 -06001053 NV_ADMA_STAT_CPBERR |
1054 NV_ADMA_STAT_CMD_COMPLETE)) {
1055 u32 check_commands = notifier_clears[i];
Robert Hancock721449b2007-02-19 19:03:08 -06001056 int pos, error = 0;
Robert Hancock8ba5e4c2007-03-08 18:02:18 -06001057
Robert Hancocka1fe7822008-01-29 19:53:19 -06001058 if (status & NV_ADMA_STAT_CPBERR) {
1059 /* Check all active commands */
1060 if (ata_tag_valid(ap->link.active_tag))
1061 check_commands = 1 <<
1062 ap->link.active_tag;
1063 else
1064 check_commands = ap->
1065 link.sactive;
1066 }
Robert Hancock8ba5e4c2007-03-08 18:02:18 -06001067
Robert Hancockfbbb2622006-10-27 19:08:41 -07001068 /** Check CPBs for completed commands */
Robert Hancock721449b2007-02-19 19:03:08 -06001069 while ((pos = ffs(check_commands)) && !error) {
1070 pos--;
1071 error = nv_adma_check_cpb(ap, pos,
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001072 notifier_error & (1 << pos));
1073 check_commands &= ~(1 << pos);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001074 }
1075 }
Robert Hancockfbbb2622006-10-27 19:08:41 -07001076 }
1077 }
Jeff Garzikf20b16f2006-12-11 11:14:06 -05001078
Jeff Garzikb4479162007-10-25 20:47:30 -04001079 if (notifier_clears[0] || notifier_clears[1]) {
Robert Hancock2dec7552006-11-26 14:20:19 -06001080 /* Note: Both notifier clear registers must be written
1081 if either is set, even if one is zero, according to NVIDIA. */
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001082 struct nv_adma_port_priv *pp = host->ports[0]->private_data;
1083 writel(notifier_clears[0], pp->notifier_clear_block);
1084 pp = host->ports[1]->private_data;
1085 writel(notifier_clears[1], pp->notifier_clear_block);
Robert Hancock2dec7552006-11-26 14:20:19 -06001086 }
Robert Hancockfbbb2622006-10-27 19:08:41 -07001087
1088 spin_unlock(&host->lock);
1089
1090 return IRQ_RETVAL(handled);
1091}
1092
Robert Hancock53014e22007-05-05 15:36:36 -06001093static void nv_adma_freeze(struct ata_port *ap)
1094{
1095 struct nv_adma_port_priv *pp = ap->private_data;
1096 void __iomem *mmio = pp->ctl_block;
1097 u16 tmp;
1098
1099 nv_ck804_freeze(ap);
1100
1101 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1102 return;
1103
1104 /* clear any outstanding CK804 notifications */
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001105 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
Robert Hancock53014e22007-05-05 15:36:36 -06001106 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1107
1108 /* Disable interrupt */
1109 tmp = readw(mmio + NV_ADMA_CTL);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001110 writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
Robert Hancock53014e22007-05-05 15:36:36 -06001111 mmio + NV_ADMA_CTL);
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001112 readw(mmio + NV_ADMA_CTL); /* flush posted write */
Robert Hancock53014e22007-05-05 15:36:36 -06001113}
1114
/*
 * EH thaw hook: re-enable the port's interrupts after a freeze.
 *
 * Performs the CK804 legacy thaw, then - unless the port is in
 * ATAPI/legacy-only mode - re-enables the ADMA interrupt-enable bits
 * in the control register (the inverse of nv_adma_freeze()).
 */
static void nv_adma_thaw(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_thaw(ap);

	/* ADMA registers are not in use when ATAPI setup is complete */
	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* Enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}
1132
Robert Hancockfbbb2622006-10-27 19:08:41 -07001133static void nv_adma_irq_clear(struct ata_port *ap)
1134{
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001135 struct nv_adma_port_priv *pp = ap->private_data;
1136 void __iomem *mmio = pp->ctl_block;
Robert Hancock53014e22007-05-05 15:36:36 -06001137 u32 notifier_clears[2];
1138
1139 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
1140 ata_bmdma_irq_clear(ap);
1141 return;
1142 }
1143
1144 /* clear any outstanding CK804 notifications */
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001145 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
Robert Hancock53014e22007-05-05 15:36:36 -06001146 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001147
1148 /* clear ADMA status */
Robert Hancock53014e22007-05-05 15:36:36 -06001149 writew(0xffff, mmio + NV_ADMA_STAT);
Jeff Garzika617c092007-05-21 20:14:23 -04001150
Robert Hancock53014e22007-05-05 15:36:36 -06001151 /* clear notifiers - note both ports need to be written with
1152 something even though we are only clearing on one */
1153 if (ap->port_no == 0) {
1154 notifier_clears[0] = 0xFFFFFFFF;
1155 notifier_clears[1] = 0;
1156 } else {
1157 notifier_clears[0] = 0;
1158 notifier_clears[1] = 0xFFFFFFFF;
1159 }
1160 pp = ap->host->ports[0]->private_data;
1161 writel(notifier_clears[0], pp->notifier_clear_block);
1162 pp = ap->host->ports[1]->private_data;
1163 writel(notifier_clears[1], pp->notifier_clear_block);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001164}
1165
Robert Hancockf5ecac22007-02-20 21:49:10 -06001166static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
Robert Hancockfbbb2622006-10-27 19:08:41 -07001167{
Robert Hancockf5ecac22007-02-20 21:49:10 -06001168 struct nv_adma_port_priv *pp = qc->ap->private_data;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001169
Jeff Garzikb4479162007-10-25 20:47:30 -04001170 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
Robert Hancockf5ecac22007-02-20 21:49:10 -06001171 ata_bmdma_post_internal_cmd(qc);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001172}
1173
/*
 * Port init: allocate ADMA data structures and bring up the engine.
 *
 * Ordering is deliberate: the DMA mask is held at 32-bit while
 * ata_port_start() allocates the legacy PRD/pad buffers, then raised to
 * 64-bit (best effort; the achieved mask is stored in pp->adma_dma_mask
 * for slave_config) before allocating the CPB/APRD block.  Maps the
 * port's ADMA control/general MMIO blocks, programs the CPB base
 * address, puts the port in register mode with interrupts enabled, and
 * pulses CHANNEL_RESET.  Returns 0 or a negative errno.  All allocations
 * are devm/dmam-managed, so no explicit teardown is needed on failure.
 */
static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 tmp;

	VPRINTK("ENTER\n");

	/* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
	   pad buffers */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	/* per-port ADMA register block within the MMIO BAR */
	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	/* Now that the legacy PRD and padding buffer are allocated we can
	   safely raise the DMA mask to allocate the CPB/APRD table.
	   These are allowed to fail since we store the value that ends up
	   being used to set as the bounce limit in slave_config later if
	   needed. */
	pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	pp->adma_dma_mask = *dev->dma_mask;

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

	mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	/* pulse the channel reset bit; reads flush the posted writes */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
1271
1272static void nv_adma_port_stop(struct ata_port *ap)
1273{
Robert Hancockfbbb2622006-10-27 19:08:41 -07001274 struct nv_adma_port_priv *pp = ap->private_data;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001275 void __iomem *mmio = pp->ctl_block;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001276
1277 VPRINTK("ENTER\n");
Robert Hancockfbbb2622006-10-27 19:08:41 -07001278 writew(0, mmio + NV_ADMA_CTL);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001279}
1280
Tejun Heo438ac6d2007-03-02 17:31:26 +09001281#ifdef CONFIG_PM
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001282static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1283{
1284 struct nv_adma_port_priv *pp = ap->private_data;
1285 void __iomem *mmio = pp->ctl_block;
1286
1287 /* Go to register mode - clears GO */
1288 nv_adma_register_mode(ap);
1289
1290 /* clear CPB fetch count */
1291 writew(0, mmio + NV_ADMA_CPB_COUNT);
1292
1293 /* disable interrupt, shut down port */
1294 writew(0, mmio + NV_ADMA_CTL);
1295
1296 return 0;
1297}
1298
/*
 * PM resume hook: re-program the hardware state that suspend (and a
 * power cycle) discarded - CPB base address, status clear, register
 * mode with interrupts enabled, and a CHANNEL_RESET pulse.  Mirrors the
 * hardware-init tail of nv_adma_port_start().  Always returns 0.
 */
static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	/* pulse the channel reset bit; reads flush the posted writes */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
Tejun Heo438ac6d2007-03-02 17:31:26 +09001332#endif
Robert Hancockfbbb2622006-10-27 19:08:41 -07001333
Tejun Heo9a829cc2007-04-17 23:44:08 +09001334static void nv_adma_setup_port(struct ata_port *ap)
Robert Hancockfbbb2622006-10-27 19:08:41 -07001335{
Tejun Heo9a829cc2007-04-17 23:44:08 +09001336 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1337 struct ata_ioports *ioport = &ap->ioaddr;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001338
1339 VPRINTK("ENTER\n");
1340
Tejun Heo9a829cc2007-04-17 23:44:08 +09001341 mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001342
Tejun Heo0d5ff562007-02-01 15:06:36 +09001343 ioport->cmd_addr = mmio;
1344 ioport->data_addr = mmio + (ATA_REG_DATA * 4);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001345 ioport->error_addr =
Tejun Heo0d5ff562007-02-01 15:06:36 +09001346 ioport->feature_addr = mmio + (ATA_REG_ERR * 4);
1347 ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4);
1348 ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4);
1349 ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4);
1350 ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4);
1351 ioport->device_addr = mmio + (ATA_REG_DEVICE * 4);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001352 ioport->status_addr =
Tejun Heo0d5ff562007-02-01 15:06:36 +09001353 ioport->command_addr = mmio + (ATA_REG_STATUS * 4);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001354 ioport->altstatus_addr =
Tejun Heo0d5ff562007-02-01 15:06:36 +09001355 ioport->ctl_addr = mmio + 0x20;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001356}
1357
Tejun Heo9a829cc2007-04-17 23:44:08 +09001358static int nv_adma_host_init(struct ata_host *host)
Robert Hancockfbbb2622006-10-27 19:08:41 -07001359{
Tejun Heo9a829cc2007-04-17 23:44:08 +09001360 struct pci_dev *pdev = to_pci_dev(host->dev);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001361 unsigned int i;
1362 u32 tmp32;
1363
1364 VPRINTK("ENTER\n");
1365
1366 /* enable ADMA on the ports */
1367 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1368 tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1369 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1370 NV_MCP_SATA_CFG_20_PORT1_EN |
1371 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1372
1373 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1374
Tejun Heo9a829cc2007-04-17 23:44:08 +09001375 for (i = 0; i < host->n_ports; i++)
1376 nv_adma_setup_port(host->ports[i]);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001377
Robert Hancockfbbb2622006-10-27 19:08:41 -07001378 return 0;
1379}
1380
/* Fill one ADMA physical region descriptor (APRD) from a scatterlist
 * entry.  @idx is the overall position of @sg within the command's
 * scatterlist (used by nv_adma_fill_sg() below).
 */
static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;	/* final entry of the whole transfer */
	else if (idx != 4)
		/* more entries follow in the same table; idx 4 appears to be
		 * the last in-CPB slot, where the chain continues through the
		 * CPB's next_aprd pointer instead of a CONT flag (see
		 * nv_adma_fill_sg) — NOTE(review): inferred from usage. */
		flags |= NV_APRD_CONT;

	aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
	aprd->packet_len = 0;
}
1399
/* Build the APRD table for @qc.  The first five scatterlist entries go
 * into the CPB's embedded aprd[] array; any further entries spill into
 * the per-tag external APRD table, which the CPB references via
 * next_aprd when used (si > 5 after the loop means entries beyond the
 * embedded five were consumed).
 */
static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;
	unsigned int si;

	VPRINTK("ENTER\n");

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		aprd = (si < 5) ? &cpb->aprd[si] :
			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
		nv_adma_fill_aprd(qc, sg, si, aprd);
	}
	if (si > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}
1419
Robert Hancock382a6652007-02-05 16:26:02 -08001420static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1421{
1422 struct nv_adma_port_priv *pp = qc->ap->private_data;
1423
1424 /* ADMA engine can only be used for non-ATAPI DMA commands,
Robert Hancock3f3debd2007-11-25 16:59:36 -06001425 or interrupt-driven no-data commands. */
Jeff Garzikb4479162007-10-25 20:47:30 -04001426 if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
Robert Hancock3f3debd2007-11-25 16:59:36 -06001427 (qc->tf.flags & ATA_TFLAG_POLLING))
Robert Hancock382a6652007-02-05 16:26:02 -08001428 return 1;
1429
Jeff Garzikb4479162007-10-25 20:47:30 -04001430 if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
Robert Hancock382a6652007-02-05 16:26:02 -08001431 (qc->tf.protocol == ATA_PROT_NODATA))
1432 return 0;
1433
1434 return 1;
1435}
1436
/* Prepare @qc for issue: either fall back to the legacy taskfile path,
 * or build the Command Parameter Block the ADMA engine will fetch.
 * The wmb() calls order the device-visible CPB writes: the block is
 * first invalidated, filled in, and only then marked valid.
 */
static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		/* DMA-mapped commands may only go through the register path
		   when the port is configured for ATAPI. */
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		ata_qc_prep(qc);
		return;
	}

	/* Invalidate the CPB before rewriting its contents so the
	   controller cannot pick up a half-written block. */
	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len = 3;
	cpb->tag = qc->tag;
	cpb->next_cpb_idx = 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if (qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
	   until we are finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;
}
1482
/* Issue @qc: register-mode commands go through the standard libata
 * path; ADMA-capable commands are appended to the controller's CPB
 * queue via the APPEND register.
 */
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	VPRINTK("ENTER\n");

	/* We can't handle result taskfile with NCQ commands, since
	   retrieving the taskfile switches us out of ADMA mode and would abort
	   existing commands. */
	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
		ata_dev_printk(qc->dev, KERN_ERR,
			"NCQ w/ RESULT_TF not allowed\n");
		return AC_ERR_SYSTEM;
	}

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		/* same invariant nv_adma_qc_prep() enforces */
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();

	if (curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and
		   non-NCQ commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}
1528
/* Shared interrupt handler for the generic (non-ADMA, non-SWNCQ)
 * flavors.  Walks every enabled port under the host lock and hands the
 * active command, if any, to ata_host_intr().
 */
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);
			else
				// No request pending?  Clear interrupt status
				// anyway, in case there's one pending.
				ap->ops->check_status(ap);
		}

	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
1561
Jeff Garzikcca39742006-08-24 03:19:22 -04001562static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
Tejun Heoada364e2006-06-17 15:49:56 +09001563{
1564 int i, handled = 0;
1565
Jeff Garzikcca39742006-08-24 03:19:22 -04001566 for (i = 0; i < host->n_ports; i++) {
1567 struct ata_port *ap = host->ports[i];
Tejun Heoada364e2006-06-17 15:49:56 +09001568
1569 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1570 handled += nv_host_intr(ap, irq_stat);
1571
1572 irq_stat >>= NV_INT_PORT_SHIFT;
1573 }
1574
1575 return IRQ_RETVAL(handled);
1576}
1577
/* nForce2/3 interrupt handler: the combined interrupt status byte
 * lives in I/O space next to port 0's SCR registers.
 */
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}
1591
/* CK804/MCP04 interrupt handler: like nv_nf2_interrupt() but the
 * combined status byte is read from the MMIO BAR.
 */
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}
1605
Tejun Heoda3dbb12007-07-16 14:29:40 +09001606static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608 if (sc_reg > SCR_CONTROL)
Tejun Heoda3dbb12007-07-16 14:29:40 +09001609 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610
Tejun Heoda3dbb12007-07-16 14:29:40 +09001611 *val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
1612 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001613}
1614
Tejun Heoda3dbb12007-07-16 14:29:40 +09001615static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617 if (sc_reg > SCR_CONTROL)
Tejun Heoda3dbb12007-07-16 14:29:40 +09001618 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619
Tejun Heo0d5ff562007-02-01 15:06:36 +09001620 iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
Tejun Heoda3dbb12007-07-16 14:29:40 +09001621 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622}
1623
/* Freeze the port: mask this port's bits in the shared nForce2-style
 * interrupt enable register (read-modify-write in I/O space).
 */
static void nv_nf2_freeze(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}
1634
/* Thaw the port: ack any latched interrupt status for this port, then
 * re-enable its default interrupt mask.
 */
static void nv_nf2_thaw(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	/* write-to-clear pending status before unmasking */
	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}
1647
/* Freeze the port on CK804/MCP04: mask its bits in the MMIO
 * interrupt enable register.
 */
static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
1658
/* Thaw the port on CK804/MCP04: ack latched status, then re-enable
 * the default interrupt mask for this port.
 */
static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	/* write-to-clear pending status before unmasking */
	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
1671
/* Freeze the port on MCP55: ack latched status, mask this port's
 * (32-bit-wide) interrupt bits, then freeze the BMDMA engine too.
 */
static void nv_mcp55_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
	u32 mask;

	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
	mask &= ~(NV_INT_ALL_MCP55 << shift);
	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
	ata_bmdma_freeze(ap);
}
1685
/* Thaw the port on MCP55: ack latched status, restore the default
 * interrupt mask, then thaw the BMDMA engine.
 */
static void nv_mcp55_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
	u32 mask;

	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
	mask |= (NV_INT_MASK_MCP55 << shift);
	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
	ata_bmdma_thaw(ap);
}
1699
/* Hardreset wrapper that deliberately discards the device class:
 * classification by reset signature is unreliable on these parts.
 */
static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugzilla.kernel.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(link, &dummy, deadline);
}
1711
/* Standard BMDMA error handler, with nv_hardreset substituted for the
 * stock hardreset (see comment there).
 */
static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
1717
/* ADMA error handler.  If the port is still in ADMA mode, dump the
 * engine state for diagnostics, drop back to register mode, invalidate
 * all CPBs, and pulse the channel reset bit before running the common
 * BMDMA error handling.
 */
static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
			/* snapshot engine registers for the error report */
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);
			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

			ata_port_printk(ap, KERN_ERR,
				"EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
				"next cpb count 0x%X next cpb idx 0x%x\n",
				notifier, notifier_error, gen_ctl, status,
				cpb_count, next_cpb_idx);

			/* log every CPB belonging to an active command */
			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
				struct nv_adma_cpb *cpb = &pp->cpb[i];
				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
				    ap->link.sactive & (1 << i))
					ata_port_printk(ap, KERN_ERR,
						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
						i, cpb->ctl_flags, cpb->resp_flags);
			}
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from
		   being executed */
		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
1774
/* Append @qc to the software defer queue: record its tag in both the
 * ring buffer and the defer_bits bitmap.
 */
static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;

	/* queue is full */
	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
	dq->defer_bits |= (1 << qc->tag);
	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
}
1785
/* Pop the oldest deferred command off the defer queue, clearing its
 * bit in defer_bits.  Returns NULL when the queue is empty.
 */
static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;
	unsigned int tag;

	if (dq->head == dq->tail)	/* null queue */
		return NULL;

	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
	/* a tag in the ring must also be marked in defer_bits */
	WARN_ON(!(dq->defer_bits & (1 << tag)));
	dq->defer_bits &= ~(1 << tag);

	return ata_qc_from_tag(ap, tag);
}
1802
/* Reset the per-port FIS tracking state (which tags have seen D2H,
 * DMA-setup and SDB FISes) and the NCQ flags.
 */
static void nv_swncq_fis_reinit(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	pp->dhfis_bits = 0;
	pp->dmafis_bits = 0;
	pp->sdbfis_bits = 0;
	pp->ncq_flags = 0;
}
1812
/* Reset all SWNCQ per-port software state: empty the defer queue,
 * clear the active-command mask, and reset the FIS tracking bits.
 */
static void nv_swncq_pp_reinit(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;

	dq->head = 0;
	dq->tail = 0;
	dq->defer_bits = 0;
	pp->qc_active = 0;
	pp->last_issue_tag = ATA_TAG_POISON;
	nv_swncq_fis_reinit(ap);
}
1825
/* Acknowledge the given interrupt bits in this port's MCP55 interrupt
 * status register (write-to-clear).
 */
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	writew(fis, pp->irq_block);
}
1832
/* Stop the BMDMA engine when no queued command is at hand.  A stack
 * dummy qc with only ->ap filled in is passed to ata_bmdma_stop() —
 * NOTE(review): this relies on ata_bmdma_stop() dereferencing nothing
 * in the qc beyond qc->ap; confirm against libata if it changes. */
static void __ata_bmdma_stop(struct ata_port *ap)
{
	struct ata_queued_cmd qc;

	qc.ap = ap;
	ata_bmdma_stop(&qc);
}
1840
1841static void nv_swncq_ncq_stop(struct ata_port *ap)
1842{
1843 struct nv_swncq_port_priv *pp = ap->private_data;
1844 unsigned int i;
1845 u32 sactive;
1846 u32 done_mask;
1847
1848 ata_port_printk(ap, KERN_ERR,
1849 "EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n",
1850 ap->qc_active, ap->link.sactive);
1851 ata_port_printk(ap, KERN_ERR,
1852 "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n "
1853 "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1854 pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1855 pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1856
1857 ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
1858 ap->ops->check_status(ap),
1859 ioread8(ap->ioaddr.error_addr));
1860
1861 sactive = readl(pp->sactive_block);
1862 done_mask = pp->qc_active ^ sactive;
1863
1864 ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sacitve\n");
1865 for (i = 0; i < ATA_MAX_QUEUE; i++) {
1866 u8 err = 0;
1867 if (pp->qc_active & (1 << i))
1868 err = 0;
1869 else if (done_mask & (1 << i))
1870 err = 1;
1871 else
1872 continue;
1873
1874 ata_port_printk(ap, KERN_ERR,
1875 "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1876 (pp->dhfis_bits >> i) & 0x1,
1877 (pp->dmafis_bits >> i) & 0x1,
1878 (pp->sdbfis_bits >> i) & 0x1,
1879 (sactive >> i) & 0x1,
1880 (err ? "error! tag doesn't exit" : " "));
1881 }
1882
1883 nv_swncq_pp_reinit(ap);
1884 ap->ops->irq_clear(ap);
1885 __ata_bmdma_stop(ap);
1886 nv_swncq_irq_clear(ap, 0xffff);
1887}
1888
/* SWNCQ error handler: when NCQ commands are in flight, dump state and
 * stop the engine, forcing a reset, before running the common BMDMA EH.
 */
static void nv_swncq_error_handler(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->link.eh_context;

	if (ap->link.sactive) {
		nv_swncq_ncq_stop(ap);
		ehc->i.action |= ATA_EH_RESET;
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
1901
1902#ifdef CONFIG_PM
/* Suspend callback: quiesce the SWNCQ hardware — ack and disable all
 * interrupts, then turn off SWNCQ mode on both channels.  Returns 0.
 */
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* disable irq */
	writel(0, mmio + NV_INT_ENABLE_MCP55);

	/* disable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
	writel(tmp, mmio + NV_CTL_MCP55);

	return 0;
}
1921
/* Resume callback: re-enable interrupts (same 0x00fd00fd mask that
 * nv_swncq_host_init() programs) and turn SWNCQ mode back on.
 */
static int nv_swncq_port_resume(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* enable irq */
	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	return 0;
}
1939#endif
1940
/* One-time host setup for SWNCQ: disable a chip erratum workaround
 * bit (config byte 0x7f, "ECO 398"), enable SWNCQ on both channels,
 * enable the interrupt sources and clear any stale port interrupts.
 */
static void nv_swncq_host_init(struct ata_host *host)
{
	u32 tmp;
	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable  ECO 398 */
	pci_read_config_byte(pdev, 0x7f, &regval);
	regval &= ~(1 << 7);
	pci_write_config_byte(pdev, 0x7f, regval);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	VPRINTK("HOST_CTL:0x%X\n", tmp);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	/* enable irq intr */
	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
	VPRINTK("HOST_ENABLE:0x%X\n", tmp);
	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* clear port irq */
	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}
1966
/* SCSI slave_config hook with a device quirk: NCQ is unreliable with
 * Maxtor drives on MCP51, and on MCP55 up to revision a2, so queue
 * depth is forced to 1 for those combinations.
 */
static int nv_swncq_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct ata_device *dev;
	int rc;
	u8 rev;
	u8 check_maxtor = 0;
	unsigned char model_num[ATA_ID_PROD_LEN + 1];

	rc = ata_scsi_slave_config(sdev);
	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	dev = &ap->link.device[sdev->id];
	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
		return rc;

	/* if MCP51 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
		check_maxtor = 1;

	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
		pci_read_config_byte(pdev, 0x8, &rev);
		if (rev <= 0xa2)
			check_maxtor = 1;
	}

	if (!check_maxtor)
		return rc;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

	if (strncmp(model_num, "Maxtor", 6) == 0) {
		ata_scsi_change_queue_depth(sdev, 1);
		ata_dev_printk(dev, KERN_NOTICE,
			"Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
	}

	return rc;
}
2012
/* Per-port init for SWNCQ: allocate private state and one PRD table
 * per possible tag, and cache the MMIO addresses of the sactive,
 * interrupt-status and tag registers.  Returns 0 or -ENOMEM; devm/dmam
 * allocations are released automatically on teardown.
 */
static int nv_swncq_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct nv_swncq_port_priv *pp;
	int rc;

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
				      &pp->prd_dma, GFP_KERNEL);
	if (!pp->prd)
		return -ENOMEM;
	memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);

	ap->private_data = pp;
	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;

	return 0;
}
2041
2042static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
2043{
2044 if (qc->tf.protocol != ATA_PROT_NCQ) {
2045 ata_qc_prep(qc);
2046 return;
2047 }
2048
2049 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2050 return;
2051
2052 nv_swncq_fill_sg(qc);
2053}
2054
/* Build the per-tag PRD table for @qc.  Each scatterlist segment is
 * split so that no PRD entry crosses a 64KB boundary (a length field
 * of 0 in flags_len encodes a full 64KB); the last entry is marked EOT.
 */
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_prd *prd;
	unsigned int si, idx;

	prd = pp->prd + ATA_MAX_PRD * qc->tag;

	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		addr = (u32)sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			/* clamp each chunk to the end of its 64KB page */
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			prd[idx].addr = cpu_to_le32(addr);
			prd[idx].flags_len = cpu_to_le32(len & 0xffff);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the final PRD entry as end-of-table */
	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
2090
/* Actually issue one NCQ command to the drive: set its bit in the
 * sactive register, update the software tracking state, and send the
 * taskfile.  A NULL @qc is a no-op.  Always returns 0.
 */
static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
					  struct ata_queued_cmd *qc)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	if (qc == NULL)
		return 0;

	DPRINTK("Enter\n");

	writel((1 << qc->tag), pp->sactive_block);
	pp->last_issue_tag = qc->tag;
	/* FIS tracking restarts for this tag */
	pp->dhfis_bits &= ~(1 << qc->tag);
	pp->dmafis_bits &= ~(1 << qc->tag);
	pp->qc_active |= (0x1 << qc->tag);

	ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
	ap->ops->exec_command(ap, &qc->tf);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}
2114
/* Issue @qc.  Non-NCQ commands take the stock libata issue path.  An
 * NCQ command is sent immediately when the port is idle, otherwise it
 * is parked on the software defer queue for later issue.
 */
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_swncq_port_priv *pp = ap->private_data;

	if (qc->tf.protocol != ATA_PROT_NCQ)
		return ata_qc_issue_prot(qc);

	DPRINTK("Enter\n");

	if (!pp->qc_active)
		nv_swncq_issue_atacmd(ap, qc);
	else
		nv_swncq_qc_to_dq(ap, qc);	/* add qc to defer queue */

	return 0;
}
2132
/* Handle a hotplug event signalled in @fis: clear SError, record what
 * happened in the EH info, and freeze the port so EH takes over.
 */
static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
{
	u32 serror;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	sata_scr_read(&ap->link, SCR_ERROR, &serror);
	sata_scr_write(&ap->link, SCR_ERROR, serror);

	/* analyze @irq_stat */
	if (fis & NV_SWNCQ_IRQ_ADDED)
		ata_ehi_push_desc(ehi, "hot plug");
	else if (fis & NV_SWNCQ_IRQ_REMOVED)
		ata_ehi_push_desc(ehi, "hot unplug");

	ata_ehi_hotplugged(ehi);

	/* okay, let's hand over to EH */
	ehi->serror |= serror;

	ata_port_freeze(ap);
}
2157
/* Handle completion notification (SDB FIS reception).  Completes every
 * command whose sactive bit the drive has cleared, then, if commands
 * remain, decides whether to reissue the last command or launch the
 * next deferred one.  Returns the number of completed commands, or
 * -EINVAL after flagging EH on a BMDMA error or an illegal sactive
 * transition.
 */
static int nv_swncq_sdbfis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 sactive;
	int nr_done = 0;
	u32 done_mask;
	int i;
	u8 host_stat;
	u8 lack_dhfis = 0;

	host_stat = ap->ops->bmdma_status(ap);
	if (unlikely(host_stat & ATA_DMA_ERR)) {
		/* error when transferring data to/from memory */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
		ehi->err_mask |= AC_ERR_HOST_BUS;
		ehi->action |= ATA_EH_RESET;
		return -EINVAL;
	}

	ap->ops->irq_clear(ap);
	__ata_bmdma_stop(ap);

	/* bits that fell out of sactive since issue are completions */
	sactive = readl(pp->sactive_block);
	done_mask = pp->qc_active ^ sactive;

	/* a bit set in sactive but not in qc_active means the hardware
	   claims a command active that the driver never issued */
	if (unlikely(done_mask & sactive)) {
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition"
				  "(%08x->%08x)", pp->qc_active, sactive);
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		return -EINVAL;
	}
	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		if (!(done_mask & (1 << i)))
			continue;

		qc = ata_qc_from_tag(ap, i);
		if (qc) {
			ata_qc_complete(qc);
			pp->qc_active &= ~(1 << i);
			pp->dhfis_bits &= ~(1 << i);
			pp->dmafis_bits &= ~(1 << i);
			pp->sdbfis_bits |= (1 << i);
			nr_done++;
		}
	}

	if (!ap->qc_active) {
		/* everything completed — reset software state */
		DPRINTK("over\n");
		nv_swncq_pp_reinit(ap);
		return nr_done;
	}

	if (pp->qc_active & pp->dhfis_bits)
		return nr_done;

	if ((pp->ncq_flags & ncq_saw_backout) ||
	    (pp->qc_active ^ pp->dhfis_bits))
		/* if the controller can't get a device to host register FIS,
		 * The driver needs to reissue the new command.
		 */
		lack_dhfis = 1;

	DPRINTK("id 0x%x QC: qc_active 0x%x,"
		"SWNCQ:qc_active 0x%X defer_bits %X "
		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
		ap->print_id, ap->qc_active, pp->qc_active,
		pp->defer_queue.defer_bits, pp->dhfis_bits,
		pp->dmafis_bits, pp->last_issue_tag);

	nv_swncq_fis_reinit(ap);

	if (lack_dhfis) {
		/* reissue the most recently issued command */
		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
		nv_swncq_issue_atacmd(ap, qc);
		return nr_done;
	}

	if (pp->defer_queue.defer_bits) {
		/* send deferral queue command */
		qc = nv_swncq_qc_from_dq(ap);
		WARN_ON(qc == NULL);
		nv_swncq_issue_atacmd(ap, qc);
	}

	return nr_done;
}
2249
2250static inline u32 nv_swncq_tag(struct ata_port *ap)
2251{
2252 struct nv_swncq_port_priv *pp = ap->private_data;
2253 u32 tag;
2254
2255 tag = readb(pp->tag_block) >> 2;
2256 return (tag & 0x1f);
2257}
2258
/* Program and start BMDMA for the command named by the controller's
 * DMA Setup FIS tag register.
 *
 * Returns 1 when a transfer was started, 0 when the reported tag no
 * longer maps to an active qc.  Called with the host lock held.
 */
static int nv_swncq_dmafis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned int rw;
	u8 dmactl;
	u32 tag;
	struct nv_swncq_port_priv *pp = ap->private_data;

	__ata_bmdma_stop(ap);
	tag = nv_swncq_tag(ap);

	DPRINTK("dma setup tag 0x%x\n", tag);
	qc = ata_qc_from_tag(ap, tag);

	if (unlikely(!qc))
		return 0;

	rw = qc->tf.flags & ATA_TFLAG_WRITE;

	/* load PRD table addr. — each tag has its own PRD table slot */
	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~ATA_DMA_WR;
	/* ATA_DMA_WR means "controller writes to memory", i.e. a device
	 * read, hence set it when the taskfile is NOT a write */
	if (!rw)
		dmactl |= ATA_DMA_WR;

	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	return 1;
}
2292
/* Per-port bottom half of the MCP55 SWNCQ interrupt handler.
 * @fis: this port's bits of the NV_INT_STATUS_MCP55 register.
 *
 * Dispatches hotplug events, SDB-FIS completions, D2H register FIS
 * acknowledgements and DMA Setup FIS notifications; freezes the port
 * on device or protocol errors.  Called with the host lock held.
 */
static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 serror;
	u8 ata_stat;
	int rc = 0;

	ata_stat = ap->ops->check_status(ap);
	/* ack the bits we are about to service */
	nv_swncq_irq_clear(ap, fis);
	if (!fis)
		return;

	/* EH owns a frozen port; don't touch it */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
		nv_swncq_hotplug(ap, fis);
		return;
	}

	if (!pp->qc_active)
		return;

	/* read and clear SError; bail out if SCR access fails */
	if (ap->ops->scr_read(ap, SCR_ERROR, &serror))
		return;
	ap->ops->scr_write(ap, SCR_ERROR, serror);

	if (ata_stat & ATA_ERR) {
		/* device reported an error — hand the port to EH */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
		ehi->err_mask |= AC_ERR_DEV;
		ehi->serror |= serror;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
		return;
	}

	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
		/* If the IRQ is backout, driver must issue
		 * the new command again some time later.
		 */
		pp->ncq_flags |= ncq_saw_backout;
	}

	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
		pp->ncq_flags |= ncq_saw_sdb;
		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
			ap->print_id, pp->qc_active, pp->dhfis_bits,
			pp->dmafis_bits, readl(pp->sactive_block));
		/* complete finished commands; < 0 means protocol/DMA error */
		rc = nv_swncq_sdbfis(ap);
		if (rc < 0)
			goto irq_error;
	}

	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
		/* The interrupt indicates the new command
		 * was transmitted correctly to the drive.
		 */
		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
		pp->ncq_flags |= ncq_saw_d2h;
		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
			/* D2H ack after an SDB or backout is a protocol
			 * violation — escalate to EH */
			ata_ehi_push_desc(ehi, "illegal fis transaction");
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_RESET;
			goto irq_error;
		}

		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
		    !(pp->ncq_flags & ncq_saw_dmas)) {
			ata_stat = ap->ops->check_status(ap);
			if (ata_stat & ATA_BUSY)
				goto irq_exit;

			/* device is idle — push out the next deferred cmd */
			if (pp->defer_queue.defer_bits) {
				DPRINTK("send next command\n");
				qc = nv_swncq_qc_from_dq(ap);
				nv_swncq_issue_atacmd(ap, qc);
			}
		}
	}

	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
		/* program the dma controller with appropriate PRD buffers
		 * and start the DMA transfer for requested command.
		 */
		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
		pp->ncq_flags |= ncq_saw_dmas;
		rc = nv_swncq_dmafis(ap);
	}

irq_exit:
	return;
irq_error:
	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
	ata_port_freeze(ap);
	return;
}
2393
2394static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2395{
2396 struct ata_host *host = dev_instance;
2397 unsigned int i;
2398 unsigned int handled = 0;
2399 unsigned long flags;
2400 u32 irq_stat;
2401
2402 spin_lock_irqsave(&host->lock, flags);
2403
2404 irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2405
2406 for (i = 0; i < host->n_ports; i++) {
2407 struct ata_port *ap = host->ports[i];
2408
2409 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
2410 if (ap->link.sactive) {
2411 nv_swncq_host_interrupt(ap, (u16)irq_stat);
2412 handled = 1;
2413 } else {
2414 if (irq_stat) /* reserve Hotplug */
2415 nv_swncq_irq_clear(ap, 0xfff0);
2416
2417 handled += nv_host_intr(ap, (u8)irq_stat);
2418 }
2419 }
2420 irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2421 }
2422
2423 spin_unlock_irqrestore(&host->lock, flags);
2424
2425 return IRQ_RETVAL(handled);
2426}
2427
/* PCI probe.  Determines the controller flavor from the device-table
 * driver_data (possibly upgraded to ADMA or downgraded from SWNCQ per
 * module parameters), maps BARs, wires up SCR access, performs
 * mode-specific setup, and activates the host.
 *
 * Returns 0 on success or a negative errno on failure; all resources
 * are managed (pcim_/devm_) and released automatically on error.
 */
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	const struct ata_port_info *ppi[] = { NULL, NULL };
	struct ata_host *host;
	struct nv_host_priv *hpriv;
	int rc;
	u32 bar;
	void __iomem *base;
	unsigned long type = ent->driver_data;

	/* Make sure this is a SATA controller by counting the number of bars
	 * (NVIDIA SATA controllers will always have six bars).  Otherwise,
	 * it's an IDE controller and we ignore it.
	 */
	for (bar = 0; bar < 6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* determine type and allocate host */
	if (type == CK804 && adma_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
		type = ADMA;
	}

	if (type == SWNCQ) {
		if (swncq_enabled)
			dev_printk(KERN_NOTICE, &pdev->dev,
				   "Using SWNCQ mode\n");
		else
			type = GENERIC;
	}

	ppi[0] = &nv_port_info[type];
	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
	if (rc)
		return rc;

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	hpriv->type = type;
	host->private_data = hpriv;

	/* request and iomap NV_MMIO_BAR */
	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
	if (rc)
		return rc;

	/* configure SCR access */
	base = host->iomap[NV_MMIO_BAR];
	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	/* init ADMA */
	if (type == ADMA) {
		rc = nv_adma_host_init(host);
		if (rc)
			return rc;
	} else if (type == SWNCQ)
		nv_swncq_host_init(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, ppi[0]->irq_handler,
				 IRQF_SHARED, ppi[0]->sht);
}
2509
#ifdef CONFIG_PM
/* PCI resume.  Restores generic PCI/ATA state, then — only when coming
 * back from a full suspend (config space lost) — re-enables the CK804
 * SATA register space and reprograms the per-port ADMA enable bits to
 * match each port's current ATAPI/ADMA configuration, before resuming
 * the ATA host.
 */
static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		if (hpriv->type >= CK804) {
			/* redo the SATA-space enable from nv_init_one() */
			u8 regval;

			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if (hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;
			/* enable/disable ADMA on the ports appropriately */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			/* ATAPI ports run in legacy mode: ADMA bits off */
			pp = host->ports[0]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
					  NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			pp = host->ports[1]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
					  NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);

	return 0;
}
#endif
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002559
Jeff Garzikcca39742006-08-24 03:19:22 -04002560static void nv_ck804_host_stop(struct ata_host *host)
Tejun Heoada364e2006-06-17 15:49:56 +09002561{
Jeff Garzikcca39742006-08-24 03:19:22 -04002562 struct pci_dev *pdev = to_pci_dev(host->dev);
Tejun Heoada364e2006-06-17 15:49:56 +09002563 u8 regval;
2564
2565 /* disable SATA space for CK804 */
2566 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2567 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2568 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
Tejun Heoada364e2006-06-17 15:49:56 +09002569}
2570
Robert Hancockfbbb2622006-10-27 19:08:41 -07002571static void nv_adma_host_stop(struct ata_host *host)
2572{
2573 struct pci_dev *pdev = to_pci_dev(host->dev);
Robert Hancockfbbb2622006-10-27 19:08:41 -07002574 u32 tmp32;
2575
Robert Hancockfbbb2622006-10-27 19:08:41 -07002576 /* disable ADMA on the ports */
2577 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2578 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2579 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2580 NV_MCP_SATA_CFG_20_PORT1_EN |
2581 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2582
2583 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2584
2585 nv_ck804_host_stop(host);
2586}
2587
/* Module init: register the PCI driver. */
static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}
2592
/* Module exit: unregister the PCI driver. */
static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}
2597
/* Module entry points and user-tunable parameters (read-only at runtime;
 * set via adma=/swncq= on modprobe or the kernel command line). */
module_init(nv_init);
module_exit(nv_exit);
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
module_param_named(swncq, swncq_enabled, bool, 0444);
MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: false)");
2604