/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
23
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is quite often not
  worth the latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.

*/
59
60
Brett Russ20f733e2005-09-01 18:26:17 -040061#include <linux/kernel.h>
62#include <linux/module.h>
63#include <linux/pci.h>
64#include <linux/init.h>
65#include <linux/blkdev.h>
66#include <linux/delay.h>
67#include <linux/interrupt.h>
Brett Russ20f733e2005-09-01 18:26:17 -040068#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050069#include <linux/device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040070#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050071#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040072#include <scsi/scsi_device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040073#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040074
/* Driver identity: name is reused for the SHT proc_name and pci_driver. */
#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.01"
Brett Russ20f733e2005-09-01 18:26:17 -040077
/*
 * Chip register offsets and bit definitions.  All offsets are relative
 * to the memory BAR (MV_PRIMARY_BAR) unless a comment says otherwise.
 * Bits suffixed "_5" apply to Gen-I (50xx) parts only.
 */
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	/* CRQB (command request block) field layout */
	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	/* CRPB (command response block) field layout */
	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	/* PCIe (7042-style) equivalents of the PCI IRQ regs above */
	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	/* Error bits that freeze the port (Gen II/IIE) */
	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,
	/* Error bits that freeze the port (Gen I) */
	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
};
337
/* Chip-generation tests, keyed off the MV_HP_GEN_* bits in hp_flags. */
#define IS_GEN_I(hpriv)		((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500341
/* DMA-related constants kept separate from the register-map enum above. */
enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill-sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
356
/* Index into mv_port_info[]; also the driver_data value in mv_pci_tbl[]. */
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};
366
/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;	/* low 32 bits of ePRD table address */
	__le32			sg_addr_hi;	/* high 32 bits of ePRD table address */
	__le16			ctrl_flags;	/* CRQB_FLAG_*/CRQB_*_SHIFT fields */
	__le16			ata_cmd[11];	/* ATA register writes, CRQB_CMD_* coded */
};
374
/* Command ReQuest Block, Gen-IIE (6042/7042) layout; built by mv_qc_prep_iie() */
struct mv_crqb_iie {
	__le32			addr;		/* low 32 bits of ePRD table address */
	__le32			addr_hi;	/* high 32 bits of ePRD table address */
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];	/* packed ATA taskfile registers */
};
382
/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;	/* tag/IO id, see CRPB_IOID_SHIFT_* */
	__le16			flags;	/* status at CRPB_FLAG_STATUS_SHIFT */
	__le32			tmstmp;
};
389
/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;		/* low 32 bits of segment address */
	__le32			flags_size;	/* length; EPRD_FLAG_END_OF_TBL on last */
	__le32			addr_hi;	/* high 32 bits of segment address */
	__le32			reserved;
};
397
/*
 * Per-port private state: the EDMA request/response queues and s/g table
 * (CPU virtual + DMA bus addresses for each), the software queue indices,
 * and the MV_PP_FLAG_* state bits.
 */
struct mv_port_priv {
	struct mv_crqb		*crqb;		/* request queue (CPU addr) */
	dma_addr_t		crqb_dma;	/* request queue (bus addr) */
	struct mv_crpb		*crpb;		/* response queue (CPU addr) */
	dma_addr_t		crpb_dma;	/* response queue (bus addr) */
	struct mv_sg		*sg_tbl;	/* ePRD table (CPU addr) */
	dma_addr_t		sg_tbl_dma;	/* ePRD table (bus addr) */

	unsigned int		req_idx;	/* next request queue slot */
	unsigned int		resp_idx;	/* next response queue slot */

	u32			pp_flags;	/* MV_PP_FLAG_* */
};
411
/* Per-port PHY signal tuning values (read back by the read_preamp hw op). */
struct mv_port_signal {
	u32			amps;
	u32			pre;
};
416
/*
 * Per-host private state: hp_flags (MV_HP_*), saved PHY signal values for
 * up to 8 ports, the chip-specific hardware ops table, and the bus IRQ
 * register set in use (PCI vs PCIe offsets differ — see MV_HP_PCIE).
 */
struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	u32			irq_cause_ofs;	/* PCI_IRQ_CAUSE_OFS or PCIE_... */
	u32			irq_mask_ofs;	/* PCI_IRQ_MASK_OFS or PCIE_... */
	u32			unmask_all_irqs;
};
425
/* Chip-family-specific low-level operations (mv5 vs mv6 implementations). */
struct mv_hw_ops {
	/* apply PHY errata workarounds for one port */
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	/* read PHY preamp/signal values into hpriv->signal[idx] */
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	/* reset n_hc host controllers; returns 0 on success */
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};
437
/* Forward declarations for the libata callbacks and hw-op implementations
 * defined later in this file.
 */
static void mv_irq_clear(struct ata_port *ap);
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_post_int_cmd(struct ata_queued_cmd *qc);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq);
static int __mv_stop_dma(struct ata_port *ap);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500478
/* SCSI host template for Gen-I (50xx) chips. */
static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,	/* halved: s/g entries may be split */
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
496
/* SCSI host template for Gen-II/IIE (60xx/6042/7042) chips. */
static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,	/* halved: s/g entries may be split */
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
514
/* libata port operations for Gen-I chips (mv5-specific SCR accessors). */
static const struct ata_port_operations mv5_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
542
/* libata port operations for Gen-II chips (standard mv SCR accessors). */
static const struct ata_port_operations mv6_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
570
/* libata port operations for Gen-IIE chips (IIE-format CRQB in qc_prep). */
static const struct ata_port_operations mv_iie_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
598
/* Per-chip-type port configuration; indexed by enum chip_type. */
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
644
/* PCI device IDs handled by this driver.  The driver_data field of each
 * entry selects the board type (chip_* index into mv_port_info[]).
 */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
674
/* PCI driver glue: devices in mv_pci_tbl are probed via mv_init_one()
 * and removed through the generic libata PCI removal path.
 */
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
681
/* Chip-specific low-level hardware ops used by the 50xx-family chips
 * (PHY errata handling, LED setup, preamp readout, HC/flash/bus reset).
 */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
690
/* Chip-specific low-level hardware ops used by the 60xx-family chips;
 * same slots as mv5xxx_ops but with the Gen-II implementations, and the
 * generic PCI bus reset instead of the 5xxx-specific one.
 */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
699
/*
 * module options
 */
static int msi;	/* Use PCI MSI; zero (the default) disables, non-zero enables */
704
705
Jeff Garzikd88184f2007-02-26 01:26:06 -0500706/* move to PCI layer or libata core? */
707static int pci_go_64(struct pci_dev *pdev)
708{
709 int rc;
710
711 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
712 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
713 if (rc) {
714 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
715 if (rc) {
716 dev_printk(KERN_ERR, &pdev->dev,
717 "64-bit DMA enable failed\n");
718 return rc;
719 }
720 }
721 } else {
722 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
723 if (rc) {
724 dev_printk(KERN_ERR, &pdev->dev,
725 "32-bit DMA enable failed\n");
726 return rc;
727 }
728 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
729 if (rc) {
730 dev_printk(KERN_ERR, &pdev->dev,
731 "32-bit consistent DMA enable failed\n");
732 return rc;
733 }
734 }
735
736 return rc;
737}
738
Jeff Garzikddef9bb2006-02-02 16:17:06 -0500739/*
Brett Russ20f733e2005-09-01 18:26:17 -0400740 * Functions
741 */
742
/* Write a 32-bit register, then read it back so the write is not left
 * sitting in a PCI posted-write buffer.
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
748
Brett Russ20f733e2005-09-01 18:26:17 -0400749static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
750{
751 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
752}
753
Jeff Garzikc9d39132005-11-13 17:47:51 -0500754static inline unsigned int mv_hc_from_port(unsigned int port)
755{
756 return port >> MV_PORT_HC_SHIFT;
757}
758
759static inline unsigned int mv_hardport_from_port(unsigned int port)
760{
761 return port & MV_PORT_MASK;
762}
763
764static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
765 unsigned int port)
766{
767 return mv_hc_base(base, mv_hc_from_port(port));
768}
769
Brett Russ20f733e2005-09-01 18:26:17 -0400770static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
771{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500772 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500773 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500774 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400775}
776
777static inline void __iomem *mv_ap_base(struct ata_port *ap)
778{
Tejun Heo0d5ff562007-02-01 15:06:36 +0900779 return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
Brett Russ20f733e2005-09-01 18:26:17 -0400780}
781
Jeff Garzikcca39742006-08-24 03:19:22 -0400782static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400783{
Jeff Garzikcca39742006-08-24 03:19:22 -0400784 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400785}
786
/* Intentional no-op: this driver clears its interrupt-cause registers
 * explicitly elsewhere (e.g. mv_start_dma), but libata expects this
 * callback to exist.
 */
static void mv_irq_clear(struct ata_port *ap)
{
}
790
/**
 * mv_set_edma_ptrs - Program eDMA request/response queue registers
 * @port_mmio: port's MMIO register base
 * @hpriv: host private data (consulted for errata flags)
 * @pp: port private data holding queue DMA addresses and soft indices
 *
 * Writes the CRQB/CRPB queue base addresses and in/out pointers so the
 * hardware's view matches the driver's software indices.  On chips with
 * the XX42A0 erratum the out/in pointer registers also need the low
 * queue-base bits, not just the index field.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* CRQB base must be 1KB aligned (low 10 bits clear) */
	WARN_ON(pp->crqb_dma & 0x3ff);
	/* double shift extracts bits 63:32 without UB on 32-bit dma_addr_t */
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	/* CRPB base must be 256-byte aligned (low 8 bits clear) */
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
830
/**
 * mv_start_dma - Enable eDMA engine
 * @ap: ATA channel being started
 * @port_mmio: port's MMIO register base
 * @pp: port private data
 * @protocol: taskfile protocol of the command about to be issued
 *
 * If eDMA is already running but in the wrong NCQ/non-NCQ mode for
 * @protocol, it is stopped first.  When (re)starting, all pending
 * EDMA/FIS interrupt causes are cleared, the EDMA config and queue
 * pointers are reprogrammed, and the engine is enabled.  Verify the
 * local cache of the eDMA state is accurate with a WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		/* mode change requires a full stop/reconfigure cycle */
		if (want_ncq != using_ncq)
			__mv_stop_dma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
				ap->host->iomap[MV_PRIMARY_BAR], hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			   (CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			/* only ack this port's bits; leave siblings alone */
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}
883
/**
 * __mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Requests eDMA disable (the disable bit self-clears) and then polls
 * up to ~100ms for the engine to report stopped.  Verify the local
 * cache of the eDMA state is accurate with a WARN_ON.
 *
 * Returns 0 on success, -EIO if the engine refuses to stop.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.  The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		/* software thinks it is already stopped; verify */
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
926
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400927static int mv_stop_dma(struct ata_port *ap)
928{
929 unsigned long flags;
930 int rc;
931
932 spin_lock_irqsave(&ap->host->lock, flags);
933 rc = __mv_stop_dma(ap);
934 spin_unlock_irqrestore(&ap->host->lock, flags);
935
936 return rc;
937}
938
#ifdef ATA_DEBUG
/* Debug helper: hex-dump @bytes of MMIO space at @start, four 32-bit
 * words per output row.  Note the inner loop advances the shared byte
 * offset 'b', so the outer loop's condition alone terminates the dump.
 */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
953
/* Debug helper: hex-dump the first @bytes of @pdev's PCI config space,
 * four dwords per row; compiled to an empty body unless ATA_DEBUG.
 */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			/* best-effort dump; read errors are ignored */
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
970static void mv_dump_all_regs(void __iomem *mmio_base, int port,
971 struct pci_dev *pdev)
972{
973#ifdef ATA_DEBUG
Jeff Garzik8b260242005-11-12 12:32:50 -0500974 void __iomem *hc_base = mv_hc_base(mmio_base,
Brett Russ31961942005-09-30 01:36:00 -0400975 port >> MV_PORT_HC_SHIFT);
976 void __iomem *port_base;
977 int start_port, num_ports, p, start_hc, num_hcs, hc;
978
979 if (0 > port) {
980 start_hc = start_port = 0;
981 num_ports = 8; /* shld be benign for 4 port devs */
982 num_hcs = 2;
983 } else {
984 start_hc = port >> MV_PORT_HC_SHIFT;
985 start_port = port;
986 num_ports = num_hcs = 1;
987 }
Jeff Garzik8b260242005-11-12 12:32:50 -0500988 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
Brett Russ31961942005-09-30 01:36:00 -0400989 num_ports > 1 ? num_ports - 1 : start_port);
990
991 if (NULL != pdev) {
992 DPRINTK("PCI config space regs:\n");
993 mv_dump_pci_cfg(pdev, 0x68);
994 }
995 DPRINTK("PCI regs:\n");
996 mv_dump_mem(mmio_base+0xc00, 0x3c);
997 mv_dump_mem(mmio_base+0xd00, 0x34);
998 mv_dump_mem(mmio_base+0xf00, 0x4);
999 mv_dump_mem(mmio_base+0x1d00, 0x6c);
1000 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
Dan Alonid220c372006-04-10 23:20:22 -07001001 hc_base = mv_hc_base(mmio_base, hc);
Brett Russ31961942005-09-30 01:36:00 -04001002 DPRINTK("HC regs (HC %i):\n", hc);
1003 mv_dump_mem(hc_base, 0x1c);
1004 }
1005 for (p = start_port; p < start_port + num_ports; p++) {
1006 port_base = mv_port_base(mmio_base, p);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001007 DPRINTK("EDMA regs (port %i):\n", p);
Brett Russ31961942005-09-30 01:36:00 -04001008 mv_dump_mem(port_base, 0x54);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001009 DPRINTK("SATA regs (port %i):\n", p);
Brett Russ31961942005-09-30 01:36:00 -04001010 mv_dump_mem(port_base+0x300, 0x60);
1011 }
1012#endif
1013}
1014
Brett Russ20f733e2005-09-01 18:26:17 -04001015static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1016{
1017 unsigned int ofs;
1018
1019 switch (sc_reg_in) {
1020 case SCR_STATUS:
1021 case SCR_CONTROL:
1022 case SCR_ERROR:
1023 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1024 break;
1025 case SCR_ACTIVE:
1026 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1027 break;
1028 default:
1029 ofs = 0xffffffffU;
1030 break;
1031 }
1032 return ofs;
1033}
1034
Tejun Heoda3dbb12007-07-16 14:29:40 +09001035static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -04001036{
1037 unsigned int ofs = mv_scr_offset(sc_reg_in);
1038
Tejun Heoda3dbb12007-07-16 14:29:40 +09001039 if (ofs != 0xffffffffU) {
1040 *val = readl(mv_ap_base(ap) + ofs);
1041 return 0;
1042 } else
1043 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001044}
1045
Tejun Heoda3dbb12007-07-16 14:29:40 +09001046static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -04001047{
1048 unsigned int ofs = mv_scr_offset(sc_reg_in);
1049
Tejun Heoda3dbb12007-07-16 14:29:40 +09001050 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -04001051 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001052 return 0;
1053 } else
1054 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001055}
1056
/**
 * mv_edma_cfg - Program the port's EDMA configuration register
 * @pp: port private data (NCQ-enabled flag is updated here)
 * @hpriv: host private data (chip generation checks)
 * @port_mmio: port's MMIO register base
 * @want_ncq: non-zero to configure the engine for NCQ operation
 *
 * Builds the EDMA_CFG value from chip-generation-specific bits plus the
 * queue depth, sets or clears the NCQ bit per @want_ncq, mirrors that
 * choice into pp->pp_flags, and writes the register.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq)
{
	u32 cfg;

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1086
/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.  One coherent region is carved into the CRQB queue,
 * the CRPB queue, and the ePRD scatter/gather table, in that order.
 * All allocations are device-managed, so error paths need no manual
 * cleanup.  eDMA itself is deliberately NOT enabled here.
 *
 * Returns 0 on success, -ENOMEM or the ata_pad_alloc() error on failure.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	unsigned long flags;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	/* program the hardware's view of the queues under the host lock */
	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(pp, hpriv, port_mmio, 0);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}
1159
/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA; the port's DMA memory is device-managed (see
 * mv_port_start) and needs no explicit release here.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
}
1173
/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.  Each DMA segment is
 * split so that no ePRD entry crosses a 64KB boundary (the entry's
 * length field is 16 bits), and the final entry emitted gets the
 * end-of-table flag.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			/* clamp so this chunk stops at the next 64KB line */
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
1217
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001218static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001219{
Mark Lord559eeda2006-05-19 16:40:15 -04001220 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001221 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001222 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001223}
1224
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply returns early if the command is not DMA.
 * Else, it handles prep of the CRQB (command request block): fills
 * in the SG table address and control flags, packs the taskfile
 * registers into the CRQB's command words (order matters; the device
 * command must be the final, "last"-flagged word), and calls the SG
 * load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	/* hob (bits 47:24) registers are written before their low halves */
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1315
/**
 * mv_qc_prep_iie - Host specific command preparation (Gen IIE).
 * @qc: queued command to prepare
 *
 * This routine simply returns early if the command is not DMA.
 * Else, it handles prep of the Gen IIE CRQB (command request block),
 * whose layout differs from the older chips: the taskfile registers
 * are packed into four fixed 32-bit words instead of a sequence of
 * per-register command words.  Finally it calls the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *	mv_qc_issue - Initiate a command to the host
 *	@qc: queued command to start
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it sanity checks our local
 *	caches of the request producer/consumer indices then enables
 *	DMA and bumps the request producer index.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	/* make sure EDMA is on; the CRQB was built by mv_qc_prep*() */
	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point:
	 * our cached producer index must match the hardware's OUT pointer.
	 */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	/* advance the software producer index to cover the new CRQB */
	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1430
/**
 *	mv_err_intr - Handle error interrupts on the port
 *	@ap: ATA channel to manipulate
 *	@qc: affected command (may be NULL); receives the error mask if set
 *
 *	In most cases, just clear the interrupt and move on.  However,
 *	some cases require an eDMA reset, which is done right before
 *	the COMRESET in mv_phy_reset().  The SERR case requires a
 *	clear of pending errors in the SATA SERROR register.  Finally,
 *	if the port disabled DMA, update our cached copy to match.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */

	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_HARDRESET;
	}

	/* generation-specific bits: Gen I (50xx) has its own freeze mask
	 * and self-disable bit; Gen II/IIE additionally report SErr here.
	 */
	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	/* freeze the port for serious errors, otherwise just abort */
	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
1535
/* Complete the active non-EDMA (PIO) command, if the device is ready.
 * Called from the HC interrupt path with the host lock held.
 */
static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}
1557
/* Drain the EDMA response queue: complete every command whose CRPB the
 * hardware has posted, then (once) tell the hardware how far we got by
 * rewriting the response-queue OUT pointer.  On any error status, hand
 * off to mv_err_intr() and bail without updating the OUT pointer.
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
		 * bits (WARNING: might not necessarily be associated
		 * with this command), which -should- be clear
		 * if all is well
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if (unlikely(status & 0xff)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	/* on exit, out_index has caught up to the h/w IN pointer; publish
	 * it as the new OUT pointer so the hardware can reuse the slots.
	 */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1624
Brett Russ05b308e2005-10-05 17:08:53 -04001625/**
1626 * mv_host_intr - Handle all interrupts on the given host controller
Jeff Garzikcca39742006-08-24 03:19:22 -04001627 * @host: host specific structure
Brett Russ05b308e2005-10-05 17:08:53 -04001628 * @relevant: port error bits relevant to this host controller
1629 * @hc: which host controller we're to look at
1630 *
1631 * Read then write clear the HC interrupt status then walk each
1632 * port connected to the HC and see if it needs servicing. Port
1633 * success ints are reported in the HC interrupt status reg, the
1634 * port error ints are reported in the higher level main
1635 * interrupt status register and thus are passed in via the
1636 * 'relevant' argument.
1637 *
1638 * LOCKING:
1639 * Inherited from caller.
1640 */
Jeff Garzikcca39742006-08-24 03:19:22 -04001641static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
Brett Russ20f733e2005-09-01 18:26:17 -04001642{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001643 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
Brett Russ20f733e2005-09-01 18:26:17 -04001644 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
Brett Russ20f733e2005-09-01 18:26:17 -04001645 u32 hc_irq_cause;
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001646 int port, port0;
Brett Russ20f733e2005-09-01 18:26:17 -04001647
Jeff Garzik35177262007-02-24 21:26:42 -05001648 if (hc == 0)
Brett Russ20f733e2005-09-01 18:26:17 -04001649 port0 = 0;
Jeff Garzik35177262007-02-24 21:26:42 -05001650 else
Brett Russ20f733e2005-09-01 18:26:17 -04001651 port0 = MV_PORTS_PER_HC;
Brett Russ20f733e2005-09-01 18:26:17 -04001652
1653 /* we'll need the HC success int register in most cases */
1654 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001655 if (!hc_irq_cause)
1656 return;
1657
1658 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001659
1660 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001661 hc, relevant, hc_irq_cause);
Brett Russ20f733e2005-09-01 18:26:17 -04001662
1663 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
Jeff Garzikcca39742006-08-24 03:19:22 -04001664 struct ata_port *ap = host->ports[port];
Mark Lord63af2a52006-03-29 09:50:31 -05001665 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001666 int have_err_bits, hard_port, shift;
Jeff Garzik55d8ca42006-03-29 19:43:31 -05001667
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001668 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
Jeff Garzika2c91a82005-11-17 05:44:44 -05001669 continue;
1670
Brett Russ31961942005-09-30 01:36:00 -04001671 shift = port << 1; /* (port * 2) */
Brett Russ20f733e2005-09-01 18:26:17 -04001672 if (port >= MV_PORTS_PER_HC) {
1673 shift++; /* skip bit 8 in the HC Main IRQ reg */
1674 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001675 have_err_bits = ((PORT0_ERR << shift) & relevant);
1676
1677 if (unlikely(have_err_bits)) {
1678 struct ata_queued_cmd *qc;
1679
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001680 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001681 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1682 continue;
1683
1684 mv_err_intr(ap, qc);
1685 continue;
Brett Russ20f733e2005-09-01 18:26:17 -04001686 }
Jeff Garzik8b260242005-11-12 12:32:50 -05001687
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001688 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1689
1690 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1691 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1692 mv_intr_edma(ap);
1693 } else {
1694 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1695 mv_intr_pio(ap);
Brett Russ20f733e2005-09-01 18:26:17 -04001696 }
1697 }
1698 VPRINTK("EXIT\n");
1699}
1700
/* Handle a PCI-level error interrupt: log and dump registers, clear the
 * cause register, then freeze every port with a live link so libata EH
 * can recover them.
 */
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	/* write-to-clear the PCI error cause */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			/* record the cause on the first affected port only */
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
1740
/**
 *	mv_interrupt - Main interrupt event handler
 *	@irq: unused
 *	@dev_instance: private data; in this case the host structure
 *
 *	Read the read only register to determine if any host
 *	controllers have pending interrupts.  If so, call lower level
 *	routine to handle.  Also check for PCI errors which are only
 *	reported here.
 *
 *	LOCKING:
 *	This routine holds the host lock while processing pending
 *	interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	u32 irq_stat, irq_mask;

	spin_lock(&host->lock);
	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
	irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	/* PCI errors take priority and suppress normal HC handling */
	if (unlikely(irq_stat & PCI_ERR)) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	/* dispatch each HC whose cause bits are set in the main register */
	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1793
Jeff Garzikc9d39132005-11-13 17:47:51 -05001794static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1795{
1796 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1797 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1798
1799 return hc_mmio + ofs;
1800}
1801
1802static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1803{
1804 unsigned int ofs;
1805
1806 switch (sc_reg_in) {
1807 case SCR_STATUS:
1808 case SCR_ERROR:
1809 case SCR_CONTROL:
1810 ofs = sc_reg_in * sizeof(u32);
1811 break;
1812 default:
1813 ofs = 0xffffffffU;
1814 break;
1815 }
1816 return ofs;
1817}
1818
Tejun Heoda3dbb12007-07-16 14:29:40 +09001819static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001820{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001821 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1822 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001823 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1824
Tejun Heoda3dbb12007-07-16 14:29:40 +09001825 if (ofs != 0xffffffffU) {
1826 *val = readl(addr + ofs);
1827 return 0;
1828 } else
1829 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001830}
1831
Tejun Heoda3dbb12007-07-16 14:29:40 +09001832static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001833{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001834 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1835 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001836 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1837
Tejun Heoda3dbb12007-07-16 14:29:40 +09001838 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001839 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001840 return 0;
1841 } else
1842 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001843}
1844
/* 50xx PCI bus reset.  On everything except a revision-0 5080, set bit 0
 * of the expansion-ROM BAR control register first, then perform the
 * common PCI bus reset.
 */
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	int early_5080;

	/* identify the original 5080 stepping, which skips the BAR tweak */
	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}
1859
/* Reset the flash controller on 50xx parts.  The 0x0fcfffff value is a
 * magic constant; presumably taken from the vendor driver — meaning of
 * the individual bits is not documented here.
 */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1864
/* Capture the 50xx PHY pre-emphasis and amplitude settings for port @idx
 * into hpriv->signal[], so phy_errata can restore them after a reset.
 */
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}
1876
/* Enable LED handling on 50xx parts. */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): "|= ~(1 << 0)" sets every bit EXCEPT bit 0, leaving
	 * bit 0 as read.  mv5_reset_bus() does "|= (1 << 0)" on this same
	 * register, so "&= ~(1 << 0)" (clear bit 0) looks like the intent —
	 * confirm against the vendor driver before changing hardware writes.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1889
/* Apply 50xx PHY errata workarounds to @port, then restore the saved
 * pre-emphasis/amplitude values captured by mv5_read_preamp().
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	/* pre (bits 12:11) and amps (bits 7:5) fields of MV5_PHY_MODE */
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		/* 50XXB0 errata: tweak LT mode and PHY control */
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* re-apply the saved signal settings */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1915
Jeff Garzikc9d39132005-11-13 17:47:51 -05001916
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Hard-reset one 50xx port: disable EDMA, reset the channel, then zero
 * the per-port EDMA registers and restore the two non-zero defaults.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1943
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Reset one 50xx host-controller block: zero its config/IRQ registers
 * and rewrite the register at offset 0x20 with adjusted bit fields.
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	/* keep bits in 0x1c1c1c1c, force 0x03030303 on — exact register
	 * semantics are not documented here (from the vendor driver).
	 */
	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
1962
1963static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1964 unsigned int n_hc)
1965{
1966 unsigned int hc, port;
1967
1968 for (hc = 0; hc < n_hc; hc++) {
1969 for (port = 0; port < MV_PORTS_PER_HC; port++)
1970 mv5_reset_hc_port(hpriv, mmio,
1971 (hc * MV_PORTS_PER_HC) + port);
1972
1973 mv5_reset_one_hc(hpriv, mmio, hc);
1974 }
1975
1976 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05001977}
1978
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
/* Common PCI bus reset: clear the PCI mode byte, then zero the timers,
 * masks and error-latch registers (per-chip IRQ cause/mask offsets come
 * from hpriv).
 */
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	/* clear bits 23:16 of MV_PCI_MODE, preserving the rest */
	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
2004
/* 60xx flash reset: do the 50xx flash reset, then adjust the GPIO port
 * control register (keep bits 1:0, set bits 5 and 6).
 */
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
2016
/**
 *	mv6_reset_hc - Perform the 6xxx global soft reset
 *	@hpriv: host private data (unused here, kept for the ops signature)
 *	@mmio: base address of the HBA
 *	@n_hc: number of host controllers (unused here)
 *
 *	This routine only applies to 6xxx parts.
 *
 *	Returns 0 on success, 1 on failure.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* wait up to ~1ms for the PCI master to drain */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset (up to 5 bounded attempts) */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
2080
/* Capture the 60xx PHY amplitude/pre-emphasis for port @idx.  If the
 * reset-config register says the per-port values are not valid (bit 0
 * clear), fall back to fixed defaults instead of reading PHY_MODE2.
 */
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		/* defaults when the strap/config bit is clear */
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
2100
/* Enable LED handling on 60xx parts: set bits 5 and 6 (only) of the
 * GPIO port control register.
 */
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
2105
Jeff Garzikc9d39132005-11-13 17:47:51 -05002106static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002107 unsigned int port)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002108{
Jeff Garzikc9d39132005-11-13 17:47:51 -05002109 void __iomem *port_mmio = mv_port_base(mmio, port);
2110
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002111 u32 hp_flags = hpriv->hp_flags;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002112 int fix_phy_mode2 =
2113 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002114 int fix_phy_mode4 =
Jeff Garzik47c2b672005-11-12 21:13:17 -05002115 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2116 u32 m2, tmp;
2117
2118 if (fix_phy_mode2) {
2119 m2 = readl(port_mmio + PHY_MODE2);
2120 m2 &= ~(1 << 16);
2121 m2 |= (1 << 31);
2122 writel(m2, port_mmio + PHY_MODE2);
2123
2124 udelay(200);
2125
2126 m2 = readl(port_mmio + PHY_MODE2);
2127 m2 &= ~((1 << 16) | (1 << 31));
2128 writel(m2, port_mmio + PHY_MODE2);
2129
2130 udelay(200);
2131 }
2132
2133 /* who knows what this magic does */
2134 tmp = readl(port_mmio + PHY_MODE3);
2135 tmp &= ~0x7F800000;
2136 tmp |= 0x2A800000;
2137 writel(tmp, port_mmio + PHY_MODE3);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002138
2139 if (fix_phy_mode4) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002140 u32 m4;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002141
2142 m4 = readl(port_mmio + PHY_MODE4);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002143
2144 if (hp_flags & MV_HP_ERRATA_60X1B2)
2145 tmp = readl(port_mmio + 0x310);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002146
2147 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2148
2149 writel(m4, port_mmio + PHY_MODE4);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002150
2151 if (hp_flags & MV_HP_ERRATA_60X1B2)
2152 writel(tmp, port_mmio + 0x310);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002153 }
2154
2155 /* Revert values of pre-emphasis and signal amps to the saved ones */
2156 m2 = readl(port_mmio + PHY_MODE2);
2157
2158 m2 &= ~MV_M2_PREAMP_MASK;
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002159 m2 |= hpriv->signal[port].amps;
2160 m2 |= hpriv->signal[port].pre;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002161 m2 &= ~(1 << 16);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002162
Jeff Garzike4e7b892006-01-31 12:18:41 -05002163 /* according to mvSata 3.6.1, some IIE values are fixed */
2164 if (IS_GEN_IIE(hpriv)) {
2165 m2 &= ~0xC30FF01F;
2166 m2 |= 0x0000900F;
2167 }
2168
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002169 writel(m2, port_mmio + PHY_MODE2);
2170}
2171
/* Reset one SATA channel: assert ATA_RST in the EDMA command register,
 * program Gen II interface-control quirks, deassert the reset, then run
 * the per-generation PHY errata hook.
 */
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);	/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	/* Gen I (50xx) needs extra settle time after reset */
	if (IS_GEN_I(hpriv))
		mdelay(1);
}
2198
/**
 * mv_phy_reset - Perform eDMA reset followed by COMRESET
 * @ap: ATA channel to manipulate
 * @class: out: resulting device class (ATA_DEV_NONE if link offline)
 * @deadline: jiffies value after which we stop waiting for the link
 *
 * Part of this is taken from __sata_phy_reset and modified to
 * not sleep since this routine gets called from interrupt level.
 *
 * NOTE(review): despite the comment above, the body now uses msleep();
 * verify that no caller invokes this from atomic context.
 *
 * LOCKING:
 *	Inherited from caller.  This is coded to safe to call at
 *	interrupt level, i.e. it does not sleep.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			unsigned long deadline)
{
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;		/* COMRESET retries for the Gen-II errata below */
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
	{
		/* these intentionally shadow the outer sstatus */
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* Issue COMRESET via SControl: DET=1 asserts, DET=0 releases */
comreset_retry:
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
	msleep(1);

	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
	msleep(20);

	/* poll SStatus until DET settles (0 = no device, 3 = link up) */
	do {
		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata: retry COMRESET on unexpected SStatus values */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

#ifdef DEBUG
	{
		/* these intentionally shadow the outer sstatus */
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* no device behind this link: report and bail */
	if (ata_link_offline(&ap->link)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		msleep(500);
		if (retry-- <= 0)
			break;
		if (time_after(jiffies, deadline))
			break;
	}

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap->link.device, 1, NULL);

	/* clear any EDMA error latched during the reset sequence */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* eDMA must have been stopped before we got here */
	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}
2302
Tejun Heocc0680a2007-08-06 18:36:23 +09002303static int mv_prereset(struct ata_link *link, unsigned long deadline)
Jeff Garzik22374672005-11-17 10:59:48 -05002304{
Tejun Heocc0680a2007-08-06 18:36:23 +09002305 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002306 struct mv_port_priv *pp = ap->private_data;
Tejun Heocc0680a2007-08-06 18:36:23 +09002307 struct ata_eh_context *ehc = &link->eh_context;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002308 int rc;
Jeff Garzik0ea9e172007-07-13 17:06:45 -04002309
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002310 rc = mv_stop_dma(ap);
2311 if (rc)
2312 ehc->i.action |= ATA_EH_HARDRESET;
2313
2314 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2315 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2316 ehc->i.action |= ATA_EH_HARDRESET;
2317 }
2318
2319 /* if we're about to do hardreset, nothing more to do */
2320 if (ehc->i.action & ATA_EH_HARDRESET)
2321 return 0;
2322
Tejun Heocc0680a2007-08-06 18:36:23 +09002323 if (ata_link_online(link))
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002324 rc = ata_wait_ready(ap, deadline);
2325 else
2326 rc = -ENODEV;
2327
2328 return rc;
Jeff Garzik22374672005-11-17 10:59:48 -05002329}
2330
/**
 * mv_hardreset - full channel reset followed by PHY wake/classification
 * @link: link to reset
 * @class: out: device class, filled in by mv_phy_reset()
 * @deadline: jiffies deadline for the reset to complete
 *
 * Stops any active eDMA, pulses ATA_RST on the channel, then runs
 * the COMRESET/classification sequence.  Always reports success.
 *
 * LOCKING:
 *	Inherited from caller.
 */
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	/* quiesce DMA before touching the channel reset bit */
	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	return 0;
}
2346
Tejun Heocc0680a2007-08-06 18:36:23 +09002347static void mv_postreset(struct ata_link *link, unsigned int *classes)
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002348{
Tejun Heocc0680a2007-08-06 18:36:23 +09002349 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002350 u32 serr;
2351
2352 /* print link status */
Tejun Heocc0680a2007-08-06 18:36:23 +09002353 sata_print_link_status(link);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002354
2355 /* clear SError */
Tejun Heocc0680a2007-08-06 18:36:23 +09002356 sata_scr_read(link, SCR_ERROR, &serr);
2357 sata_scr_write_flush(link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002358
2359 /* bail out if no device is present */
2360 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2361 DPRINTK("EXIT, no device\n");
2362 return;
2363 }
2364
2365 /* set up device control */
2366 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2367}
2368
/* libata error-handler entry point: run standard EH with this driver's
 * prereset/softreset/hardreset/postreset methods.
 */
static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}
2374
/* Called by EH after an internal command completes: quiesce the eDMA
 * engine on that port.
 */
static void mv_post_int_cmd(struct ata_queued_cmd *qc)
{
	mv_stop_dma(qc->ap);
}
2379
2380static void mv_eh_freeze(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04002381{
Tejun Heo0d5ff562007-02-01 15:06:36 +09002382 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002383 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2384 u32 tmp, mask;
2385 unsigned int shift;
Brett Russ31961942005-09-30 01:36:00 -04002386
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002387 /* FIXME: handle coalescing completion events properly */
Brett Russ31961942005-09-30 01:36:00 -04002388
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002389 shift = ap->port_no * 2;
2390 if (hc > 0)
2391 shift++;
Brett Russ31961942005-09-30 01:36:00 -04002392
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002393 mask = 0x3 << shift;
Brett Russ31961942005-09-30 01:36:00 -04002394
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002395 /* disable assertion of portN err, done events */
2396 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2397 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2398}
2399
2400static void mv_eh_thaw(struct ata_port *ap)
2401{
2402 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2403 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2404 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2405 void __iomem *port_mmio = mv_ap_base(ap);
2406 u32 tmp, mask, hc_irq_cause;
2407 unsigned int shift, hc_port_no = ap->port_no;
2408
2409 /* FIXME: handle coalescing completion events properly */
2410
2411 shift = ap->port_no * 2;
2412 if (hc > 0) {
2413 shift++;
2414 hc_port_no -= 4;
Mark Lord9b358e32006-05-19 16:21:03 -04002415 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002416
2417 mask = 0x3 << shift;
2418
2419 /* clear EDMA errors on this port */
2420 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2421
2422 /* clear pending irq events */
2423 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2424 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2425 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2426 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2427
2428 /* enable assertion of portN err, done events */
2429 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2430 writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
Brett Russ31961942005-09-30 01:36:00 -04002431}
2432
/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 *	Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup: each shadow taskfile register occupies a
	 * u32-wide slot in the shadow register block
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions.
	 * SError is write-1-to-clear, so write back what we read.
	 */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
2481
/**
 * mv_chip_id - identify the chip generation/revision and pick workarounds
 * @host: ATA host being initialized
 * @board_idx: board index from the PCI device-ID table
 *
 * Selects the hardware-ops vtable, sets generation and per-revision
 * errata flags, and records the PCI vs. PCIe interrupt register offsets
 * in hpriv.  Returns 0 on success, 1 on an invalid board index.
 */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* drop through: chip_7042 shares the chip_6042 setup below */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		dev_printk(KERN_ERR, &pdev->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	/* PCIe parts expose their interrupt cause/mask at different offsets */
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
2617
/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 * @board_idx: controller index
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * Returns 0 on success, or the first failing step's error code.
 *
 * LOCKING:
 *	Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask: silence everything while we configure */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* capture per-port PHY preamp/signal settings before resetting */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	/* per-port interface fixups and PHY errata workarounds */
	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);	/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	/* wire up shadow registers and describe each port */
	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);
		unsigned int offset = port_mmio - mmio;

		mv_port_init(&ap->ioaddr, port_mmio);

		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	/* and unmask interrupt generation for host regs */
	writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);

	/* Gen-I chips use a different main mask set than later parts */
	if (IS_GEN_I(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + hpriv->irq_cause_ofs),
		readl(mmio + hpriv->irq_mask_ofs));

done:
	return rc;
}
2713
Brett Russ05b308e2005-10-05 17:08:53 -04002714/**
2715 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002716 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002717 *
2718 * FIXME: complete this.
2719 *
2720 * LOCKING:
2721 * Inherited from caller.
2722 */
Tejun Heo4447d352007-04-17 23:44:08 +09002723static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002724{
Tejun Heo4447d352007-04-17 23:44:08 +09002725 struct pci_dev *pdev = to_pci_dev(host->dev);
2726 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07002727 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002728 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002729
2730 /* Use this to determine the HW stepping of the chip so we know
2731 * what errata to workaround
2732 */
Brett Russ31961942005-09-30 01:36:00 -04002733 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2734 if (scc == 0)
2735 scc_s = "SCSI";
2736 else if (scc == 0x01)
2737 scc_s = "RAID";
2738 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002739 scc_s = "?";
2740
2741 if (IS_GEN_I(hpriv))
2742 gen = "I";
2743 else if (IS_GEN_II(hpriv))
2744 gen = "II";
2745 else if (IS_GEN_IIE(hpriv))
2746 gen = "IIE";
2747 else
2748 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002749
Jeff Garzika9524a72005-10-30 14:39:11 -05002750 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002751 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2752 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002753 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2754}
2755
/**
 * mv_init_one - handle a positive probe of a Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * Allocates the ata_host and driver-private data, maps BARs,
 * initializes the adapter, and activates the host.  All resource
 * acquisition uses managed (devm/pcim) APIs, so nothing needs
 * explicit cleanup on the error paths.
 *
 * LOCKING:
 *	Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	/* print the driver version once, on the first probed device */
	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts: fall back to legacy INTx if MSI is off or fails */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
2818
/* module load entry point: register the PCI driver */
static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}
2823
/* module unload entry point: unregister the PCI driver */
static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}
2828
/* Module metadata, parameters, and entry-point registration */
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);