blob: 207c400c64dfad76f177c50c2ece410392dc24f5 [file] [log] [blame]
Brett Russ20f733e2005-09-01 18:26:17 -04001/*
2 * sata_mv.c - Marvell SATA support
3 *
Jeff Garzik8b260242005-11-12 12:32:50 -05004 * Copyright 2005: EMC Corporation, all rights reserved.
Jeff Garzike2b1be52005-11-18 14:04:23 -05005 * Copyright 2005 Red Hat, Inc. All rights reserved.
Brett Russ20f733e2005-09-01 18:26:17 -04006 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
Jeff Garzik4a05e202007-05-24 23:40:15 -040024/*
25 sata_mv TODO list:
26
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember a couple workarounds (one related to PCI-X)
30 are still needed.
31
Jeff Garzik4a05e202007-05-24 23:40:15 -040032 4) Add NCQ support (easy to intermediate, once new-EH support appears)
33
34 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
35
36 6) Add port multiplier support (intermediate)
37
Jeff Garzik4a05e202007-05-24 23:40:15 -040038 8) Develop a low-power-consumption strategy, and implement it.
39
40 9) [Experiment, low priority] See if ATAPI can be supported using
41 "unknown FIS" or "vendor-specific FIS" support, or something creative
42 like that.
43
44 10) [Experiment, low priority] Investigate interrupt coalescing.
45 Quite often, especially with PCI Message Signalled Interrupts (MSI),
46 the overhead reduced by interrupt mitigation is quite often not
47 worth the latency cost.
48
49 11) [Experiment, Marvell value added] Is it possible to use target
50 mode to cross-connect two Linux boxes with Marvell cards? If so,
51 creating LibATA target mode support would be very interesting.
52
53 Target mode, for those without docs, is the ability to directly
54 connect two SATA controllers.
55
56 13) Verify that 7042 is fully supported. I only have a 6042.
57
58*/
59
60
Brett Russ20f733e2005-09-01 18:26:17 -040061#include <linux/kernel.h>
62#include <linux/module.h>
63#include <linux/pci.h>
64#include <linux/init.h>
65#include <linux/blkdev.h>
66#include <linux/delay.h>
67#include <linux/interrupt.h>
Brett Russ20f733e2005-09-01 18:26:17 -040068#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050069#include <linux/device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040070#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050071#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040072#include <scsi/scsi_device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040073#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040074
75#define DRV_NAME "sata_mv"
Jeff Garzik6c087722007-10-12 00:16:23 -040076#define DRV_VERSION "1.01"
Brett Russ20f733e2005-09-01 18:26:17 -040077
78enum {
79 /* BAR's are enumerated in terms of pci_resource_start() terms */
80 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
81 MV_IO_BAR = 2, /* offset 0x18: IO space */
82 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
83
84 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
85 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
86
87 MV_PCI_REG_BASE = 0,
88 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
Mark Lord615ab952006-05-19 16:24:56 -040089 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
90 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
91 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
92 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
93 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
94
Brett Russ20f733e2005-09-01 18:26:17 -040095 MV_SATAHC0_REG_BASE = 0x20000,
Jeff Garzik522479f2005-11-12 22:14:02 -050096 MV_FLASH_CTL = 0x1046c,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -050097 MV_GPIO_PORT_CTL = 0x104f0,
98 MV_RESET_CFG = 0x180d8,
Brett Russ20f733e2005-09-01 18:26:17 -040099
100 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
101 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
102 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
103 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
104
Brett Russ31961942005-09-30 01:36:00 -0400105 MV_MAX_Q_DEPTH = 32,
106 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
107
108 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
109 * CRPB needs alignment on a 256B boundary. Size == 256B
110 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
111 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
112 */
113 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
114 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
115 MV_MAX_SG_CT = 176,
116 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
117 MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
118
Brett Russ20f733e2005-09-01 18:26:17 -0400119 MV_PORTS_PER_HC = 4,
120 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
121 MV_PORT_HC_SHIFT = 2,
Brett Russ31961942005-09-30 01:36:00 -0400122 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
Brett Russ20f733e2005-09-01 18:26:17 -0400123 MV_PORT_MASK = 3,
124
125 /* Host Flags */
126 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
127 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400128 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400129 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
130 ATA_FLAG_PIO_POLLING,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500131 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
Brett Russ20f733e2005-09-01 18:26:17 -0400132
Brett Russ31961942005-09-30 01:36:00 -0400133 CRQB_FLAG_READ = (1 << 0),
134 CRQB_TAG_SHIFT = 1,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400135 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
136 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
Brett Russ31961942005-09-30 01:36:00 -0400137 CRQB_CMD_ADDR_SHIFT = 8,
138 CRQB_CMD_CS = (0x2 << 11),
139 CRQB_CMD_LAST = (1 << 15),
140
141 CRPB_FLAG_STATUS_SHIFT = 8,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400142 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
143 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
Brett Russ31961942005-09-30 01:36:00 -0400144
145 EPRD_FLAG_END_OF_TBL = (1 << 31),
146
Brett Russ20f733e2005-09-01 18:26:17 -0400147 /* PCI interface registers */
148
Brett Russ31961942005-09-30 01:36:00 -0400149 PCI_COMMAND_OFS = 0xc00,
150
Brett Russ20f733e2005-09-01 18:26:17 -0400151 PCI_MAIN_CMD_STS_OFS = 0xd30,
152 STOP_PCI_MASTER = (1 << 2),
153 PCI_MASTER_EMPTY = (1 << 3),
154 GLOB_SFT_RST = (1 << 4),
155
Jeff Garzik522479f2005-11-12 22:14:02 -0500156 MV_PCI_MODE = 0xd00,
157 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
158 MV_PCI_DISC_TIMER = 0xd04,
159 MV_PCI_MSI_TRIGGER = 0xc38,
160 MV_PCI_SERR_MASK = 0xc28,
161 MV_PCI_XBAR_TMOUT = 0x1d04,
162 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
163 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
164 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
165 MV_PCI_ERR_COMMAND = 0x1d50,
166
Mark Lord02a121d2007-12-01 13:07:22 -0500167 PCI_IRQ_CAUSE_OFS = 0x1d58,
168 PCI_IRQ_MASK_OFS = 0x1d5c,
Brett Russ20f733e2005-09-01 18:26:17 -0400169 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
170
Mark Lord02a121d2007-12-01 13:07:22 -0500171 PCIE_IRQ_CAUSE_OFS = 0x1900,
172 PCIE_IRQ_MASK_OFS = 0x1910,
Mark Lord646a4da2008-01-26 18:30:37 -0500173 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
Mark Lord02a121d2007-12-01 13:07:22 -0500174
Brett Russ20f733e2005-09-01 18:26:17 -0400175 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
176 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
177 PORT0_ERR = (1 << 0), /* shift by port # */
178 PORT0_DONE = (1 << 1), /* shift by port # */
179 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
180 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
181 PCI_ERR = (1 << 18),
182 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
183 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500184 PORTS_0_3_COAL_DONE = (1 << 8),
185 PORTS_4_7_COAL_DONE = (1 << 17),
Brett Russ20f733e2005-09-01 18:26:17 -0400186 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
187 GPIO_INT = (1 << 22),
188 SELF_INT = (1 << 23),
189 TWSI_INT = (1 << 24),
190 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500191 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
Jeff Garzik8b260242005-11-12 12:32:50 -0500192 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
Brett Russ20f733e2005-09-01 18:26:17 -0400193 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
194 HC_MAIN_RSVD),
Jeff Garzikfb621e22007-02-25 04:19:45 -0500195 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
196 HC_MAIN_RSVD_5),
Brett Russ20f733e2005-09-01 18:26:17 -0400197
198 /* SATAHC registers */
199 HC_CFG_OFS = 0,
200
201 HC_IRQ_CAUSE_OFS = 0x14,
Brett Russ31961942005-09-30 01:36:00 -0400202 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
Brett Russ20f733e2005-09-01 18:26:17 -0400203 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
204 DEV_IRQ = (1 << 8), /* shift by port # */
205
206 /* Shadow block registers */
Brett Russ31961942005-09-30 01:36:00 -0400207 SHD_BLK_OFS = 0x100,
208 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
Brett Russ20f733e2005-09-01 18:26:17 -0400209
210 /* SATA registers */
211 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
212 SATA_ACTIVE_OFS = 0x350,
Mark Lord0c589122008-01-26 18:31:16 -0500213 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500214 PHY_MODE3 = 0x310,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500215 PHY_MODE4 = 0x314,
216 PHY_MODE2 = 0x330,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500217 MV5_PHY_MODE = 0x74,
218 MV5_LT_MODE = 0x30,
219 MV5_PHY_CTL = 0x0C,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500220 SATA_INTERFACE_CTL = 0x050,
221
222 MV_M2_PREAMP_MASK = 0x7e0,
Brett Russ20f733e2005-09-01 18:26:17 -0400223
224 /* Port registers */
225 EDMA_CFG_OFS = 0,
Mark Lord0c589122008-01-26 18:31:16 -0500226 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
227 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
228 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
229 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
230 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
Brett Russ20f733e2005-09-01 18:26:17 -0400231
232 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
233 EDMA_ERR_IRQ_MASK_OFS = 0xc,
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400234 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
235 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
236 EDMA_ERR_DEV = (1 << 2), /* device error */
237 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
238 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
239 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400240 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
241 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400242 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400243 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400244 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
245 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
246 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
247 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
Mark Lord646a4da2008-01-26 18:30:37 -0500248
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400249 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500250 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
251 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
252 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
253 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
254
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400255 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500256
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400257 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500258 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
259 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
260 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
261 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
262 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
263
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400264 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500265
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400266 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400267 EDMA_ERR_OVERRUN_5 = (1 << 5),
268 EDMA_ERR_UNDERRUN_5 = (1 << 6),
Mark Lord646a4da2008-01-26 18:30:37 -0500269
270 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
271 EDMA_ERR_LNK_CTRL_RX_1 |
272 EDMA_ERR_LNK_CTRL_RX_3 |
273 EDMA_ERR_LNK_CTRL_TX,
274
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400275 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
276 EDMA_ERR_PRD_PAR |
277 EDMA_ERR_DEV_DCON |
278 EDMA_ERR_DEV_CON |
279 EDMA_ERR_SERR |
280 EDMA_ERR_SELF_DIS |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400281 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400282 EDMA_ERR_CRPB_PAR |
283 EDMA_ERR_INTRL_PAR |
284 EDMA_ERR_IORDY |
285 EDMA_ERR_LNK_CTRL_RX_2 |
286 EDMA_ERR_LNK_DATA_RX |
287 EDMA_ERR_LNK_DATA_TX |
288 EDMA_ERR_TRANS_PROTO,
289 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
290 EDMA_ERR_PRD_PAR |
291 EDMA_ERR_DEV_DCON |
292 EDMA_ERR_DEV_CON |
293 EDMA_ERR_OVERRUN_5 |
294 EDMA_ERR_UNDERRUN_5 |
295 EDMA_ERR_SELF_DIS_5 |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400296 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400297 EDMA_ERR_CRPB_PAR |
298 EDMA_ERR_INTRL_PAR |
299 EDMA_ERR_IORDY,
Brett Russ20f733e2005-09-01 18:26:17 -0400300
Brett Russ31961942005-09-30 01:36:00 -0400301 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
302 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400303
304 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
305 EDMA_REQ_Q_PTR_SHIFT = 5,
306
307 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
308 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
309 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400310 EDMA_RSP_Q_PTR_SHIFT = 3,
311
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400312 EDMA_CMD_OFS = 0x28, /* EDMA command register */
313 EDMA_EN = (1 << 0), /* enable EDMA */
314 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
315 ATA_RST = (1 << 2), /* reset trans/link/phy */
Brett Russ20f733e2005-09-01 18:26:17 -0400316
Jeff Garzikc9d39132005-11-13 17:47:51 -0500317 EDMA_IORDY_TMOUT = 0x34,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500318 EDMA_ARB_CFG = 0x38,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500319
Brett Russ31961942005-09-30 01:36:00 -0400320 /* Host private flags (hp_flags) */
321 MV_HP_FLAG_MSI = (1 << 0),
Jeff Garzik47c2b672005-11-12 21:13:17 -0500322 MV_HP_ERRATA_50XXB0 = (1 << 1),
323 MV_HP_ERRATA_50XXB2 = (1 << 2),
324 MV_HP_ERRATA_60X1B2 = (1 << 3),
325 MV_HP_ERRATA_60X1C0 = (1 << 4),
Jeff Garzike4e7b892006-01-31 12:18:41 -0500326 MV_HP_ERRATA_XX42A0 = (1 << 5),
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400327 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
328 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
329 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
Mark Lord02a121d2007-12-01 13:07:22 -0500330 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
Brett Russ20f733e2005-09-01 18:26:17 -0400331
Brett Russ31961942005-09-30 01:36:00 -0400332 /* Port private flags (pp_flags) */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400333 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
Mark Lord72109162008-01-26 18:31:33 -0500334 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400335 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
Brett Russ31961942005-09-30 01:36:00 -0400336};
337
Jeff Garzikee9ccdf2007-07-12 15:51:22 -0400338#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
339#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
Jeff Garzike4e7b892006-01-31 12:18:41 -0500340#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500341
Jeff Garzik095fec82005-11-12 09:50:49 -0500342enum {
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400343 /* DMA boundary 0xffff is required by the s/g splitting
344 * we need on /length/ in mv_fill-sg().
345 */
346 MV_DMA_BOUNDARY = 0xffffU,
Jeff Garzik095fec82005-11-12 09:50:49 -0500347
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400348 /* mask of register bits containing lower 32 bits
349 * of EDMA request queue DMA address
350 */
Jeff Garzik095fec82005-11-12 09:50:49 -0500351 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
352
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400353 /* ditto, for response queue */
Jeff Garzik095fec82005-11-12 09:50:49 -0500354 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
355};
356
Jeff Garzik522479f2005-11-12 22:14:02 -0500357enum chip_type {
358 chip_504x,
359 chip_508x,
360 chip_5080,
361 chip_604x,
362 chip_608x,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500363 chip_6042,
364 chip_7042,
Jeff Garzik522479f2005-11-12 22:14:02 -0500365};
366
Brett Russ31961942005-09-30 01:36:00 -0400367/* Command ReQuest Block: 32B */
368struct mv_crqb {
Mark Lorde1469872006-05-22 19:02:03 -0400369 __le32 sg_addr;
370 __le32 sg_addr_hi;
371 __le16 ctrl_flags;
372 __le16 ata_cmd[11];
Brett Russ31961942005-09-30 01:36:00 -0400373};
374
Jeff Garzike4e7b892006-01-31 12:18:41 -0500375struct mv_crqb_iie {
Mark Lorde1469872006-05-22 19:02:03 -0400376 __le32 addr;
377 __le32 addr_hi;
378 __le32 flags;
379 __le32 len;
380 __le32 ata_cmd[4];
Jeff Garzike4e7b892006-01-31 12:18:41 -0500381};
382
Brett Russ31961942005-09-30 01:36:00 -0400383/* Command ResPonse Block: 8B */
384struct mv_crpb {
Mark Lorde1469872006-05-22 19:02:03 -0400385 __le16 id;
386 __le16 flags;
387 __le32 tmstmp;
Brett Russ31961942005-09-30 01:36:00 -0400388};
389
390/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
391struct mv_sg {
Mark Lorde1469872006-05-22 19:02:03 -0400392 __le32 addr;
393 __le32 flags_size;
394 __le32 addr_hi;
395 __le32 reserved;
Brett Russ20f733e2005-09-01 18:26:17 -0400396};
397
398struct mv_port_priv {
Brett Russ31961942005-09-30 01:36:00 -0400399 struct mv_crqb *crqb;
400 dma_addr_t crqb_dma;
401 struct mv_crpb *crpb;
402 dma_addr_t crpb_dma;
403 struct mv_sg *sg_tbl;
404 dma_addr_t sg_tbl_dma;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400405
406 unsigned int req_idx;
407 unsigned int resp_idx;
408
Brett Russ31961942005-09-30 01:36:00 -0400409 u32 pp_flags;
Brett Russ20f733e2005-09-01 18:26:17 -0400410};
411
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500412struct mv_port_signal {
413 u32 amps;
414 u32 pre;
415};
416
Mark Lord02a121d2007-12-01 13:07:22 -0500417struct mv_host_priv {
418 u32 hp_flags;
419 struct mv_port_signal signal[8];
420 const struct mv_hw_ops *ops;
421 u32 irq_cause_ofs;
422 u32 irq_mask_ofs;
423 u32 unmask_all_irqs;
424};
425
Jeff Garzik47c2b672005-11-12 21:13:17 -0500426struct mv_hw_ops {
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500427 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
428 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500429 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
430 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
431 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500432 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
433 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500434 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
435 void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500436};
437
Brett Russ20f733e2005-09-01 18:26:17 -0400438static void mv_irq_clear(struct ata_port *ap);
Tejun Heoda3dbb12007-07-16 14:29:40 +0900439static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
440static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
441static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
442static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Brett Russ31961942005-09-30 01:36:00 -0400443static int mv_port_start(struct ata_port *ap);
444static void mv_port_stop(struct ata_port *ap);
445static void mv_qc_prep(struct ata_queued_cmd *qc);
Jeff Garzike4e7b892006-01-31 12:18:41 -0500446static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
Tejun Heo9a3d9eb2006-01-23 13:09:36 +0900447static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400448static void mv_error_handler(struct ata_port *ap);
449static void mv_post_int_cmd(struct ata_queued_cmd *qc);
450static void mv_eh_freeze(struct ata_port *ap);
451static void mv_eh_thaw(struct ata_port *ap);
Brett Russ20f733e2005-09-01 18:26:17 -0400452static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
453
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500454static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
455 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500456static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
457static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
458 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500459static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
460 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500461static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
462static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500463
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500464static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
465 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500466static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
467static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
468 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500469static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
470 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500471static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
472static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500473static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
474 unsigned int port_no);
Mark Lord72109162008-01-26 18:31:33 -0500475static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
476 void __iomem *port_mmio, int want_ncq);
477static int __mv_stop_dma(struct ata_port *ap);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500478
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400479static struct scsi_host_template mv5_sht = {
Brett Russ20f733e2005-09-01 18:26:17 -0400480 .module = THIS_MODULE,
481 .name = DRV_NAME,
482 .ioctl = ata_scsi_ioctl,
483 .queuecommand = ata_scsi_queuecmd,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400484 .can_queue = ATA_DEF_QUEUE,
485 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400486 .sg_tablesize = MV_MAX_SG_CT / 2,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400487 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
488 .emulated = ATA_SHT_EMULATED,
489 .use_clustering = 1,
490 .proc_name = DRV_NAME,
491 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzik3be6cbd2007-10-18 16:21:18 -0400492 .slave_configure = ata_scsi_slave_config,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400493 .slave_destroy = ata_scsi_slave_destroy,
494 .bios_param = ata_std_bios_param,
495};
496
497static struct scsi_host_template mv6_sht = {
498 .module = THIS_MODULE,
499 .name = DRV_NAME,
500 .ioctl = ata_scsi_ioctl,
501 .queuecommand = ata_scsi_queuecmd,
502 .can_queue = ATA_DEF_QUEUE,
Brett Russ20f733e2005-09-01 18:26:17 -0400503 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400504 .sg_tablesize = MV_MAX_SG_CT / 2,
Brett Russ20f733e2005-09-01 18:26:17 -0400505 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
506 .emulated = ATA_SHT_EMULATED,
Jeff Garzikd88184f2007-02-26 01:26:06 -0500507 .use_clustering = 1,
Brett Russ20f733e2005-09-01 18:26:17 -0400508 .proc_name = DRV_NAME,
509 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzik3be6cbd2007-10-18 16:21:18 -0400510 .slave_configure = ata_scsi_slave_config,
Tejun Heoccf68c32006-05-31 18:28:09 +0900511 .slave_destroy = ata_scsi_slave_destroy,
Brett Russ20f733e2005-09-01 18:26:17 -0400512 .bios_param = ata_std_bios_param,
Brett Russ20f733e2005-09-01 18:26:17 -0400513};
514
Jeff Garzikc9d39132005-11-13 17:47:51 -0500515static const struct ata_port_operations mv5_ops = {
Jeff Garzikc9d39132005-11-13 17:47:51 -0500516 .tf_load = ata_tf_load,
517 .tf_read = ata_tf_read,
518 .check_status = ata_check_status,
519 .exec_command = ata_exec_command,
520 .dev_select = ata_std_dev_select,
521
Jeff Garzikcffacd82007-03-09 09:46:47 -0500522 .cable_detect = ata_cable_sata,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500523
524 .qc_prep = mv_qc_prep,
525 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900526 .data_xfer = ata_data_xfer,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500527
Jeff Garzikc9d39132005-11-13 17:47:51 -0500528 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900529 .irq_on = ata_irq_on,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500530
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400531 .error_handler = mv_error_handler,
532 .post_internal_cmd = mv_post_int_cmd,
533 .freeze = mv_eh_freeze,
534 .thaw = mv_eh_thaw,
535
Jeff Garzikc9d39132005-11-13 17:47:51 -0500536 .scr_read = mv5_scr_read,
537 .scr_write = mv5_scr_write,
538
539 .port_start = mv_port_start,
540 .port_stop = mv_port_stop,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500541};
542
543static const struct ata_port_operations mv6_ops = {
Brett Russ20f733e2005-09-01 18:26:17 -0400544 .tf_load = ata_tf_load,
545 .tf_read = ata_tf_read,
546 .check_status = ata_check_status,
547 .exec_command = ata_exec_command,
548 .dev_select = ata_std_dev_select,
549
Jeff Garzikcffacd82007-03-09 09:46:47 -0500550 .cable_detect = ata_cable_sata,
Brett Russ20f733e2005-09-01 18:26:17 -0400551
Brett Russ31961942005-09-30 01:36:00 -0400552 .qc_prep = mv_qc_prep,
553 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900554 .data_xfer = ata_data_xfer,
Brett Russ20f733e2005-09-01 18:26:17 -0400555
Brett Russ20f733e2005-09-01 18:26:17 -0400556 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900557 .irq_on = ata_irq_on,
Brett Russ20f733e2005-09-01 18:26:17 -0400558
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400559 .error_handler = mv_error_handler,
560 .post_internal_cmd = mv_post_int_cmd,
561 .freeze = mv_eh_freeze,
562 .thaw = mv_eh_thaw,
563
Brett Russ20f733e2005-09-01 18:26:17 -0400564 .scr_read = mv_scr_read,
565 .scr_write = mv_scr_write,
566
Brett Russ31961942005-09-30 01:36:00 -0400567 .port_start = mv_port_start,
568 .port_stop = mv_port_stop,
Brett Russ20f733e2005-09-01 18:26:17 -0400569};
570
Jeff Garzike4e7b892006-01-31 12:18:41 -0500571static const struct ata_port_operations mv_iie_ops = {
Jeff Garzike4e7b892006-01-31 12:18:41 -0500572 .tf_load = ata_tf_load,
573 .tf_read = ata_tf_read,
574 .check_status = ata_check_status,
575 .exec_command = ata_exec_command,
576 .dev_select = ata_std_dev_select,
577
Jeff Garzikcffacd82007-03-09 09:46:47 -0500578 .cable_detect = ata_cable_sata,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500579
580 .qc_prep = mv_qc_prep_iie,
581 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900582 .data_xfer = ata_data_xfer,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500583
Jeff Garzike4e7b892006-01-31 12:18:41 -0500584 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900585 .irq_on = ata_irq_on,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500586
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400587 .error_handler = mv_error_handler,
588 .post_internal_cmd = mv_post_int_cmd,
589 .freeze = mv_eh_freeze,
590 .thaw = mv_eh_thaw,
591
Jeff Garzike4e7b892006-01-31 12:18:41 -0500592 .scr_read = mv_scr_read,
593 .scr_write = mv_scr_write,
594
595 .port_start = mv_port_start,
596 .port_stop = mv_port_stop,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500597};
598
Arjan van de Ven98ac62d2005-11-28 10:06:23 +0100599static const struct ata_port_info mv_port_info[] = {
Brett Russ20f733e2005-09-01 18:26:17 -0400600 { /* chip_504x */
Jeff Garzikcca39742006-08-24 03:19:22 -0400601 .flags = MV_COMMON_FLAGS,
Brett Russ31961942005-09-30 01:36:00 -0400602 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400603 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500604 .port_ops = &mv5_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400605 },
606 { /* chip_508x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400607 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
Brett Russ31961942005-09-30 01:36:00 -0400608 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400609 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500610 .port_ops = &mv5_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400611 },
Jeff Garzik47c2b672005-11-12 21:13:17 -0500612 { /* chip_5080 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400613 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500614 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400615 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500616 .port_ops = &mv5_ops,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500617 },
Brett Russ20f733e2005-09-01 18:26:17 -0400618 { /* chip_604x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400619 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Brett Russ31961942005-09-30 01:36:00 -0400620 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400621 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500622 .port_ops = &mv6_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400623 },
624 { /* chip_608x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400625 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
626 MV_FLAG_DUAL_HC,
Brett Russ31961942005-09-30 01:36:00 -0400627 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400628 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500629 .port_ops = &mv6_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400630 },
Jeff Garzike4e7b892006-01-31 12:18:41 -0500631 { /* chip_6042 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400632 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500633 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400634 .udma_mask = ATA_UDMA6,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500635 .port_ops = &mv_iie_ops,
636 },
637 { /* chip_7042 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400638 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500639 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400640 .udma_mask = ATA_UDMA6,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500641 .port_ops = &mv_iie_ops,
642 },
Brett Russ20f733e2005-09-01 18:26:17 -0400643};
644
/*
 * PCI IDs claimed by this driver.  The driver_data field of each entry
 * selects the board index (chip_xxx) used to look up per-chip flags and
 * port operations in mv_port_info[].
 */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
674
/* PCI driver glue: probe via mv_init_one, teardown via generic libata remove. */
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
681
/*
 * Low-level hardware hooks used for the 50xx-family chips
 * (PHY errata handling, LED enable, preamp readout, resets).
 */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
690
/*
 * Low-level hardware hooks used for the 60xx/70xx-family chips;
 * same slots as mv5xxx_ops but with the mv6_* implementations
 * (and the PCI bus reset variant).
 */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
699
/*
 * module options
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
704
705
Jeff Garzikd88184f2007-02-26 01:26:06 -0500706/* move to PCI layer or libata core? */
707static int pci_go_64(struct pci_dev *pdev)
708{
709 int rc;
710
711 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
712 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
713 if (rc) {
714 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
715 if (rc) {
716 dev_printk(KERN_ERR, &pdev->dev,
717 "64-bit DMA enable failed\n");
718 return rc;
719 }
720 }
721 } else {
722 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
723 if (rc) {
724 dev_printk(KERN_ERR, &pdev->dev,
725 "32-bit DMA enable failed\n");
726 return rc;
727 }
728 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
729 if (rc) {
730 dev_printk(KERN_ERR, &pdev->dev,
731 "32-bit consistent DMA enable failed\n");
732 return rc;
733 }
734 }
735
736 return rc;
737}
738
Jeff Garzikddef9bb2006-02-02 16:17:06 -0500739/*
Brett Russ20f733e2005-09-01 18:26:17 -0400740 * Functions
741 */
742
/* Write a register, then read it back to flush any PCI posted write. */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
748
Brett Russ20f733e2005-09-01 18:26:17 -0400749static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
750{
751 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
752}
753
/* Map a global port number to its host-controller unit index. */
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}
758
/* Map a global port number to its index within its host-controller unit. */
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
763
/* MMIO base of the host-controller unit that owns global port 'port'. */
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}
769
Brett Russ20f733e2005-09-01 18:26:17 -0400770static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
771{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500772 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500773 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500774 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400775}
776
/* Port register base for an ata_port, resolved through the primary BAR. */
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}
781
Jeff Garzikcca39742006-08-24 03:19:22 -0400782static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400783{
Jeff Garzikcca39742006-08-24 03:19:22 -0400784 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400785}
786
/* libata .irq_clear hook: intentionally a no-op for this driver. */
static void mv_irq_clear(struct ata_port *ap)
{
}
790
/*
 * mv_set_edma_ptrs - Program the EDMA request/response queue registers.
 * @port_mmio: port register base
 * @hpriv: host private data (consulted for chip errata flags)
 * @pp: port private data supplying queue DMA addresses and sw indices
 *
 * Writes the high/low halves of the CRQB and CRPB queue base addresses
 * and (re)synchronizes the hardware in/out pointers with the software
 * req_idx/resp_idx counters.  On XX42A0-errata chips the "out"/"in"
 * pointer registers need the full low base address OR'd in.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* queue base must be 1KB aligned (low 10 address bits clear) */
	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	/* response queue base must be 256B aligned */
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
830
/**
 *      mv_start_dma - Enable eDMA engine
 *      @ap: ATA channel to manipulate
 *      @port_mmio: port register base
 *      @pp: port private data
 *      @protocol: taskfile protocol of the command about to be issued
 *
 *      If EDMA is already running but in the wrong NCQ/non-NCQ mode
 *      for @protocol, it is stopped first.  If EDMA is (then) off, the
 *      pending EDMA/HC interrupt causes are cleared, the EDMA config
 *      and queue pointers are reprogrammed, and EDMA is enabled.
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		/* mode change requires a full stop/reconfigure cycle */
		if (want_ncq != using_ncq)
			__mv_stop_dma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
				ap->host->iomap[MV_PRIMARY_BAR], hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			(CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}
883
/**
 *      __mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Requests EDMA disable (the disable bit auto-clears) and then
 *      polls up to ~100ms for the engine to report stopped.
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.  Returns 0 on success, -EIO if the engine refuses
 *      to stop.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		/* cached state says stopped; the hardware should agree */
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
926
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400927static int mv_stop_dma(struct ata_port *ap)
928{
929 unsigned long flags;
930 int rc;
931
932 spin_lock_irqsave(&ap->host->lock, flags);
933 rc = __mv_stop_dma(ap);
934 spin_unlock_irqrestore(&ap->host->lock, flags);
935
936 return rc;
937}
938
#ifdef ATA_DEBUG
/* Debug helper: dump 'bytes' of MMIO space from 'start', 4 words per line. */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
953
/* Debug helper: dump 'bytes' of PCI config space, 4 dwords per line. */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
970static void mv_dump_all_regs(void __iomem *mmio_base, int port,
971 struct pci_dev *pdev)
972{
973#ifdef ATA_DEBUG
Jeff Garzik8b260242005-11-12 12:32:50 -0500974 void __iomem *hc_base = mv_hc_base(mmio_base,
Brett Russ31961942005-09-30 01:36:00 -0400975 port >> MV_PORT_HC_SHIFT);
976 void __iomem *port_base;
977 int start_port, num_ports, p, start_hc, num_hcs, hc;
978
979 if (0 > port) {
980 start_hc = start_port = 0;
981 num_ports = 8; /* shld be benign for 4 port devs */
982 num_hcs = 2;
983 } else {
984 start_hc = port >> MV_PORT_HC_SHIFT;
985 start_port = port;
986 num_ports = num_hcs = 1;
987 }
Jeff Garzik8b260242005-11-12 12:32:50 -0500988 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
Brett Russ31961942005-09-30 01:36:00 -0400989 num_ports > 1 ? num_ports - 1 : start_port);
990
991 if (NULL != pdev) {
992 DPRINTK("PCI config space regs:\n");
993 mv_dump_pci_cfg(pdev, 0x68);
994 }
995 DPRINTK("PCI regs:\n");
996 mv_dump_mem(mmio_base+0xc00, 0x3c);
997 mv_dump_mem(mmio_base+0xd00, 0x34);
998 mv_dump_mem(mmio_base+0xf00, 0x4);
999 mv_dump_mem(mmio_base+0x1d00, 0x6c);
1000 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
Dan Alonid220c372006-04-10 23:20:22 -07001001 hc_base = mv_hc_base(mmio_base, hc);
Brett Russ31961942005-09-30 01:36:00 -04001002 DPRINTK("HC regs (HC %i):\n", hc);
1003 mv_dump_mem(hc_base, 0x1c);
1004 }
1005 for (p = start_port; p < start_port + num_ports; p++) {
1006 port_base = mv_port_base(mmio_base, p);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001007 DPRINTK("EDMA regs (port %i):\n", p);
Brett Russ31961942005-09-30 01:36:00 -04001008 mv_dump_mem(port_base, 0x54);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001009 DPRINTK("SATA regs (port %i):\n", p);
Brett Russ31961942005-09-30 01:36:00 -04001010 mv_dump_mem(port_base+0x300, 0x60);
1011 }
1012#endif
1013}
1014
Brett Russ20f733e2005-09-01 18:26:17 -04001015static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1016{
1017 unsigned int ofs;
1018
1019 switch (sc_reg_in) {
1020 case SCR_STATUS:
1021 case SCR_CONTROL:
1022 case SCR_ERROR:
1023 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1024 break;
1025 case SCR_ACTIVE:
1026 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1027 break;
1028 default:
1029 ofs = 0xffffffffU;
1030 break;
1031 }
1032 return ofs;
1033}
1034
Tejun Heoda3dbb12007-07-16 14:29:40 +09001035static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -04001036{
1037 unsigned int ofs = mv_scr_offset(sc_reg_in);
1038
Tejun Heoda3dbb12007-07-16 14:29:40 +09001039 if (ofs != 0xffffffffU) {
1040 *val = readl(mv_ap_base(ap) + ofs);
1041 return 0;
1042 } else
1043 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001044}
1045
Tejun Heoda3dbb12007-07-16 14:29:40 +09001046static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -04001047{
1048 unsigned int ofs = mv_scr_offset(sc_reg_in);
1049
Tejun Heoda3dbb12007-07-16 14:29:40 +09001050 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -04001051 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001052 return 0;
1053 } else
1054 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001055}
1056
/*
 * mv_edma_cfg - Program the EDMA configuration register for this port.
 * @pp: port private data (NCQ-enabled flag is cached here)
 * @hpriv: host private data (selects the chip generation)
 * @port_mmio: port register base
 * @want_ncq: non-zero to enable NCQ mode in the EDMA config
 *
 * Builds a generation-specific config word and writes it, keeping
 * pp->pp_flags' MV_PP_FLAG_NCQ_EN in sync with the hardware setting.
 */
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq)
{
	u32 cfg;

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1086
/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.  The single coherent chunk is carved into the
 *      CRQB request ring, the CRPB response ring, and the ePRD
 *      scatter/gather table, in that order.  All allocations are
 *      device-managed (devm/dmam), so no explicit error unwinding
 *      is needed.
 *
 *      Returns 0 on success, -ENOMEM or the ata_pad_alloc() error
 *      on failure.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	unsigned long flags;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	/* program config + queue pointers under the host lock */
	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(pp, hpriv, port_mmio, 0);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}
1159
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA; port memory is device-managed and freed automatically.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
}
1173
/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.  Segments that
 *      would cross a 64KB boundary are split into multiple ePRDs,
 *      since each ePRD length field is limited to 16 bits.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			/* clamp so this ePRD stays within a 64KB page */
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	/* flag the final ePRD so the hardware knows where the table ends */
	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
1217
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001218static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001219{
Mark Lord559eeda2006-05-19 16:40:15 -04001220 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001221 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001222 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001223}
1224
/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* non-DMA protocols are issued via the normal taskfile path */
	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1315
/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.  Gen IIE chips use a different CRQB
 *      layout (struct mv_crqb_iie) where the whole taskfile is
 *      packed into four 32-bit words.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* non-DMA protocols are issued via the normal taskfile path */
	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1383
/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	/* software copy of the request-queue producer index */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	pp->req_idx++;

	/* re-derive the producer index, pre-shifted into register position */
	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1430
Brett Russ05b308e2005-10-05 17:08:53 -04001431/**
Brett Russ05b308e2005-10-05 17:08:53 -04001432 * mv_err_intr - Handle error interrupts on the port
1433 * @ap: ATA channel to manipulate
Mark Lord9b358e32006-05-19 16:21:03 -04001434 * @reset_allowed: bool: 0 == don't trigger from reset here
Brett Russ05b308e2005-10-05 17:08:53 -04001435 *
1436 * In most cases, just clear the interrupt and move on. However,
1437 * some cases require an eDMA reset, which is done right before
1438 * the COMRESET in mv_phy_reset(). The SERR case requires a
1439 * clear of pending errors in the SATA SERROR register. Finally,
1440 * if the port disabled DMA, update our cached copy to match.
1441 *
1442 * LOCKING:
1443 * Inherited from caller.
1444 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001445static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
Brett Russ20f733e2005-09-01 18:26:17 -04001446{
Brett Russ31961942005-09-30 01:36:00 -04001447 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001448 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1449 struct mv_port_priv *pp = ap->private_data;
1450 struct mv_host_priv *hpriv = ap->host->private_data;
1451 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1452 unsigned int action = 0, err_mask = 0;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001453 struct ata_eh_info *ehi = &ap->link.eh_info;
Brett Russ20f733e2005-09-01 18:26:17 -04001454
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001455 ata_ehi_clear_desc(ehi);
Brett Russ20f733e2005-09-01 18:26:17 -04001456
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001457 if (!edma_enabled) {
1458 /* just a guess: do we need to do this? should we
1459 * expand this, and do it in all cases?
1460 */
Tejun Heo936fd732007-08-06 18:36:23 +09001461 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1462 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Brett Russ20f733e2005-09-01 18:26:17 -04001463 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001464
1465 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1466
1467 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1468
1469 /*
1470 * all generations share these EDMA error cause bits
1471 */
1472
1473 if (edma_err_cause & EDMA_ERR_DEV)
1474 err_mask |= AC_ERR_DEV;
1475 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
Jeff Garzik6c1153e2007-07-13 15:20:15 -04001476 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001477 EDMA_ERR_INTRL_PAR)) {
1478 err_mask |= AC_ERR_ATA_BUS;
1479 action |= ATA_EH_HARDRESET;
Tejun Heob64bbc32007-07-16 14:29:39 +09001480 ata_ehi_push_desc(ehi, "parity error");
Brett Russafb0edd2005-10-05 17:08:42 -04001481 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001482 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1483 ata_ehi_hotplugged(ehi);
1484 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
Tejun Heob64bbc32007-07-16 14:29:39 +09001485 "dev disconnect" : "dev connect");
Mark Lord3606a382008-01-26 18:28:23 -05001486 action |= ATA_EH_HARDRESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001487 }
1488
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04001489 if (IS_GEN_I(hpriv)) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001490 eh_freeze_mask = EDMA_EH_FREEZE_5;
1491
1492 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1493 struct mv_port_priv *pp = ap->private_data;
1494 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001495 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001496 }
1497 } else {
1498 eh_freeze_mask = EDMA_EH_FREEZE;
1499
1500 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1501 struct mv_port_priv *pp = ap->private_data;
1502 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001503 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001504 }
1505
1506 if (edma_err_cause & EDMA_ERR_SERR) {
Tejun Heo936fd732007-08-06 18:36:23 +09001507 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1508 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001509 err_mask = AC_ERR_ATA_BUS;
1510 action |= ATA_EH_HARDRESET;
1511 }
1512 }
Brett Russ20f733e2005-09-01 18:26:17 -04001513
1514 /* Clear EDMA now that SERR cleanup done */
Mark Lord3606a382008-01-26 18:28:23 -05001515 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001516
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001517 if (!err_mask) {
1518 err_mask = AC_ERR_OTHER;
1519 action |= ATA_EH_HARDRESET;
1520 }
1521
1522 ehi->serror |= serr;
1523 ehi->action |= action;
1524
1525 if (qc)
1526 qc->err_mask |= err_mask;
1527 else
1528 ehi->err_mask |= err_mask;
1529
1530 if (edma_err_cause & eh_freeze_mask)
1531 ata_port_freeze(ap);
1532 else
1533 ata_port_abort(ap);
1534}
1535
1536static void mv_intr_pio(struct ata_port *ap)
1537{
1538 struct ata_queued_cmd *qc;
1539 u8 ata_status;
1540
1541 /* ignore spurious intr if drive still BUSY */
1542 ata_status = readb(ap->ioaddr.status_addr);
1543 if (unlikely(ata_status & ATA_BUSY))
1544 return;
1545
1546 /* get active ATA command */
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001547 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001548 if (unlikely(!qc)) /* no active tag */
1549 return;
1550 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1551 return;
1552
1553 /* and finally, complete the ATA command */
1554 qc->err_mask |= ac_err_mask(ata_status);
1555 ata_qc_complete(qc);
1556}
1557
/* Drain the port's EDMA response queue, completing each finished qc.
 * On a (non-NCQ) error response, hands off to mv_err_intr() without
 * updating the hardware out-pointer.
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	/* write the (last-consumed) out-pointer back only once, after
	 * draining, to minimize MMIO traffic
	 */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1623
Brett Russ05b308e2005-10-05 17:08:53 -04001624/**
1625 * mv_host_intr - Handle all interrupts on the given host controller
Jeff Garzikcca39742006-08-24 03:19:22 -04001626 * @host: host specific structure
Brett Russ05b308e2005-10-05 17:08:53 -04001627 * @relevant: port error bits relevant to this host controller
1628 * @hc: which host controller we're to look at
1629 *
1630 * Read then write clear the HC interrupt status then walk each
1631 * port connected to the HC and see if it needs servicing. Port
1632 * success ints are reported in the HC interrupt status reg, the
1633 * port error ints are reported in the higher level main
1634 * interrupt status register and thus are passed in via the
1635 * 'relevant' argument.
1636 *
1637 * LOCKING:
1638 * Inherited from caller.
1639 */
Jeff Garzikcca39742006-08-24 03:19:22 -04001640static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
Brett Russ20f733e2005-09-01 18:26:17 -04001641{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001642 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
Brett Russ20f733e2005-09-01 18:26:17 -04001643 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
Brett Russ20f733e2005-09-01 18:26:17 -04001644 u32 hc_irq_cause;
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001645 int port, port0;
Brett Russ20f733e2005-09-01 18:26:17 -04001646
Jeff Garzik35177262007-02-24 21:26:42 -05001647 if (hc == 0)
Brett Russ20f733e2005-09-01 18:26:17 -04001648 port0 = 0;
Jeff Garzik35177262007-02-24 21:26:42 -05001649 else
Brett Russ20f733e2005-09-01 18:26:17 -04001650 port0 = MV_PORTS_PER_HC;
Brett Russ20f733e2005-09-01 18:26:17 -04001651
1652 /* we'll need the HC success int register in most cases */
1653 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001654 if (!hc_irq_cause)
1655 return;
1656
1657 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001658
1659 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001660 hc, relevant, hc_irq_cause);
Brett Russ20f733e2005-09-01 18:26:17 -04001661
1662 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
Jeff Garzikcca39742006-08-24 03:19:22 -04001663 struct ata_port *ap = host->ports[port];
Mark Lord63af2a52006-03-29 09:50:31 -05001664 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001665 int have_err_bits, hard_port, shift;
Jeff Garzik55d8ca42006-03-29 19:43:31 -05001666
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001667 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
Jeff Garzika2c91a82005-11-17 05:44:44 -05001668 continue;
1669
Brett Russ31961942005-09-30 01:36:00 -04001670 shift = port << 1; /* (port * 2) */
Brett Russ20f733e2005-09-01 18:26:17 -04001671 if (port >= MV_PORTS_PER_HC) {
1672 shift++; /* skip bit 8 in the HC Main IRQ reg */
1673 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001674 have_err_bits = ((PORT0_ERR << shift) & relevant);
1675
1676 if (unlikely(have_err_bits)) {
1677 struct ata_queued_cmd *qc;
1678
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001679 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001680 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1681 continue;
1682
1683 mv_err_intr(ap, qc);
1684 continue;
Brett Russ20f733e2005-09-01 18:26:17 -04001685 }
Jeff Garzik8b260242005-11-12 12:32:50 -05001686
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001687 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1688
1689 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1690 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1691 mv_intr_edma(ap);
1692 } else {
1693 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1694 mv_intr_pio(ap);
Brett Russ20f733e2005-09-01 18:26:17 -04001695 }
1696 }
1697 VPRINTK("EXIT\n");
1698}
1699
/* Handle a PCI-level error interrupt: log and clear the cause register,
 * dump chip registers for diagnosis, then freeze every online port so
 * libata EH can recover them.
 */
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	/* write-to-clear the PCI interrupt cause register */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			/* attach the cause description to the first port only */
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
1739
/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	u32 irq_stat, irq_mask;

	spin_lock(&host->lock);
	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
	irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	if (unlikely(irq_stat & PCI_ERR)) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	/* dispatch each host controller with a pending cause bit */
	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1792
Jeff Garzikc9d39132005-11-13 17:47:51 -05001793static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1794{
1795 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1796 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1797
1798 return hc_mmio + ofs;
1799}
1800
1801static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1802{
1803 unsigned int ofs;
1804
1805 switch (sc_reg_in) {
1806 case SCR_STATUS:
1807 case SCR_ERROR:
1808 case SCR_CONTROL:
1809 ofs = sc_reg_in * sizeof(u32);
1810 break;
1811 default:
1812 ofs = 0xffffffffU;
1813 break;
1814 }
1815 return ofs;
1816}
1817
Tejun Heoda3dbb12007-07-16 14:29:40 +09001818static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001819{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001820 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1821 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001822 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1823
Tejun Heoda3dbb12007-07-16 14:29:40 +09001824 if (ofs != 0xffffffffU) {
1825 *val = readl(addr + ofs);
1826 return 0;
1827 } else
1828 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001829}
1830
Tejun Heoda3dbb12007-07-16 14:29:40 +09001831static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001832{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001833 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1834 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001835 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1836
Tejun Heoda3dbb12007-07-16 14:29:40 +09001837 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001838 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001839 return 0;
1840 } else
1841 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001842}
1843
Jeff Garzik522479f2005-11-12 22:14:02 -05001844static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1845{
Jeff Garzik522479f2005-11-12 22:14:02 -05001846 int early_5080;
1847
Auke Kok44c10132007-06-08 15:46:36 -07001848 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
Jeff Garzik522479f2005-11-12 22:14:02 -05001849
1850 if (!early_5080) {
1851 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1852 tmp |= (1 << 0);
1853 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1854 }
1855
1856 mv_reset_pci_bus(pdev, mmio);
1857}
1858
/* Reset the flash controller on 50xx parts.
 * NOTE(review): the meaning of 0x0fcfffff is not documented here;
 * value presumably comes from the Marvell vendor driver — confirm.
 */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1863
Jeff Garzik47c2b672005-11-12 21:13:17 -05001864static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001865 void __iomem *mmio)
1866{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001867 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1868 u32 tmp;
1869
1870 tmp = readl(phy_mmio + MV5_PHY_MODE);
1871
1872 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1873 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001874}
1875
/* Enable board LEDs on 50xx parts via the GPIO port control and
 * EXP ROM BAR control registers.
 */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): "|= ~(1 << 0)" ORs in 0xfffffffe, setting every
	 * bit except bit 0.  Since mv5_reset_bus() *sets* bit 0 in this
	 * same register, this may have been intended as "&= ~(1 << 0)"
	 * (clear bit 0) — confirm against the Marvell vendor driver
	 * before changing behavior.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1888
/* Apply 50xx PHY errata fixups for one port and restore the saved
 * pre-emphasis/amplitude values captured by mv5_read_preamp().
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	/* pre (bits 12:11) and amps (bits 7:5) fields, plus bit 6 */
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		/* errata 50XXB0: adjust LT mode and PHY control */
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* clear the pre/amps fields, then restore the saved values */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1914
Jeff Garzikc9d39132005-11-13 17:47:51 -05001915
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Reset one 50xx port: disable EDMA, pulse the channel reset, then
 * bring the per-port EDMA registers to a known state.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1942
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Reset one 50xx host controller's shared register block. */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	/* NOTE(review): register 0x20 is undocumented here; the
	 * keep-mask/set values follow the original driver — confirm
	 * against the Marvell datasheet before altering.
	 */
	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
1961
1962static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1963 unsigned int n_hc)
1964{
1965 unsigned int hc, port;
1966
1967 for (hc = 0; hc < n_hc; hc++) {
1968 for (port = 0; port < MV_PORTS_PER_HC; port++)
1969 mv5_reset_hc_port(hpriv, mmio,
1970 (hc * MV_PORTS_PER_HC) + port);
1971
1972 mv5_reset_one_hc(hpriv, mmio, hc);
1973 }
1974
1975 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05001976}
1977
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
/* Reset the PCI-side registers shared by the whole chip: mode,
 * timers, interrupt cause/mask, and error-reporting registers.
 */
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;	/* clear bits 23:16 */
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	/* cause/mask offsets differ per chip family, hence hpriv */
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
2003
2004static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2005{
2006 u32 tmp;
2007
2008 mv5_reset_flash(hpriv, mmio);
2009
2010 tmp = readl(mmio + MV_GPIO_PORT_CTL);
2011 tmp &= 0x3;
2012 tmp |= (1 << 5) | (1 << 6);
2013 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2014}
2015
/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @mmio: base address of the HBA
 *
 *      This routine only applies to 6xxx parts.
 *
 *      Returns 0 on success, 1 on failure (with an error printk).
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* poll up to ~1ms for the PCI master to drain */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset (retry the write a few times until it latches) */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
2079
Jeff Garzik47c2b672005-11-12 21:13:17 -05002080static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002081 void __iomem *mmio)
2082{
2083 void __iomem *port_mmio;
2084 u32 tmp;
2085
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002086 tmp = readl(mmio + MV_RESET_CFG);
2087 if ((tmp & (1 << 0)) == 0) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002088 hpriv->signal[idx].amps = 0x7 << 8;
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002089 hpriv->signal[idx].pre = 0x1 << 5;
2090 return;
2091 }
2092
2093 port_mmio = mv_port_base(mmio, idx);
2094 tmp = readl(port_mmio + PHY_MODE2);
2095
2096 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2097 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2098}
2099
/* Enable board LEDs on 60xx parts: set bits 5 and 6 of the GPIO
 * port control register (same bits mv6_reset_flash() sets).
 */
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
2104
/* Apply 60xx (Gen II/IIE) PHY errata fixups for one port, then
 * restore the saved pre-emphasis/amplitude values from
 * hpriv->signal[] (captured by mv6_read_preamp()).
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		/* pulse bit 31 (with bit 16 cleared) for 200us, twice */
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* 60X1B2 errata: save register 0x310 across the update */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
2170
/**
 *      mv_channel_reset - assert and release ATA_RST for one channel
 *      @hpriv: host private data (for generation checks and errata ops)
 *      @mmio: base address of the host controller registers
 *      @port_no: port number on this host to reset
 *
 *      Pulses ATA_RST in the port's EDMA command register, applies the
 *      Gen-II interface-control fixup while reset is asserted, then runs
 *      the chip-specific phy_errata hook.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	/* assert reset on this channel's EDMA engine */
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);	/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	/* chip-specific PHY errata workarounds, set up in mv_chip_id() */
	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}
2197
/**
 *      mv_phy_reset - Perform eDMA reset followed by COMRESET
 *      @ap: ATA channel to manipulate
 *      @class: out parameter; receives the classification of the attached
 *              device (ATA_DEV_NONE if the link is offline)
 *      @deadline: jiffies value after which polling loops give up
 *
 *      Part of this is taken from __sata_phy_reset and modified to
 *      not sleep since this routine gets called from interrupt level.
 *
 *      LOCKING:
 *      Inherited from caller.  This is coded to safe to call at
 *      interrupt level, i.e. it does not sleep.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			unsigned long deadline)
{
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;		/* max COMRESET reissues for Gen-II errata */
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
	{
		/* locals deliberately shadow the outer sstatus for dumping */
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
	msleep(1);

	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
	msleep(20);

	/* poll SStatus DET until the link settles or the deadline passes */
	do {
		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	if (ata_link_offline(&ap->link)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		msleep(500);
		if (retry-- <= 0)
			break;
		if (time_after(jiffies, deadline))
			break;
	}

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap->link.device, 1, NULL);

	/* discard any stale EDMA error events accumulated during reset */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}
2301
/**
 *      mv_prereset - libata EH prereset callback
 *      @link: ATA link about to be reset
 *      @deadline: deadline (jiffies) for waiting on device readiness
 *
 *      Stops EDMA and decides whether a hardreset is required: always on
 *      the first reset of a port (MV_PP_FLAG_HAD_A_RESET not yet set) or
 *      when mv_stop_dma() fails.  Otherwise waits for the device to
 *      become ready so a softreset can proceed.
 *
 *      Returns 0 on success, -errno otherwise.
 */
static int mv_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_eh_context *ehc = &link->eh_context;
	int rc;

	rc = mv_stop_dma(ap);
	if (rc)
		ehc->i.action |= ATA_EH_HARDRESET;

	/* force a hardreset the very first time this port is reset */
	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
		ehc->i.action |= ATA_EH_HARDRESET;
	}

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	if (ata_link_online(link))
		rc = ata_wait_ready(ap, deadline);
	else
		rc = -ENODEV;

	return rc;
}
2329
/**
 *      mv_hardreset - libata EH hardreset callback
 *      @link: ATA link to hard-reset
 *      @class: out parameter for the resulting device classification
 *      @deadline: deadline (jiffies) passed on to mv_phy_reset()
 *
 *      Stops EDMA, resets the channel, then performs COMRESET and
 *      device classification via mv_phy_reset().
 *
 *      Always returns 0.
 */
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	return 0;
}
2345
/**
 *      mv_postreset - libata EH postreset callback
 *      @link: ATA link that was just reset
 *      @classes: device classifications produced by the reset
 *
 *      Prints link status, clears SError, and (if a device is present)
 *      restores the device-control register.
 */
static void mv_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serr;

	/* print link status */
	sata_print_link_status(link);

	/* clear SError */
	sata_scr_read(link, SCR_ERROR, &serr);
	sata_scr_write_flush(link, SCR_ERROR, serr);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
}
2367
/* Standard libata EH entry point wiring up this driver's reset callbacks. */
static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}
2373
/* Stop EDMA after an internal (EH-issued) command completes. */
static void mv_post_int_cmd(struct ata_queued_cmd *qc)
{
	mv_stop_dma(qc->ap);
}
2378
/**
 *      mv_eh_freeze - libata EH freeze callback
 *      @ap: port to freeze
 *
 *      Masks this port's err/done bits in the main IRQ mask register so
 *      the port raises no further interrupts while frozen.  Ports 0-3
 *      live on host controller 0, ports 4+ on host controller 1, which
 *      shifts the bit position by one extra.
 */
static void mv_eh_freeze(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	u32 tmp, mask;
	unsigned int shift;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;

	/* two bits per port: error and done */
	mask = 0x3 << shift;

	/* disable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
2398
/**
 *      mv_eh_thaw - libata EH thaw callback
 *      @ap: port to thaw
 *
 *      Clears any EDMA errors and pending per-HC interrupt events that
 *      accumulated while frozen, then re-enables this port's err/done
 *      bits in the main IRQ mask (inverse of mv_eh_freeze()).
 */
static void mv_eh_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		/* per-HC registers index ports 0-3 within each HC */
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
2431
/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	/* each taskfile shadow register is a 32-bit-wide slot in the block */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	/* SError is write-1-to-clear: write back what we read */
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
2480
/**
 *      mv_chip_id - select per-chip ops and errata flags for the board
 *      @host: ATA host being initialized
 *      @board_idx: index into the driver's board table (chip_xxx enum)
 *
 *      Sets hpriv->ops, accumulates generation/errata bits into
 *      hpriv->hp_flags based on board and PCI revision, and selects
 *      PCI vs PCIe interrupt cause/mask register offsets.
 *
 *      Returns 0 on success, 1 on an invalid board index.
 */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* fall through: 7042 takes the same Gen-IIE setup as 6042 */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		dev_printk(KERN_ERR, &pdev->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	/* PCIe parts (7042) use a different interrupt register block */
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
2616
/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @host: ATA host to initialize
 *      @board_idx: controller index
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      Returns 0 on success, non-zero from mv_chip_id() or the
 *      chip-specific reset_hc hook on failure.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	/* select ops + errata flags; must run before any hpriv->ops use */
	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);
		unsigned int offset = port_mmio - mmio;

		mv_port_init(&ap->ioaddr, port_mmio);

		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	/* and unmask interrupt generation for host regs */
	writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);

	if (IS_GEN_I(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + hpriv->irq_cause_ofs),
		readl(mmio + hpriv->irq_mask_ofs));

done:
	return rc;
}
2712
Brett Russ05b308e2005-10-05 17:08:53 -04002713/**
2714 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002715 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002716 *
2717 * FIXME: complete this.
2718 *
2719 * LOCKING:
2720 * Inherited from caller.
2721 */
Tejun Heo4447d352007-04-17 23:44:08 +09002722static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002723{
Tejun Heo4447d352007-04-17 23:44:08 +09002724 struct pci_dev *pdev = to_pci_dev(host->dev);
2725 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07002726 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002727 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002728
2729 /* Use this to determine the HW stepping of the chip so we know
2730 * what errata to workaround
2731 */
Brett Russ31961942005-09-30 01:36:00 -04002732 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2733 if (scc == 0)
2734 scc_s = "SCSI";
2735 else if (scc == 0x01)
2736 scc_s = "RAID";
2737 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002738 scc_s = "?";
2739
2740 if (IS_GEN_I(hpriv))
2741 gen = "I";
2742 else if (IS_GEN_II(hpriv))
2743 gen = "II";
2744 else if (IS_GEN_IIE(hpriv))
2745 gen = "IIE";
2746 else
2747 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002748
Jeff Garzika9524a72005-10-30 14:39:11 -05002749 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002750 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2751 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002752 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2753}
2754
/**
 *      mv_init_one - handle a positive probe of a Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      Allocates the ATA host and driver-private data, maps BAR
 *      resources (device-managed, so no explicit teardown on error
 *      paths), initializes the adapter, and activates the host.
 *
 *      Returns 0 on success, -errno on failure.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	/* fall back to legacy INTx when MSI is disabled or unavailable */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
2817
/* Module init: register the PCI driver. */
static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}
2822
/* Module exit: unregister the PCI driver. */
static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}
2827
/* Module metadata */
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

/* 'msi' load-time parameter, world-readable (0444) in sysfs */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);
2838module_exit(mv_exit);