blob: 32a0ace5234a7b99b8e87e2e74853ddd9a26c0c2 [file] [log] [blame]
Brett Russ20f733e2005-09-01 18:26:17 -04001/*
2 * sata_mv.c - Marvell SATA support
3 *
Jeff Garzik8b260242005-11-12 12:32:50 -05004 * Copyright 2005: EMC Corporation, all rights reserved.
Jeff Garzike2b1be52005-11-18 14:04:23 -05005 * Copyright 2005 Red Hat, Inc. All rights reserved.
Brett Russ20f733e2005-09-01 18:26:17 -04006 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
Jeff Garzik4a05e202007-05-24 23:40:15 -040024/*
25 sata_mv TODO list:
26
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember a couple workarounds (one related to PCI-X)
30 are still needed.
31
Jeff Garzik4a05e202007-05-24 23:40:15 -040032 4) Add NCQ support (easy to intermediate, once new-EH support appears)
33
34 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
35
36 6) Add port multiplier support (intermediate)
37
Jeff Garzik4a05e202007-05-24 23:40:15 -040038 8) Develop a low-power-consumption strategy, and implement it.
39
40 9) [Experiment, low priority] See if ATAPI can be supported using
41 "unknown FIS" or "vendor-specific FIS" support, or something creative
42 like that.
43
44 10) [Experiment, low priority] Investigate interrupt coalescing.
45 Quite often, especially with PCI Message Signalled Interrupts (MSI),
46 the overhead reduced by interrupt mitigation is quite often not
47 worth the latency cost.
48
49 11) [Experiment, Marvell value added] Is it possible to use target
50 mode to cross-connect two Linux boxes with Marvell cards? If so,
51 creating LibATA target mode support would be very interesting.
52
53 Target mode, for those without docs, is the ability to directly
54 connect two SATA controllers.
55
56 13) Verify that 7042 is fully supported. I only have a 6042.
57
58*/
59
60
Brett Russ20f733e2005-09-01 18:26:17 -040061#include <linux/kernel.h>
62#include <linux/module.h>
63#include <linux/pci.h>
64#include <linux/init.h>
65#include <linux/blkdev.h>
66#include <linux/delay.h>
67#include <linux/interrupt.h>
Brett Russ20f733e2005-09-01 18:26:17 -040068#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050069#include <linux/device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040070#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050071#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040072#include <scsi/scsi_device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040073#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040074
/* Driver identity; DRV_NAME doubles as the SCSI proc_name in the shts below */
#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.01"
Brett Russ20f733e2005-09-01 18:26:17 -040077
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),	/* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),	/* IRQ coalescing capability */
	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	/* Command ReQuest Block (CRQB) field layout */
	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	/* Command ResPonse Block (CRPB) field layout */
	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	/* PCIe variants of the above (7042 family, per MV_HP_PCIE) */
	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	/* NOTE: bit 8 has a different meaning per controller generation;
	 * the three aliases below intentionally share the same value.
	 */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),	/* Gen I only */
	EDMA_ERR_UNDERRUN_5	= (1 << 6),	/* Gen I only */

	/* error bits considered transient (recoverable without freeze) */
	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	/* error bits that freeze the port, Gen II/IIE ... */
	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,
	/* ... and the Gen I equivalent */
	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
};
337
/* Controller-generation tests, keyed off hp_flags set at chip init time */
#define IS_GEN_I(hpriv)		((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500341
enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill-sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
356
/* Board/chip index; used as driver_data in mv_pci_tbl and as the
 * index into mv_port_info[] below — keep the two in the same order.
 */
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};
366
/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;	/* low 32 bits of ePRD table addr */
	__le32			sg_addr_hi;	/* high 32 bits of ePRD table addr */
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};
374
/* Gen-IIE (6042/7042) variant of the Command ReQuest Block */
struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};
382
/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};
389
/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};
397
/* Per-port private data: DMA-coherent queue/SG buffers and their bus
 * addresses, plus the software queue indices and MV_PP_FLAG_* state.
 */
struct mv_port_priv {
	struct mv_crqb		*crqb;		/* request queue (CPU addr) */
	dma_addr_t		crqb_dma;	/* request queue (bus addr) */
	struct mv_crpb		*crpb;		/* response queue (CPU addr) */
	dma_addr_t		crpb_dma;	/* response queue (bus addr) */
	struct mv_sg		*sg_tbl;	/* ePRD/SG table (CPU addr) */
	dma_addr_t		sg_tbl_dma;	/* ePRD/SG table (bus addr) */

	unsigned int		req_idx;	/* next request queue slot */
	unsigned int		resp_idx;	/* next response queue slot */

	u32			pp_flags;	/* MV_PP_FLAG_* bits */
};
411
/* Per-port PHY signal parameters captured by the read_preamp hook */
struct mv_port_signal {
	u32			amps;	/* amplitude */
	u32			pre;	/* pre-emphasis */
};
416
/* Per-host private data: chip flags/errata, saved PHY signal settings,
 * the generation-specific hardware ops, and the PCI-vs-PCIe IRQ register
 * offsets selected at init time (see MV_HP_PCIE).
 */
struct mv_host_priv {
	u32			hp_flags;	/* MV_HP_* bits */
	struct mv_port_signal	signal[8];	/* one per possible port */
	const struct mv_hw_ops	*ops;		/* generation-specific hooks */
	u32			irq_cause_ofs;	/* PCI(E)_IRQ_CAUSE_OFS */
	u32			irq_mask_ofs;	/* PCI(E)_IRQ_MASK_OFS */
	u32			unmask_all_irqs; /* PCI(E)_UNMASK_ALL_IRQS */
};
425
/* Generation-specific low-level hardware hooks; instantiated once per
 * chip family (e.g. mv5xxx_ops below) and reached via hpriv->ops.
 */
struct mv_hw_ops {
	/* apply chip-specific PHY errata workarounds to one port */
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	/* capture PHY preamp/signal settings for port idx */
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	/* reset n_hc host controllers; nonzero return on failure */
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};
437
/* Forward declarations for the port operations and hardware hooks that
 * are wired into the tables below before their definitions appear.
 */
static void mv_irq_clear(struct ata_port *ap);
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_post_int_cmd(struct ata_queued_cmd *qc);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

/* Gen-I (50xx) hardware hooks */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

/* Gen-II/IIE (60xx/7042) hardware hooks and shared helpers */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq);
static int __mv_stop_dma(struct ata_port *ap);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500478
/* SCSI host template for the 5xxx family.  Currently field-for-field
 * identical to mv6_sht below, but kept separate per chip generation.
 */
static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
496
/* SCSI host template for the 6xxx/7xxx families (see mv5_sht above) */
static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
514
/* libata port operations for Gen-I (50xx); differs from mv6_ops only in
 * the SCR accessors (mv5_scr_* vs mv_scr_*).
 */
static const struct ata_port_operations mv5_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
542
/* libata port operations for Gen-II (60xx) */
static const struct ata_port_operations mv6_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
570
/* libata port operations for Gen-IIE (6042/7042); differs from mv6_ops
 * only in using the IIE-format request blocks via mv_qc_prep_iie.
 */
static const struct ata_port_operations mv_iie_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
598
/* Per-chip port configuration, indexed by enum chip_type */
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
644
Jeff Garzik3b7d6972005-11-10 11:04:11 -0500645static const struct pci_device_id mv_pci_tbl[] = {
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400646 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
647 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
648 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
649 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
Alan Coxcfbf7232007-07-09 14:38:41 +0100650 /* RocketRAID 1740/174x have different identifiers */
651 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
652 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
Brett Russ20f733e2005-09-01 18:26:17 -0400653
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400654 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
655 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
656 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
657 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
658 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
Jeff Garzik29179532005-11-11 08:08:03 -0500659
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400660 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
661
Florian Attenbergerd9f9c6b2007-07-02 17:09:29 +0200662 /* Adaptec 1430SA */
663 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
664
Mark Lord02a121d2007-12-01 13:07:22 -0500665 /* Marvell 7042 support */
Morrison, Tom6a3d5862007-03-06 02:38:10 -0800666 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
667
Mark Lord02a121d2007-12-01 13:07:22 -0500668 /* Highpoint RocketRAID PCIe series */
669 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
670 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
671
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400672 { } /* terminate list */
Brett Russ20f733e2005-09-01 18:26:17 -0400673};
674
/* PCI driver registration; removal uses the generic libata helper. */
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
681
Jeff Garzik47c2b672005-11-12 21:13:17 -0500682static const struct mv_hw_ops mv5xxx_ops = {
683 .phy_errata = mv5_phy_errata,
684 .enable_leds = mv5_enable_leds,
685 .read_preamp = mv5_read_preamp,
686 .reset_hc = mv5_reset_hc,
Jeff Garzik522479f2005-11-12 22:14:02 -0500687 .reset_flash = mv5_reset_flash,
688 .reset_bus = mv5_reset_bus,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500689};
690
/* Low-level hardware ops for Gen II/IIE (60xx/7042) chips. */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
699
Brett Russ20f733e2005-09-01 18:26:17 -0400700/*
Jeff Garzikddef9bb2006-02-02 16:17:06 -0500701 * module options
702 */
703static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
704
705
Jeff Garzikd88184f2007-02-26 01:26:06 -0500706/* move to PCI layer or libata core? */
707static int pci_go_64(struct pci_dev *pdev)
708{
709 int rc;
710
711 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
712 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
713 if (rc) {
714 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
715 if (rc) {
716 dev_printk(KERN_ERR, &pdev->dev,
717 "64-bit DMA enable failed\n");
718 return rc;
719 }
720 }
721 } else {
722 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
723 if (rc) {
724 dev_printk(KERN_ERR, &pdev->dev,
725 "32-bit DMA enable failed\n");
726 return rc;
727 }
728 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
729 if (rc) {
730 dev_printk(KERN_ERR, &pdev->dev,
731 "32-bit consistent DMA enable failed\n");
732 return rc;
733 }
734 }
735
736 return rc;
737}
738
Jeff Garzikddef9bb2006-02-02 16:17:06 -0500739/*
Brett Russ20f733e2005-09-01 18:26:17 -0400740 * Functions
741 */
742
/* Write a register and read it back so the write is not left sitting
 * in a PCI posted-write buffer ("fl" = flush).
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
748
Brett Russ20f733e2005-09-01 18:26:17 -0400749static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
750{
751 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
752}
753
Jeff Garzikc9d39132005-11-13 17:47:51 -0500754static inline unsigned int mv_hc_from_port(unsigned int port)
755{
756 return port >> MV_PORT_HC_SHIFT;
757}
758
/* Map a global port number to its index within its host controller. */
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
763
/* Return the MMIO base of the host controller that owns @port. */
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}
769
Brett Russ20f733e2005-09-01 18:26:17 -0400770static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
771{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500772 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500773 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500774 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400775}
776
/* Return the per-port register base for ATA port @ap. */
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}
781
Jeff Garzikcca39742006-08-24 03:19:22 -0400782static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400783{
Jeff Garzikcca39742006-08-24 03:19:22 -0400784 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400785}
786
/* libata ->irq_clear() hook: intentionally empty — this driver clears
 * its interrupt-cause registers explicitly elsewhere.
 */
static void mv_irq_clear(struct ata_port *ap)
{
}
790
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400791static void mv_set_edma_ptrs(void __iomem *port_mmio,
792 struct mv_host_priv *hpriv,
793 struct mv_port_priv *pp)
794{
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400795 u32 index;
796
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400797 /*
798 * initialize request queue
799 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400800 index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
801
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400802 WARN_ON(pp->crqb_dma & 0x3ff);
803 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400804 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400805 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
806
807 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400808 writelfl((pp->crqb_dma & 0xffffffff) | index,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400809 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
810 else
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400811 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400812
813 /*
814 * initialize response queue
815 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400816 index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
817
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400818 WARN_ON(pp->crpb_dma & 0xff);
819 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
820
821 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400822 writelfl((pp->crpb_dma & 0xffffffff) | index,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400823 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
824 else
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400825 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400826
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400827 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400828 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400829}
830
Brett Russ05b308e2005-10-05 17:08:53 -0400831/**
832 * mv_start_dma - Enable eDMA engine
833 * @base: port base address
834 * @pp: port private data
835 *
Tejun Heobeec7db2006-02-11 19:11:13 +0900836 * Verify the local cache of the eDMA state is accurate with a
837 * WARN_ON.
Brett Russ05b308e2005-10-05 17:08:53 -0400838 *
839 * LOCKING:
840 * Inherited from caller.
841 */
Mark Lord0c589122008-01-26 18:31:16 -0500842static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
Mark Lord72109162008-01-26 18:31:33 -0500843 struct mv_port_priv *pp, u8 protocol)
Brett Russ31961942005-09-30 01:36:00 -0400844{
Mark Lord72109162008-01-26 18:31:33 -0500845 int want_ncq = (protocol == ATA_PROT_NCQ);
846
847 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
848 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
849 if (want_ncq != using_ncq)
850 __mv_stop_dma(ap);
851 }
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400852 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
Mark Lord0c589122008-01-26 18:31:16 -0500853 struct mv_host_priv *hpriv = ap->host->private_data;
854 int hard_port = mv_hardport_from_port(ap->port_no);
855 void __iomem *hc_mmio = mv_hc_base_from_port(
856 ap->host->iomap[MV_PRIMARY_BAR], hard_port);
857 u32 hc_irq_cause, ipending;
858
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400859 /* clear EDMA event indicators, if any */
Mark Lordf630d562008-01-26 18:31:00 -0500860 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400861
Mark Lord0c589122008-01-26 18:31:16 -0500862 /* clear EDMA interrupt indicator, if any */
863 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
864 ipending = (DEV_IRQ << hard_port) |
865 (CRPB_DMA_DONE << hard_port);
866 if (hc_irq_cause & ipending) {
867 writelfl(hc_irq_cause & ~ipending,
868 hc_mmio + HC_IRQ_CAUSE_OFS);
869 }
870
Mark Lord72109162008-01-26 18:31:33 -0500871 mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);
Mark Lord0c589122008-01-26 18:31:16 -0500872
873 /* clear FIS IRQ Cause */
874 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
875
Mark Lordf630d562008-01-26 18:31:00 -0500876 mv_set_edma_ptrs(port_mmio, hpriv, pp);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400877
Mark Lordf630d562008-01-26 18:31:00 -0500878 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
Brett Russafb0edd2005-10-05 17:08:42 -0400879 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
880 }
Mark Lordf630d562008-01-26 18:31:00 -0500881 WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
Brett Russ31961942005-09-30 01:36:00 -0400882}
883
Brett Russ05b308e2005-10-05 17:08:53 -0400884/**
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400885 * __mv_stop_dma - Disable eDMA engine
Brett Russ05b308e2005-10-05 17:08:53 -0400886 * @ap: ATA channel to manipulate
887 *
Tejun Heobeec7db2006-02-11 19:11:13 +0900888 * Verify the local cache of the eDMA state is accurate with a
889 * WARN_ON.
Brett Russ05b308e2005-10-05 17:08:53 -0400890 *
891 * LOCKING:
892 * Inherited from caller.
893 */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400894static int __mv_stop_dma(struct ata_port *ap)
Brett Russ31961942005-09-30 01:36:00 -0400895{
896 void __iomem *port_mmio = mv_ap_base(ap);
897 struct mv_port_priv *pp = ap->private_data;
Brett Russ31961942005-09-30 01:36:00 -0400898 u32 reg;
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400899 int i, err = 0;
Brett Russ31961942005-09-30 01:36:00 -0400900
Jeff Garzik4537deb2007-07-12 14:30:19 -0400901 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
Brett Russafb0edd2005-10-05 17:08:42 -0400902 /* Disable EDMA if active. The disable bit auto clears.
Brett Russ31961942005-09-30 01:36:00 -0400903 */
Brett Russ31961942005-09-30 01:36:00 -0400904 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
905 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Brett Russafb0edd2005-10-05 17:08:42 -0400906 } else {
Tejun Heobeec7db2006-02-11 19:11:13 +0900907 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400908 }
Jeff Garzik8b260242005-11-12 12:32:50 -0500909
Brett Russ31961942005-09-30 01:36:00 -0400910 /* now properly wait for the eDMA to stop */
911 for (i = 1000; i > 0; i--) {
912 reg = readl(port_mmio + EDMA_CMD_OFS);
Jeff Garzik4537deb2007-07-12 14:30:19 -0400913 if (!(reg & EDMA_EN))
Brett Russ31961942005-09-30 01:36:00 -0400914 break;
Jeff Garzik4537deb2007-07-12 14:30:19 -0400915
Brett Russ31961942005-09-30 01:36:00 -0400916 udelay(100);
917 }
918
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400919 if (reg & EDMA_EN) {
Tejun Heof15a1da2006-05-15 20:57:56 +0900920 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400921 err = -EIO;
Brett Russ31961942005-09-30 01:36:00 -0400922 }
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400923
924 return err;
Brett Russ31961942005-09-30 01:36:00 -0400925}
926
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400927static int mv_stop_dma(struct ata_port *ap)
928{
929 unsigned long flags;
930 int rc;
931
932 spin_lock_irqsave(&ap->host->lock, flags);
933 rc = __mv_stop_dma(ap);
934 spin_unlock_irqrestore(&ap->host->lock, flags);
935
936 return rc;
937}
938
Jeff Garzik8a70f8d2005-10-05 17:19:47 -0400939#ifdef ATA_DEBUG
Brett Russ31961942005-09-30 01:36:00 -0400940static void mv_dump_mem(void __iomem *start, unsigned bytes)
941{
Brett Russ31961942005-09-30 01:36:00 -0400942 int b, w;
943 for (b = 0; b < bytes; ) {
944 DPRINTK("%p: ", start + b);
945 for (w = 0; b < bytes && w < 4; w++) {
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400946 printk("%08x ", readl(start + b));
Brett Russ31961942005-09-30 01:36:00 -0400947 b += sizeof(u32);
948 }
949 printk("\n");
950 }
Brett Russ31961942005-09-30 01:36:00 -0400951}
Jeff Garzik8a70f8d2005-10-05 17:19:47 -0400952#endif
953
Brett Russ31961942005-09-30 01:36:00 -0400954static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
955{
956#ifdef ATA_DEBUG
957 int b, w;
958 u32 dw;
959 for (b = 0; b < bytes; ) {
960 DPRINTK("%02x: ", b);
961 for (w = 0; b < bytes && w < 4; w++) {
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400962 (void) pci_read_config_dword(pdev, b, &dw);
963 printk("%08x ", dw);
Brett Russ31961942005-09-30 01:36:00 -0400964 b += sizeof(u32);
965 }
966 printk("\n");
967 }
968#endif
969}
/* Debug helper: dump PCI config space, global PCI regs, host
 * controller regs, and per-port EDMA/SATA regs.  A negative @port
 * means "all ports/HCs".  Compiles to a no-op without ATA_DEBUG.
 */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		/* dump everything */
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		/* dump only the HC/port pair owning @port */
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
1014
Brett Russ20f733e2005-09-01 18:26:17 -04001015static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1016{
1017 unsigned int ofs;
1018
1019 switch (sc_reg_in) {
1020 case SCR_STATUS:
1021 case SCR_CONTROL:
1022 case SCR_ERROR:
1023 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1024 break;
1025 case SCR_ACTIVE:
1026 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1027 break;
1028 default:
1029 ofs = 0xffffffffU;
1030 break;
1031 }
1032 return ofs;
1033}
1034
Tejun Heoda3dbb12007-07-16 14:29:40 +09001035static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -04001036{
1037 unsigned int ofs = mv_scr_offset(sc_reg_in);
1038
Tejun Heoda3dbb12007-07-16 14:29:40 +09001039 if (ofs != 0xffffffffU) {
1040 *val = readl(mv_ap_base(ap) + ofs);
1041 return 0;
1042 } else
1043 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001044}
1045
Tejun Heoda3dbb12007-07-16 14:29:40 +09001046static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -04001047{
1048 unsigned int ofs = mv_scr_offset(sc_reg_in);
1049
Tejun Heoda3dbb12007-07-16 14:29:40 +09001050 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -04001051 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001052 return 0;
1053 } else
1054 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001055}
1056
Mark Lord72109162008-01-26 18:31:33 -05001057static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
1058 void __iomem *port_mmio, int want_ncq)
Jeff Garzike4e7b892006-01-31 12:18:41 -05001059{
Mark Lord0c589122008-01-26 18:31:16 -05001060 u32 cfg;
Jeff Garzike4e7b892006-01-31 12:18:41 -05001061
1062 /* set up non-NCQ EDMA configuration */
Mark Lord0c589122008-01-26 18:31:16 -05001063 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
Jeff Garzike4e7b892006-01-31 12:18:41 -05001064
Mark Lord0c589122008-01-26 18:31:16 -05001065 if (IS_GEN_I(hpriv))
Jeff Garzike4e7b892006-01-31 12:18:41 -05001066 cfg |= (1 << 8); /* enab config burst size mask */
1067
Mark Lord0c589122008-01-26 18:31:16 -05001068 else if (IS_GEN_II(hpriv))
Jeff Garzike4e7b892006-01-31 12:18:41 -05001069 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1070
1071 else if (IS_GEN_IIE(hpriv)) {
Jeff Garzike728eab2007-02-25 02:53:41 -05001072 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1073 cfg |= (1 << 22); /* enab 4-entry host queue cache */
Jeff Garzike4e7b892006-01-31 12:18:41 -05001074 cfg |= (1 << 18); /* enab early completion */
Jeff Garzike728eab2007-02-25 02:53:41 -05001075 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
Jeff Garzike4e7b892006-01-31 12:18:41 -05001076 }
1077
Mark Lord72109162008-01-26 18:31:33 -05001078 if (want_ncq) {
1079 cfg |= EDMA_CFG_NCQ;
1080 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
1081 } else
1082 pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
1083
Jeff Garzike4e7b892006-01-31 12:18:41 -05001084 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1085}
1086
Brett Russ05b308e2005-10-05 17:08:53 -04001087/**
1088 * mv_port_start - Port specific init/start routine.
1089 * @ap: ATA channel to manipulate
1090 *
1091 * Allocate and point to DMA memory, init port private memory,
1092 * zero indices.
1093 *
1094 * LOCKING:
1095 * Inherited from caller.
1096 */
Brett Russ31961942005-09-30 01:36:00 -04001097static int mv_port_start(struct ata_port *ap)
1098{
Jeff Garzikcca39742006-08-24 03:19:22 -04001099 struct device *dev = ap->host->dev;
1100 struct mv_host_priv *hpriv = ap->host->private_data;
Brett Russ31961942005-09-30 01:36:00 -04001101 struct mv_port_priv *pp;
1102 void __iomem *port_mmio = mv_ap_base(ap);
1103 void *mem;
1104 dma_addr_t mem_dma;
Jeff Garzik0ea9e172007-07-13 17:06:45 -04001105 unsigned long flags;
Tejun Heo24dc5f32007-01-20 16:00:28 +09001106 int rc;
Brett Russ31961942005-09-30 01:36:00 -04001107
Tejun Heo24dc5f32007-01-20 16:00:28 +09001108 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
Jeff Garzik6037d6b2005-11-04 22:08:00 -05001109 if (!pp)
Tejun Heo24dc5f32007-01-20 16:00:28 +09001110 return -ENOMEM;
Brett Russ31961942005-09-30 01:36:00 -04001111
Tejun Heo24dc5f32007-01-20 16:00:28 +09001112 mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
1113 GFP_KERNEL);
Jeff Garzik6037d6b2005-11-04 22:08:00 -05001114 if (!mem)
Tejun Heo24dc5f32007-01-20 16:00:28 +09001115 return -ENOMEM;
Brett Russ31961942005-09-30 01:36:00 -04001116 memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
1117
Jeff Garzik6037d6b2005-11-04 22:08:00 -05001118 rc = ata_pad_alloc(ap, dev);
1119 if (rc)
Tejun Heo24dc5f32007-01-20 16:00:28 +09001120 return rc;
Jeff Garzik6037d6b2005-11-04 22:08:00 -05001121
Jeff Garzik8b260242005-11-12 12:32:50 -05001122 /* First item in chunk of DMA memory:
Brett Russ31961942005-09-30 01:36:00 -04001123 * 32-slot command request table (CRQB), 32 bytes each in size
1124 */
1125 pp->crqb = mem;
1126 pp->crqb_dma = mem_dma;
1127 mem += MV_CRQB_Q_SZ;
1128 mem_dma += MV_CRQB_Q_SZ;
1129
Jeff Garzik8b260242005-11-12 12:32:50 -05001130 /* Second item:
Brett Russ31961942005-09-30 01:36:00 -04001131 * 32-slot command response table (CRPB), 8 bytes each in size
1132 */
1133 pp->crpb = mem;
1134 pp->crpb_dma = mem_dma;
1135 mem += MV_CRPB_Q_SZ;
1136 mem_dma += MV_CRPB_Q_SZ;
1137
1138 /* Third item:
1139 * Table of scatter-gather descriptors (ePRD), 16 bytes each
1140 */
1141 pp->sg_tbl = mem;
1142 pp->sg_tbl_dma = mem_dma;
1143
Jeff Garzik0ea9e172007-07-13 17:06:45 -04001144 spin_lock_irqsave(&ap->host->lock, flags);
1145
Mark Lord72109162008-01-26 18:31:33 -05001146 mv_edma_cfg(pp, hpriv, port_mmio, 0);
Brett Russ31961942005-09-30 01:36:00 -04001147
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001148 mv_set_edma_ptrs(port_mmio, hpriv, pp);
Brett Russ31961942005-09-30 01:36:00 -04001149
Jeff Garzik0ea9e172007-07-13 17:06:45 -04001150 spin_unlock_irqrestore(&ap->host->lock, flags);
1151
Brett Russ31961942005-09-30 01:36:00 -04001152 /* Don't turn on EDMA here...do it before DMA commands only. Else
1153 * we'll be unable to send non-data, PIO, etc due to restricted access
1154 * to shadow regs.
1155 */
1156 ap->private_data = pp;
1157 return 0;
1158}
1159
Brett Russ05b308e2005-10-05 17:08:53 -04001160/**
1161 * mv_port_stop - Port specific cleanup/stop routine.
1162 * @ap: ATA channel to manipulate
1163 *
1164 * Stop DMA, cleanup port memory.
1165 *
1166 * LOCKING:
Jeff Garzikcca39742006-08-24 03:19:22 -04001167 * This routine uses the host lock to protect the DMA stop.
Brett Russ05b308e2005-10-05 17:08:53 -04001168 */
Brett Russ31961942005-09-30 01:36:00 -04001169static void mv_port_stop(struct ata_port *ap)
1170{
Brett Russ31961942005-09-30 01:36:00 -04001171 mv_stop_dma(ap);
Brett Russ31961942005-09-30 01:36:00 -04001172}
1173
Brett Russ05b308e2005-10-05 17:08:53 -04001174/**
1175 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1176 * @qc: queued command whose SG list to source from
1177 *
1178 * Populate the SG list and mark the last entry.
1179 *
1180 * LOCKING:
1181 * Inherited from caller.
1182 */
Jeff Garzik6c087722007-10-12 00:16:23 -04001183static void mv_fill_sg(struct ata_queued_cmd *qc)
Brett Russ31961942005-09-30 01:36:00 -04001184{
1185 struct mv_port_priv *pp = qc->ap->private_data;
Jeff Garzik972c26b2005-10-18 22:14:54 -04001186 struct scatterlist *sg;
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04001187 struct mv_sg *mv_sg, *last_sg = NULL;
Tejun Heoff2aeb12007-12-05 16:43:11 +09001188 unsigned int si;
Brett Russ31961942005-09-30 01:36:00 -04001189
Jeff Garzikd88184f2007-02-26 01:26:06 -05001190 mv_sg = pp->sg_tbl;
Tejun Heoff2aeb12007-12-05 16:43:11 +09001191 for_each_sg(qc->sg, sg, qc->n_elem, si) {
Jeff Garzikd88184f2007-02-26 01:26:06 -05001192 dma_addr_t addr = sg_dma_address(sg);
1193 u32 sg_len = sg_dma_len(sg);
Brett Russ31961942005-09-30 01:36:00 -04001194
Olof Johansson4007b492007-10-02 20:45:27 -05001195 while (sg_len) {
1196 u32 offset = addr & 0xffff;
1197 u32 len = sg_len;
Brett Russ31961942005-09-30 01:36:00 -04001198
Olof Johansson4007b492007-10-02 20:45:27 -05001199 if ((offset + sg_len > 0x10000))
1200 len = 0x10000 - offset;
Jeff Garzik972c26b2005-10-18 22:14:54 -04001201
Olof Johansson4007b492007-10-02 20:45:27 -05001202 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1203 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
Jeff Garzik6c087722007-10-12 00:16:23 -04001204 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
Olof Johansson4007b492007-10-02 20:45:27 -05001205
1206 sg_len -= len;
1207 addr += len;
1208
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04001209 last_sg = mv_sg;
Olof Johansson4007b492007-10-02 20:45:27 -05001210 mv_sg++;
Olof Johansson4007b492007-10-02 20:45:27 -05001211 }
Brett Russ31961942005-09-30 01:36:00 -04001212 }
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04001213
1214 if (likely(last_sg))
1215 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
Brett Russ31961942005-09-30 01:36:00 -04001216}
1217
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001218static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001219{
Mark Lord559eeda2006-05-19 16:40:15 -04001220 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001221 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001222 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001223}
1224
Brett Russ05b308e2005-10-05 17:08:53 -04001225/**
1226 * mv_qc_prep - Host specific command preparation.
1227 * @qc: queued command to prepare
1228 *
1229 * This routine simply redirects to the general purpose routine
1230 * if command is not DMA. Else, it handles prep of the CRQB
1231 * (command request block), does some sanity checking, and calls
1232 * the SG load routine.
1233 *
1234 * LOCKING:
1235 * Inherited from caller.
1236 */
Brett Russ31961942005-09-30 01:36:00 -04001237static void mv_qc_prep(struct ata_queued_cmd *qc)
1238{
1239 struct ata_port *ap = qc->ap;
1240 struct mv_port_priv *pp = ap->private_data;
Mark Lorde1469872006-05-22 19:02:03 -04001241 __le16 *cw;
Brett Russ31961942005-09-30 01:36:00 -04001242 struct ata_taskfile *tf;
1243 u16 flags = 0;
Mark Lorda6432432006-05-19 16:36:36 -04001244 unsigned in_index;
Brett Russ31961942005-09-30 01:36:00 -04001245
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001246 if (qc->tf.protocol != ATA_PROT_DMA)
Brett Russ31961942005-09-30 01:36:00 -04001247 return;
Brett Russ20f733e2005-09-01 18:26:17 -04001248
Brett Russ31961942005-09-30 01:36:00 -04001249 /* Fill in command request block
1250 */
Jeff Garzike4e7b892006-01-31 12:18:41 -05001251 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
Brett Russ31961942005-09-30 01:36:00 -04001252 flags |= CRQB_FLAG_READ;
Tejun Heobeec7db2006-02-11 19:11:13 +09001253 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
Brett Russ31961942005-09-30 01:36:00 -04001254 flags |= qc->tag << CRQB_TAG_SHIFT;
Jeff Garzik4537deb2007-07-12 14:30:19 -04001255 flags |= qc->tag << CRQB_IOID_SHIFT; /* 50xx appears to ignore this*/
Brett Russ31961942005-09-30 01:36:00 -04001256
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001257 /* get current queue index from software */
1258 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
Brett Russ31961942005-09-30 01:36:00 -04001259
Mark Lorda6432432006-05-19 16:36:36 -04001260 pp->crqb[in_index].sg_addr =
1261 cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1262 pp->crqb[in_index].sg_addr_hi =
1263 cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1264 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1265
1266 cw = &pp->crqb[in_index].ata_cmd[0];
Brett Russ31961942005-09-30 01:36:00 -04001267 tf = &qc->tf;
1268
1269 /* Sadly, the CRQB cannot accomodate all registers--there are
1270 * only 11 bytes...so we must pick and choose required
1271 * registers based on the command. So, we drop feature and
1272 * hob_feature for [RW] DMA commands, but they are needed for
1273 * NCQ. NCQ will drop hob_nsect.
1274 */
1275 switch (tf->command) {
1276 case ATA_CMD_READ:
1277 case ATA_CMD_READ_EXT:
1278 case ATA_CMD_WRITE:
1279 case ATA_CMD_WRITE_EXT:
Jens Axboec15d85c2006-02-15 15:59:25 +01001280 case ATA_CMD_WRITE_FUA_EXT:
Brett Russ31961942005-09-30 01:36:00 -04001281 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1282 break;
1283#ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */
1284 case ATA_CMD_FPDMA_READ:
1285 case ATA_CMD_FPDMA_WRITE:
Jeff Garzik8b260242005-11-12 12:32:50 -05001286 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
Brett Russ31961942005-09-30 01:36:00 -04001287 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1288 break;
1289#endif /* FIXME: remove this line when NCQ added */
1290 default:
1291 /* The only other commands EDMA supports in non-queued and
1292 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1293 * of which are defined/used by Linux. If we get here, this
1294 * driver needs work.
1295 *
1296 * FIXME: modify libata to give qc_prep a return value and
1297 * return error here.
1298 */
1299 BUG_ON(tf->command);
1300 break;
1301 }
1302 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1303 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1304 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1305 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1306 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1307 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1308 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1309 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1310 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1311
Jeff Garzike4e7b892006-01-31 12:18:41 -05001312 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
Brett Russ31961942005-09-30 01:36:00 -04001313 return;
Jeff Garzike4e7b892006-01-31 12:18:41 -05001314 mv_fill_sg(qc);
1315}
1316
1317/**
1318 * mv_qc_prep_iie - Host specific command preparation.
1319 * @qc: queued command to prepare
1320 *
1321 * This routine simply redirects to the general purpose routine
1322 * if command is not DMA. Else, it handles prep of the CRQB
1323 * (command request block), does some sanity checking, and calls
1324 * the SG load routine.
1325 *
1326 * LOCKING:
1327 * Inherited from caller.
1328 */
1329static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1330{
1331 struct ata_port *ap = qc->ap;
1332 struct mv_port_priv *pp = ap->private_data;
1333 struct mv_crqb_iie *crqb;
1334 struct ata_taskfile *tf;
Mark Lorda6432432006-05-19 16:36:36 -04001335 unsigned in_index;
Jeff Garzike4e7b892006-01-31 12:18:41 -05001336 u32 flags = 0;
1337
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001338 if (qc->tf.protocol != ATA_PROT_DMA)
Jeff Garzike4e7b892006-01-31 12:18:41 -05001339 return;
1340
Jeff Garzike4e7b892006-01-31 12:18:41 -05001341 /* Fill in Gen IIE command request block
1342 */
1343 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1344 flags |= CRQB_FLAG_READ;
1345
Tejun Heobeec7db2006-02-11 19:11:13 +09001346 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
Jeff Garzike4e7b892006-01-31 12:18:41 -05001347 flags |= qc->tag << CRQB_TAG_SHIFT;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001348 flags |= qc->tag << CRQB_IOID_SHIFT; /* "I/O Id" is -really-
Jeff Garzik4537deb2007-07-12 14:30:19 -04001349 what we use as our tag */
Jeff Garzike4e7b892006-01-31 12:18:41 -05001350
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001351 /* get current queue index from software */
1352 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
Mark Lorda6432432006-05-19 16:36:36 -04001353
1354 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
Jeff Garzike4e7b892006-01-31 12:18:41 -05001355 crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1356 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1357 crqb->flags = cpu_to_le32(flags);
1358
1359 tf = &qc->tf;
1360 crqb->ata_cmd[0] = cpu_to_le32(
1361 (tf->command << 16) |
1362 (tf->feature << 24)
1363 );
1364 crqb->ata_cmd[1] = cpu_to_le32(
1365 (tf->lbal << 0) |
1366 (tf->lbam << 8) |
1367 (tf->lbah << 16) |
1368 (tf->device << 24)
1369 );
1370 crqb->ata_cmd[2] = cpu_to_le32(
1371 (tf->hob_lbal << 0) |
1372 (tf->hob_lbam << 8) |
1373 (tf->hob_lbah << 16) |
1374 (tf->hob_feature << 24)
1375 );
1376 crqb->ata_cmd[3] = cpu_to_le32(
1377 (tf->nsect << 0) |
1378 (tf->hob_nsect << 8)
1379 );
1380
1381 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1382 return;
Brett Russ31961942005-09-30 01:36:00 -04001383 mv_fill_sg(qc);
1384}
1385
/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      Returns 0 on the EDMA path; otherwise whatever
 *      ata_qc_issue_prot() returns.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	/* software copy of the request-queue producer index */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	pp->req_idx++;

	/* re-use in_index as the shifted pointer field for the register */
	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1432
/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *      @qc: active queued command at the time of the error, or NULL
 *
 *      In most cases, just clear the interrupt and move on.  However,
 *      some cases require an eDMA reset, which is done right before
 *      the COMRESET in mv_phy_reset().  The SERR case requires a
 *      clear of pending errors in the SATA SERROR register.  Finally,
 *      if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */

	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_HARDRESET;
	}

	/* generation-specific handling: Gen-I uses a different freeze
	 * mask and self-disable bit than Gen-II/IIE
	 */
	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp = ap->private_data;
			/* hardware turned EDMA off; sync our cached flag */
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp = ap->private_data;
			/* hardware turned EDMA off; sync our cached flag */
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			/* NOTE(review): plain "=" overwrites any AC_ERR_DEV /
			 * AC_ERR_ATA_BUS bits accumulated above — confirm
			 * whether "|=" was intended.
			 */
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	/* attribute the error to the active command if there is one,
	 * otherwise record it against the link
	 */
	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	/* freeze for the serious causes; plain abort otherwise */
	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
1537
/* mv_intr_pio - service a non-EDMA (PIO) device interrupt on @ap.
 * Reads the ATA status register and completes the active command,
 * unless the drive is still busy or the command is being polled.
 */
static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}
1559
/* mv_intr_edma - drain the EDMA response queue for @ap.
 * Walks entries between our software out-pointer and the hardware
 * in-pointer, completing the corresponding commands, then writes the
 * updated out-pointer back to hardware in one go.  On a per-entry
 * error, delegates to mv_err_intr() and returns without updating the
 * hardware out-pointer.
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else if (IS_GEN_II(hpriv))
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_6) & 0x3f;

		else /* IS_GEN_IIE */
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_7) & 0x3f;

		qc = ata_qc_from_tag(ap, tag);

		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
		 * bits (WARNING: might not necessarily be associated
		 * with this command), which -should- be clear
		 * if all is well
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if (unlikely(status & 0xff)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	/* single register write covers every entry consumed above */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1631
/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @relevant: port error bits relevant to this host controller
 *      @hc: which host controller we're to look at
 *
 *      Read then write clear the HC interrupt status then walk each
 *      port connected to the HC and see if it needs servicing.  Port
 *      success ints are reported in the HC interrupt status reg, the
 *      port error ints are reported in the higher level main
 *      interrupt status register and thus are passed in via the
 *      'relevant' argument.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0;

	/* HC 0 serves the first MV_PORTS_PER_HC ports, HC 1 the rest */
	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	/* ack (write-to-clear) everything we are about to handle */
	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		struct ata_port *ap = host->ports[port];
		/* NOTE(review): ap is dereferenced here before the !ap
		 * check below — confirm host->ports[] entries can never
		 * be NULL on this path.
		 */
		struct mv_port_priv *pp = ap->private_data;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			/* polled commands are handled by their pollers */
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		/* dispatch completion handling by the port's current mode */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}
1707
/* mv_pci_error - handle a PCI-level error interrupt.
 * Dumps diagnostics, clears the PCI irq cause register, then marks
 * every port whose link is not offline with AC_ERR_HOST_BUS and
 * freezes it for EH.  The cause value is pushed into the first
 * affected port's EH descriptor only.
 */
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	/* clear the PCI error irq cause */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
1747
/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      Returns IRQ_HANDLED if any work was done, IRQ_NONE otherwise.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	u32 irq_stat, irq_mask;

	spin_lock(&host->lock);
	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
	irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	/* PCI errors preempt all per-HC handling */
	if (unlikely(irq_stat & PCI_ERR)) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	/* service each host controller that has bits pending */
	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1800
Jeff Garzikc9d39132005-11-13 17:47:51 -05001801static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1802{
1803 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1804 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1805
1806 return hc_mmio + ofs;
1807}
1808
1809static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1810{
1811 unsigned int ofs;
1812
1813 switch (sc_reg_in) {
1814 case SCR_STATUS:
1815 case SCR_ERROR:
1816 case SCR_CONTROL:
1817 ofs = sc_reg_in * sizeof(u32);
1818 break;
1819 default:
1820 ofs = 0xffffffffU;
1821 break;
1822 }
1823 return ofs;
1824}
1825
Tejun Heoda3dbb12007-07-16 14:29:40 +09001826static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001827{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001828 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1829 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001830 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1831
Tejun Heoda3dbb12007-07-16 14:29:40 +09001832 if (ofs != 0xffffffffU) {
1833 *val = readl(addr + ofs);
1834 return 0;
1835 } else
1836 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001837}
1838
Tejun Heoda3dbb12007-07-16 14:29:40 +09001839static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001840{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001841 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1842 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001843 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1844
Tejun Heoda3dbb12007-07-16 14:29:40 +09001845 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001846 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001847 return 0;
1848 } else
1849 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001850}
1851
Jeff Garzik522479f2005-11-12 22:14:02 -05001852static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1853{
Jeff Garzik522479f2005-11-12 22:14:02 -05001854 int early_5080;
1855
Auke Kok44c10132007-06-08 15:46:36 -07001856 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
Jeff Garzik522479f2005-11-12 22:14:02 -05001857
1858 if (!early_5080) {
1859 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1860 tmp |= (1 << 0);
1861 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1862 }
1863
1864 mv_reset_pci_bus(pdev, mmio);
1865}
1866
/* mv5_reset_flash - write a fixed configuration to the 50xx flash
 * control register.  Constant presumably mirrors Marvell's reference
 * code; bit semantics are not documented here — TODO confirm.
 */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1871
/* mv5_read_preamp - cache a port's PHY pre-emphasis/amplitude fields.
 * Reads MV5_PHY_MODE and stores the two bit-fields in hpriv->signal[idx]
 * so mv5_phy_errata() can reinstall them later.
 */
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}
1883
/* mv5_enable_leds - configure GPIO/ROM-BAR control for LED operation
 * on 50xx parts.
 */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): "|= ~(1 << 0)" sets every bit EXCEPT bit 0,
	 * rather than clearing bit 0; mv5_reset_bus() does
	 * "|= (1 << 0)" on this same register.  "&= ~(1 << 0)" may
	 * have been intended — confirm against chip documentation
	 * before changing.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1896
/* mv5_phy_errata - apply 50xx PHY fixups for one port.
 * Optionally applies the 50XXB0 APM/squelch workaround, then installs
 * the pre-emphasis/amplitude values saved by mv5_read_preamp() into
 * the masked MV5_PHY_MODE fields.
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		/* 50XXB0 errata workaround: tweak LT mode ... */
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		/* ... and force the low PHY control field to 0x1 */
		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* reinstall saved pre-emphasis/amplitude into the masked fields */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1922
Jeff Garzikc9d39132005-11-13 17:47:51 -05001923
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* mv5_reset_hc_port - bring one 50xx port's EDMA block to a known
 * state: disable EDMA, reset the channel, then zero/seed the EDMA
 * registers.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/* disable EDMA before touching anything else */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1950
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* mv5_reset_one_hc - reset one 50xx host controller's shared registers:
 * zero four HC registers, then rewrite the masked fields of the
 * register at offset 0x20.
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	/* keep the 0x1c1c1c1c bits, force in 0x03030303 */
	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
1969
1970static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1971 unsigned int n_hc)
1972{
1973 unsigned int hc, port;
1974
1975 for (hc = 0; hc < n_hc; hc++) {
1976 for (port = 0; port < MV_PORTS_PER_HC; port++)
1977 mv5_reset_hc_port(hpriv, mmio,
1978 (hc * MV_PORTS_PER_HC) + port);
1979
1980 mv5_reset_one_hc(hpriv, mmio, hc);
1981 }
1982
1983 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05001984}
1985
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
/* mv_reset_pci_bus - reset the chip's PCI-facing state: restrict
 * MV_PCI_MODE, then zero the timers, masks and error-latch registers.
 */
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	struct ata_host     *host = dev_get_drvdata(&pdev->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	/* clear bits 23:16 of MV_PCI_MODE */
	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	/* irq cause/mask offsets vary by chip; taken from host priv */
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
2011
/* mv6_reset_flash - 60xx flash reset: do the 50xx flash reset, then
 * rewrite the GPIO port control register (keep low 2 bits, set bits
 * 5 and 6).
 */
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
2023
/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @hpriv: host private info (unused here)
 *      @mmio: base address of the HBA
 *      @n_hc: number of host controllers (unused here)
 *
 *      This routine only applies to 6xxx parts.
 *
 *      Returns 0 on success, 1 if any step times out.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* wait up to ~1000us for the PCI master to go idle */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
2087
/* mv6_read_preamp - cache a 60xx port's PHY amplitude/pre-emphasis.
 * If MV_RESET_CFG bit 0 is clear, install fixed defaults instead of
 * reading PHY_MODE2.
 */
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		/* defaults when the strap bit is not set */
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
2107
/* mv6_enable_leds - write the 60xx GPIO port control value (bits 5/6)
 * used for LED operation.
 */
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
2112
/* mv6_phy_errata - apply 60xx/IIE PHY errata fixups for one port:
 * optional PHY_MODE2 bit pulsing (60X1B2/60X1C0), a fixed PHY_MODE3
 * value, an optional PHY_MODE4 update, then reinstallation of the
 * saved amplitude/pre-emphasis into PHY_MODE2.
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		/* pulse bits 16/31 in PHY_MODE2, with 200us settle after
		 * each write
		 */
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* on 60X1B2, register 0x310 is saved and restored around
		 * the PHY_MODE4 write — presumably the write disturbs it;
		 * confirm against the errata document
		 */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
2178
/**
 * mv_channel_reset - hard-reset one SATA channel's EDMA engine
 * @hpriv: host private data (chip generation / errata flags)
 * @mmio: base address of the host controller registers
 * @port_no: port (channel) number to reset
 *
 * Asserts ATA_RST in the EDMA command register, reprograms the SATA
 * interface control register on Gen-II parts, then releases the reset
 * and lets the per-chip phy_errata hook apply its fixups.  The order
 * of the register writes below is significant; do not reorder.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	/* assert channel reset (writelfl = write + flush by read-back) */
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);	/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	/* per-chip-generation PHY fixups (mv5/mv6 ops) */
	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	/* Gen-I parts need extra settle time after reset */
	if (IS_GEN_I(hpriv))
		mdelay(1);
}
2205
/**
 * mv_phy_reset - Perform eDMA reset followed by COMRESET
 * @ap: ATA channel to manipulate
 * @class: out-param; receives the device classification result
 * @deadline: jiffies value after which we must stop waiting
 *
 * Part of this is taken from __sata_phy_reset and modified to
 * not sleep since this routine gets called from interrupt level.
 *
 * NOTE(review): the "does not sleep" claim above looks stale — the
 * body calls msleep() several times, so this must now run in process
 * context (libata EH); confirm against the callers.
 *
 * LOCKING:
 * Inherited from caller.  This is coded to safe to call at
 * interrupt level, i.e. it does not sleep.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			unsigned long deadline)
{
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
	{
		/* these deliberately shadow the outer sstatus; DEBUG only */
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* Issue COMRESET via SControl: DET=1 (reset), then DET=0 (release) */
comreset_retry:
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
	msleep(1);

	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
	msleep(20);

	/* poll SStatus.DET until link is established (3) or absent (0) */
	do {
		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata: Gen-II parts may need the whole COMRESET
	 * repeated if SStatus lands in an unexpected state
	 */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	if (ata_link_offline(&ap->link)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		msleep(500);
		if (retry-- <= 0)
			break;
		if (time_after(jiffies, deadline))
			break;
	}

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap->link.device, 1, NULL);

	/* ack any EDMA error causes latched during the reset */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* EDMA must have been stopped before we got here */
	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}
2309
Tejun Heocc0680a2007-08-06 18:36:23 +09002310static int mv_prereset(struct ata_link *link, unsigned long deadline)
Jeff Garzik22374672005-11-17 10:59:48 -05002311{
Tejun Heocc0680a2007-08-06 18:36:23 +09002312 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002313 struct mv_port_priv *pp = ap->private_data;
Tejun Heocc0680a2007-08-06 18:36:23 +09002314 struct ata_eh_context *ehc = &link->eh_context;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002315 int rc;
Jeff Garzik0ea9e172007-07-13 17:06:45 -04002316
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002317 rc = mv_stop_dma(ap);
2318 if (rc)
2319 ehc->i.action |= ATA_EH_HARDRESET;
2320
2321 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2322 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2323 ehc->i.action |= ATA_EH_HARDRESET;
2324 }
2325
2326 /* if we're about to do hardreset, nothing more to do */
2327 if (ehc->i.action & ATA_EH_HARDRESET)
2328 return 0;
2329
Tejun Heocc0680a2007-08-06 18:36:23 +09002330 if (ata_link_online(link))
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002331 rc = ata_wait_ready(ap, deadline);
2332 else
2333 rc = -ENODEV;
2334
2335 return rc;
Jeff Garzik22374672005-11-17 10:59:48 -05002336}
2337
/*
 * mv_hardreset - libata EH hardreset hook
 *
 * Quiesces EDMA, hard-resets the channel (EDMA engine + SATA
 * interface), then COMRESETs the PHY and classifies the attached
 * device via mv_phy_reset().  The three-step order is required.
 * Always returns 0; classification result goes through @class.
 */
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	return 0;
}
2353
Tejun Heocc0680a2007-08-06 18:36:23 +09002354static void mv_postreset(struct ata_link *link, unsigned int *classes)
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002355{
Tejun Heocc0680a2007-08-06 18:36:23 +09002356 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002357 u32 serr;
2358
2359 /* print link status */
Tejun Heocc0680a2007-08-06 18:36:23 +09002360 sata_print_link_status(link);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002361
2362 /* clear SError */
Tejun Heocc0680a2007-08-06 18:36:23 +09002363 sata_scr_read(link, SCR_ERROR, &serr);
2364 sata_scr_write_flush(link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002365
2366 /* bail out if no device is present */
2367 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2368 DPRINTK("EXIT, no device\n");
2369 return;
2370 }
2371
2372 /* set up device control */
2373 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2374}
2375
/* Run libata EH with this driver's reset/postreset callbacks. */
static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}
2381
/* After an internal (EH-issued) command completes, make sure EDMA is off. */
static void mv_post_int_cmd(struct ata_queued_cmd *qc)
{
	mv_stop_dma(qc->ap);
}
2386
2387static void mv_eh_freeze(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04002388{
Tejun Heo0d5ff562007-02-01 15:06:36 +09002389 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002390 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2391 u32 tmp, mask;
2392 unsigned int shift;
Brett Russ31961942005-09-30 01:36:00 -04002393
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002394 /* FIXME: handle coalescing completion events properly */
Brett Russ31961942005-09-30 01:36:00 -04002395
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002396 shift = ap->port_no * 2;
2397 if (hc > 0)
2398 shift++;
Brett Russ31961942005-09-30 01:36:00 -04002399
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002400 mask = 0x3 << shift;
Brett Russ31961942005-09-30 01:36:00 -04002401
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002402 /* disable assertion of portN err, done events */
2403 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2404 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2405}
2406
/*
 * mv_eh_thaw - libata EH thaw hook
 *
 * Clears any error/interrupt causes that accumulated while the port
 * was frozen, then re-enables this port's err/done bits in the main
 * IRQ mask register.  Causes must be cleared BEFORE unmasking, or a
 * stale event would fire immediately.
 */
static void mv_eh_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	/* ports 4-7 are on HC1: bit pair shifts up by one, and the
	 * per-HC cause register indexes ports 0-3 within that HC
	 */
	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
2439
/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup: the taskfile shadow registers are laid out
	 * as consecutive 32-bit slots starting at SHD_BLK_OFS
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions
	 * (SError is cleared by writing back the value just read)
	 */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
2488
/*
 * mv_chip_id - identify the chip variant and select ops/errata flags
 * @host: ATA host being probed
 * @board_idx: index into the board table from the PCI ID entry
 *
 * Sets hpriv->ops (Gen-I vs Gen-II register handling), the chip
 * generation flag, and per-revision errata flags based on the PCI
 * revision ID.  Also selects PCI vs PCIe interrupt register offsets.
 * Returns 0 on success, 1 on an unrecognized board index.
 */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* fall through - 7042 is otherwise handled as a 6042 */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		dev_printk(KERN_ERR, &pdev->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	/* PCIe parts use a different block of interrupt cause/mask regs */
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
2624
/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 * @board_idx: controller index
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 * The sequence (mask all irqs -> identify chip -> read preamps ->
 * reset HCs -> per-port errata -> port init -> ack & unmask) is
 * order-dependent.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask: keep everything quiet during init */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* save PHY signal amplitude/pre-emphasis before resetting */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);	/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);
		unsigned int offset = port_mmio - mmio;

		mv_port_init(&ap->ioaddr, port_mmio);

		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	/* and unmask interrupt generation for host regs */
	writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);

	/* Gen-I chips have a different main irq mask layout */
	if (IS_GEN_I(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + hpriv->irq_cause_ofs),
		readl(mmio + hpriv->irq_mask_ofs));

done:
	return rc;
}
2720
Brett Russ05b308e2005-10-05 17:08:53 -04002721/**
2722 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002723 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002724 *
2725 * FIXME: complete this.
2726 *
2727 * LOCKING:
2728 * Inherited from caller.
2729 */
Tejun Heo4447d352007-04-17 23:44:08 +09002730static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002731{
Tejun Heo4447d352007-04-17 23:44:08 +09002732 struct pci_dev *pdev = to_pci_dev(host->dev);
2733 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07002734 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002735 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002736
2737 /* Use this to determine the HW stepping of the chip so we know
2738 * what errata to workaround
2739 */
Brett Russ31961942005-09-30 01:36:00 -04002740 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2741 if (scc == 0)
2742 scc_s = "SCSI";
2743 else if (scc == 0x01)
2744 scc_s = "RAID";
2745 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002746 scc_s = "?";
2747
2748 if (IS_GEN_I(hpriv))
2749 gen = "I";
2750 else if (IS_GEN_II(hpriv))
2751 gen = "II";
2752 else if (IS_GEN_IIE(hpriv))
2753 gen = "IIE";
2754 else
2755 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002756
Jeff Garzika9524a72005-10-30 14:39:11 -05002757 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002758 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2759 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002760 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2761}
2762
/**
 * mv_init_one - handle a positive probe of a Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * Allocates the ATA host and driver-private data, maps BARs via the
 * managed (devm/pcim) APIs so cleanup is automatic on failure, runs
 * chip init, and activates the host.  Returns 0 or a negative errno.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources (managed: released automatically on error) */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	/* set DMA masks (64-bit if available, else 32-bit) */
	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts: fall back to legacy INTx if MSI fails/disabled */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
2825
/* Module entry point: register the PCI driver. */
static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}
2830
/* Module exit point: unregister the PCI driver. */
static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}
2835
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

/* 0444: parameter is readable via sysfs but not writable at runtime */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);