blob: f117f6a01676bc4af2d35fc256fe354b2e74855e [file] [log] [blame]
/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
23
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is quite often not
  worth the latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.

*/
59
60
Brett Russ20f733e2005-09-01 18:26:17 -040061#include <linux/kernel.h>
62#include <linux/module.h>
63#include <linux/pci.h>
64#include <linux/init.h>
65#include <linux/blkdev.h>
66#include <linux/delay.h>
67#include <linux/interrupt.h>
Brett Russ20f733e2005-09-01 18:26:17 -040068#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050069#include <linux/device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040070#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050071#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040072#include <scsi/scsi_device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040073#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040074
/* Driver identity, reported via sysfs and module info. */
#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.01"
Brett Russ20f733e2005-09-01 18:26:17 -040077
78enum {
79 /* BAR's are enumerated in terms of pci_resource_start() terms */
80 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
81 MV_IO_BAR = 2, /* offset 0x18: IO space */
82 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
83
84 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
85 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
86
87 MV_PCI_REG_BASE = 0,
88 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
Mark Lord615ab952006-05-19 16:24:56 -040089 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
90 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
91 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
92 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
93 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
94
Brett Russ20f733e2005-09-01 18:26:17 -040095 MV_SATAHC0_REG_BASE = 0x20000,
Jeff Garzik522479f2005-11-12 22:14:02 -050096 MV_FLASH_CTL = 0x1046c,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -050097 MV_GPIO_PORT_CTL = 0x104f0,
98 MV_RESET_CFG = 0x180d8,
Brett Russ20f733e2005-09-01 18:26:17 -040099
100 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
101 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
102 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
103 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
104
Brett Russ31961942005-09-30 01:36:00 -0400105 MV_MAX_Q_DEPTH = 32,
106 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
107
108 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
109 * CRPB needs alignment on a 256B boundary. Size == 256B
110 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
111 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
112 */
113 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
114 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
115 MV_MAX_SG_CT = 176,
116 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
117 MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
118
Brett Russ20f733e2005-09-01 18:26:17 -0400119 MV_PORTS_PER_HC = 4,
120 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
121 MV_PORT_HC_SHIFT = 2,
Brett Russ31961942005-09-30 01:36:00 -0400122 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
Brett Russ20f733e2005-09-01 18:26:17 -0400123 MV_PORT_MASK = 3,
124
125 /* Host Flags */
126 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
127 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400128 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400129 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
130 ATA_FLAG_PIO_POLLING,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500131 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
Brett Russ20f733e2005-09-01 18:26:17 -0400132
Brett Russ31961942005-09-30 01:36:00 -0400133 CRQB_FLAG_READ = (1 << 0),
134 CRQB_TAG_SHIFT = 1,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400135 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
136 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
Brett Russ31961942005-09-30 01:36:00 -0400137 CRQB_CMD_ADDR_SHIFT = 8,
138 CRQB_CMD_CS = (0x2 << 11),
139 CRQB_CMD_LAST = (1 << 15),
140
141 CRPB_FLAG_STATUS_SHIFT = 8,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400142 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
143 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
Brett Russ31961942005-09-30 01:36:00 -0400144
145 EPRD_FLAG_END_OF_TBL = (1 << 31),
146
Brett Russ20f733e2005-09-01 18:26:17 -0400147 /* PCI interface registers */
148
Brett Russ31961942005-09-30 01:36:00 -0400149 PCI_COMMAND_OFS = 0xc00,
150
Brett Russ20f733e2005-09-01 18:26:17 -0400151 PCI_MAIN_CMD_STS_OFS = 0xd30,
152 STOP_PCI_MASTER = (1 << 2),
153 PCI_MASTER_EMPTY = (1 << 3),
154 GLOB_SFT_RST = (1 << 4),
155
Jeff Garzik522479f2005-11-12 22:14:02 -0500156 MV_PCI_MODE = 0xd00,
157 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
158 MV_PCI_DISC_TIMER = 0xd04,
159 MV_PCI_MSI_TRIGGER = 0xc38,
160 MV_PCI_SERR_MASK = 0xc28,
161 MV_PCI_XBAR_TMOUT = 0x1d04,
162 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
163 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
164 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
165 MV_PCI_ERR_COMMAND = 0x1d50,
166
Mark Lord02a121d2007-12-01 13:07:22 -0500167 PCI_IRQ_CAUSE_OFS = 0x1d58,
168 PCI_IRQ_MASK_OFS = 0x1d5c,
Brett Russ20f733e2005-09-01 18:26:17 -0400169 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
170
Mark Lord02a121d2007-12-01 13:07:22 -0500171 PCIE_IRQ_CAUSE_OFS = 0x1900,
172 PCIE_IRQ_MASK_OFS = 0x1910,
Mark Lord646a4da2008-01-26 18:30:37 -0500173 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
Mark Lord02a121d2007-12-01 13:07:22 -0500174
Brett Russ20f733e2005-09-01 18:26:17 -0400175 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
176 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
177 PORT0_ERR = (1 << 0), /* shift by port # */
178 PORT0_DONE = (1 << 1), /* shift by port # */
179 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
180 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
181 PCI_ERR = (1 << 18),
182 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
183 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500184 PORTS_0_3_COAL_DONE = (1 << 8),
185 PORTS_4_7_COAL_DONE = (1 << 17),
Brett Russ20f733e2005-09-01 18:26:17 -0400186 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
187 GPIO_INT = (1 << 22),
188 SELF_INT = (1 << 23),
189 TWSI_INT = (1 << 24),
190 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500191 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
Jeff Garzik8b260242005-11-12 12:32:50 -0500192 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
Brett Russ20f733e2005-09-01 18:26:17 -0400193 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
194 HC_MAIN_RSVD),
Jeff Garzikfb621e22007-02-25 04:19:45 -0500195 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
196 HC_MAIN_RSVD_5),
Brett Russ20f733e2005-09-01 18:26:17 -0400197
198 /* SATAHC registers */
199 HC_CFG_OFS = 0,
200
201 HC_IRQ_CAUSE_OFS = 0x14,
Brett Russ31961942005-09-30 01:36:00 -0400202 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
Brett Russ20f733e2005-09-01 18:26:17 -0400203 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
204 DEV_IRQ = (1 << 8), /* shift by port # */
205
206 /* Shadow block registers */
Brett Russ31961942005-09-30 01:36:00 -0400207 SHD_BLK_OFS = 0x100,
208 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
Brett Russ20f733e2005-09-01 18:26:17 -0400209
210 /* SATA registers */
211 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
212 SATA_ACTIVE_OFS = 0x350,
Mark Lord0c589122008-01-26 18:31:16 -0500213 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500214 PHY_MODE3 = 0x310,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500215 PHY_MODE4 = 0x314,
216 PHY_MODE2 = 0x330,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500217 MV5_PHY_MODE = 0x74,
218 MV5_LT_MODE = 0x30,
219 MV5_PHY_CTL = 0x0C,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500220 SATA_INTERFACE_CTL = 0x050,
221
222 MV_M2_PREAMP_MASK = 0x7e0,
Brett Russ20f733e2005-09-01 18:26:17 -0400223
224 /* Port registers */
225 EDMA_CFG_OFS = 0,
Mark Lord0c589122008-01-26 18:31:16 -0500226 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
227 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
228 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
229 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
230 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
Brett Russ20f733e2005-09-01 18:26:17 -0400231
232 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
233 EDMA_ERR_IRQ_MASK_OFS = 0xc,
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400234 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
235 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
236 EDMA_ERR_DEV = (1 << 2), /* device error */
237 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
238 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
239 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400240 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
241 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400242 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400243 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400244 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
245 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
246 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
247 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
Mark Lord646a4da2008-01-26 18:30:37 -0500248
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400249 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500250 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
251 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
252 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
253 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
254
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400255 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500256
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400257 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500258 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
259 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
260 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
261 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
262 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
263
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400264 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500265
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400266 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400267 EDMA_ERR_OVERRUN_5 = (1 << 5),
268 EDMA_ERR_UNDERRUN_5 = (1 << 6),
Mark Lord646a4da2008-01-26 18:30:37 -0500269
270 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
271 EDMA_ERR_LNK_CTRL_RX_1 |
272 EDMA_ERR_LNK_CTRL_RX_3 |
273 EDMA_ERR_LNK_CTRL_TX,
274
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400275 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
276 EDMA_ERR_PRD_PAR |
277 EDMA_ERR_DEV_DCON |
278 EDMA_ERR_DEV_CON |
279 EDMA_ERR_SERR |
280 EDMA_ERR_SELF_DIS |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400281 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400282 EDMA_ERR_CRPB_PAR |
283 EDMA_ERR_INTRL_PAR |
284 EDMA_ERR_IORDY |
285 EDMA_ERR_LNK_CTRL_RX_2 |
286 EDMA_ERR_LNK_DATA_RX |
287 EDMA_ERR_LNK_DATA_TX |
288 EDMA_ERR_TRANS_PROTO,
289 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
290 EDMA_ERR_PRD_PAR |
291 EDMA_ERR_DEV_DCON |
292 EDMA_ERR_DEV_CON |
293 EDMA_ERR_OVERRUN_5 |
294 EDMA_ERR_UNDERRUN_5 |
295 EDMA_ERR_SELF_DIS_5 |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400296 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400297 EDMA_ERR_CRPB_PAR |
298 EDMA_ERR_INTRL_PAR |
299 EDMA_ERR_IORDY,
Brett Russ20f733e2005-09-01 18:26:17 -0400300
Brett Russ31961942005-09-30 01:36:00 -0400301 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
302 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400303
304 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
305 EDMA_REQ_Q_PTR_SHIFT = 5,
306
307 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
308 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
309 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400310 EDMA_RSP_Q_PTR_SHIFT = 3,
311
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400312 EDMA_CMD_OFS = 0x28, /* EDMA command register */
313 EDMA_EN = (1 << 0), /* enable EDMA */
314 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
315 ATA_RST = (1 << 2), /* reset trans/link/phy */
Brett Russ20f733e2005-09-01 18:26:17 -0400316
Jeff Garzikc9d39132005-11-13 17:47:51 -0500317 EDMA_IORDY_TMOUT = 0x34,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500318 EDMA_ARB_CFG = 0x38,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500319
Brett Russ31961942005-09-30 01:36:00 -0400320 /* Host private flags (hp_flags) */
321 MV_HP_FLAG_MSI = (1 << 0),
Jeff Garzik47c2b672005-11-12 21:13:17 -0500322 MV_HP_ERRATA_50XXB0 = (1 << 1),
323 MV_HP_ERRATA_50XXB2 = (1 << 2),
324 MV_HP_ERRATA_60X1B2 = (1 << 3),
325 MV_HP_ERRATA_60X1C0 = (1 << 4),
Jeff Garzike4e7b892006-01-31 12:18:41 -0500326 MV_HP_ERRATA_XX42A0 = (1 << 5),
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400327 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
328 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
329 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
Mark Lord02a121d2007-12-01 13:07:22 -0500330 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
Brett Russ20f733e2005-09-01 18:26:17 -0400331
Brett Russ31961942005-09-30 01:36:00 -0400332 /* Port private flags (pp_flags) */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400333 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
334 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
Brett Russ31961942005-09-30 01:36:00 -0400335};
336
/* Chip-generation tests on a struct mv_host_priv's hp_flags. */
#define IS_GEN_I(hpriv)		((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500340
enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill-sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
355
/* Board index into mv_port_info[]; also the driver_data in mv_pci_tbl. */
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};
365
Brett Russ31961942005-09-30 01:36:00 -0400366/* Command ReQuest Block: 32B */
367struct mv_crqb {
Mark Lorde1469872006-05-22 19:02:03 -0400368 __le32 sg_addr;
369 __le32 sg_addr_hi;
370 __le16 ctrl_flags;
371 __le16 ata_cmd[11];
Brett Russ31961942005-09-30 01:36:00 -0400372};
373
Jeff Garzike4e7b892006-01-31 12:18:41 -0500374struct mv_crqb_iie {
Mark Lorde1469872006-05-22 19:02:03 -0400375 __le32 addr;
376 __le32 addr_hi;
377 __le32 flags;
378 __le32 len;
379 __le32 ata_cmd[4];
Jeff Garzike4e7b892006-01-31 12:18:41 -0500380};
381
Brett Russ31961942005-09-30 01:36:00 -0400382/* Command ResPonse Block: 8B */
383struct mv_crpb {
Mark Lorde1469872006-05-22 19:02:03 -0400384 __le16 id;
385 __le16 flags;
386 __le32 tmstmp;
Brett Russ31961942005-09-30 01:36:00 -0400387};
388
389/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
390struct mv_sg {
Mark Lorde1469872006-05-22 19:02:03 -0400391 __le32 addr;
392 __le32 flags_size;
393 __le32 addr_hi;
394 __le32 reserved;
Brett Russ20f733e2005-09-01 18:26:17 -0400395};
396
397struct mv_port_priv {
Brett Russ31961942005-09-30 01:36:00 -0400398 struct mv_crqb *crqb;
399 dma_addr_t crqb_dma;
400 struct mv_crpb *crpb;
401 dma_addr_t crpb_dma;
402 struct mv_sg *sg_tbl;
403 dma_addr_t sg_tbl_dma;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400404
405 unsigned int req_idx;
406 unsigned int resp_idx;
407
Brett Russ31961942005-09-30 01:36:00 -0400408 u32 pp_flags;
Brett Russ20f733e2005-09-01 18:26:17 -0400409};
410
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500411struct mv_port_signal {
412 u32 amps;
413 u32 pre;
414};
415
Mark Lord02a121d2007-12-01 13:07:22 -0500416struct mv_host_priv {
417 u32 hp_flags;
418 struct mv_port_signal signal[8];
419 const struct mv_hw_ops *ops;
420 u32 irq_cause_ofs;
421 u32 irq_mask_ofs;
422 u32 unmask_all_irqs;
423};
424
Jeff Garzik47c2b672005-11-12 21:13:17 -0500425struct mv_hw_ops {
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500426 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
427 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500428 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
429 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
430 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500431 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
432 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500433 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
434 void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500435};
436
Brett Russ20f733e2005-09-01 18:26:17 -0400437static void mv_irq_clear(struct ata_port *ap);
Tejun Heoda3dbb12007-07-16 14:29:40 +0900438static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
439static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
440static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
441static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Brett Russ31961942005-09-30 01:36:00 -0400442static int mv_port_start(struct ata_port *ap);
443static void mv_port_stop(struct ata_port *ap);
444static void mv_qc_prep(struct ata_queued_cmd *qc);
Jeff Garzike4e7b892006-01-31 12:18:41 -0500445static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
Tejun Heo9a3d9eb2006-01-23 13:09:36 +0900446static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400447static void mv_error_handler(struct ata_port *ap);
448static void mv_post_int_cmd(struct ata_queued_cmd *qc);
449static void mv_eh_freeze(struct ata_port *ap);
450static void mv_eh_thaw(struct ata_port *ap);
Brett Russ20f733e2005-09-01 18:26:17 -0400451static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
452
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500453static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
454 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500455static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
456static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
457 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500458static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
459 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500460static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
461static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500462
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500463static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
464 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500465static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
466static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
467 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500468static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
469 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500470static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
471static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500472static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
473 unsigned int port_no);
Mark Lord0c589122008-01-26 18:31:16 -0500474static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
475 void __iomem *port_mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500476
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400477static struct scsi_host_template mv5_sht = {
Brett Russ20f733e2005-09-01 18:26:17 -0400478 .module = THIS_MODULE,
479 .name = DRV_NAME,
480 .ioctl = ata_scsi_ioctl,
481 .queuecommand = ata_scsi_queuecmd,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400482 .can_queue = ATA_DEF_QUEUE,
483 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400484 .sg_tablesize = MV_MAX_SG_CT / 2,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400485 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
486 .emulated = ATA_SHT_EMULATED,
487 .use_clustering = 1,
488 .proc_name = DRV_NAME,
489 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzik3be6cbd2007-10-18 16:21:18 -0400490 .slave_configure = ata_scsi_slave_config,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400491 .slave_destroy = ata_scsi_slave_destroy,
492 .bios_param = ata_std_bios_param,
493};
494
495static struct scsi_host_template mv6_sht = {
496 .module = THIS_MODULE,
497 .name = DRV_NAME,
498 .ioctl = ata_scsi_ioctl,
499 .queuecommand = ata_scsi_queuecmd,
500 .can_queue = ATA_DEF_QUEUE,
Brett Russ20f733e2005-09-01 18:26:17 -0400501 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400502 .sg_tablesize = MV_MAX_SG_CT / 2,
Brett Russ20f733e2005-09-01 18:26:17 -0400503 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
504 .emulated = ATA_SHT_EMULATED,
Jeff Garzikd88184f2007-02-26 01:26:06 -0500505 .use_clustering = 1,
Brett Russ20f733e2005-09-01 18:26:17 -0400506 .proc_name = DRV_NAME,
507 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzik3be6cbd2007-10-18 16:21:18 -0400508 .slave_configure = ata_scsi_slave_config,
Tejun Heoccf68c32006-05-31 18:28:09 +0900509 .slave_destroy = ata_scsi_slave_destroy,
Brett Russ20f733e2005-09-01 18:26:17 -0400510 .bios_param = ata_std_bios_param,
Brett Russ20f733e2005-09-01 18:26:17 -0400511};
512
Jeff Garzikc9d39132005-11-13 17:47:51 -0500513static const struct ata_port_operations mv5_ops = {
Jeff Garzikc9d39132005-11-13 17:47:51 -0500514 .tf_load = ata_tf_load,
515 .tf_read = ata_tf_read,
516 .check_status = ata_check_status,
517 .exec_command = ata_exec_command,
518 .dev_select = ata_std_dev_select,
519
Jeff Garzikcffacd82007-03-09 09:46:47 -0500520 .cable_detect = ata_cable_sata,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500521
522 .qc_prep = mv_qc_prep,
523 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900524 .data_xfer = ata_data_xfer,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500525
Jeff Garzikc9d39132005-11-13 17:47:51 -0500526 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900527 .irq_on = ata_irq_on,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500528
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400529 .error_handler = mv_error_handler,
530 .post_internal_cmd = mv_post_int_cmd,
531 .freeze = mv_eh_freeze,
532 .thaw = mv_eh_thaw,
533
Jeff Garzikc9d39132005-11-13 17:47:51 -0500534 .scr_read = mv5_scr_read,
535 .scr_write = mv5_scr_write,
536
537 .port_start = mv_port_start,
538 .port_stop = mv_port_stop,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500539};
540
541static const struct ata_port_operations mv6_ops = {
Brett Russ20f733e2005-09-01 18:26:17 -0400542 .tf_load = ata_tf_load,
543 .tf_read = ata_tf_read,
544 .check_status = ata_check_status,
545 .exec_command = ata_exec_command,
546 .dev_select = ata_std_dev_select,
547
Jeff Garzikcffacd82007-03-09 09:46:47 -0500548 .cable_detect = ata_cable_sata,
Brett Russ20f733e2005-09-01 18:26:17 -0400549
Brett Russ31961942005-09-30 01:36:00 -0400550 .qc_prep = mv_qc_prep,
551 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900552 .data_xfer = ata_data_xfer,
Brett Russ20f733e2005-09-01 18:26:17 -0400553
Brett Russ20f733e2005-09-01 18:26:17 -0400554 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900555 .irq_on = ata_irq_on,
Brett Russ20f733e2005-09-01 18:26:17 -0400556
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400557 .error_handler = mv_error_handler,
558 .post_internal_cmd = mv_post_int_cmd,
559 .freeze = mv_eh_freeze,
560 .thaw = mv_eh_thaw,
561
Brett Russ20f733e2005-09-01 18:26:17 -0400562 .scr_read = mv_scr_read,
563 .scr_write = mv_scr_write,
564
Brett Russ31961942005-09-30 01:36:00 -0400565 .port_start = mv_port_start,
566 .port_stop = mv_port_stop,
Brett Russ20f733e2005-09-01 18:26:17 -0400567};
568
Jeff Garzike4e7b892006-01-31 12:18:41 -0500569static const struct ata_port_operations mv_iie_ops = {
Jeff Garzike4e7b892006-01-31 12:18:41 -0500570 .tf_load = ata_tf_load,
571 .tf_read = ata_tf_read,
572 .check_status = ata_check_status,
573 .exec_command = ata_exec_command,
574 .dev_select = ata_std_dev_select,
575
Jeff Garzikcffacd82007-03-09 09:46:47 -0500576 .cable_detect = ata_cable_sata,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500577
578 .qc_prep = mv_qc_prep_iie,
579 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900580 .data_xfer = ata_data_xfer,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500581
Jeff Garzike4e7b892006-01-31 12:18:41 -0500582 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900583 .irq_on = ata_irq_on,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500584
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400585 .error_handler = mv_error_handler,
586 .post_internal_cmd = mv_post_int_cmd,
587 .freeze = mv_eh_freeze,
588 .thaw = mv_eh_thaw,
589
Jeff Garzike4e7b892006-01-31 12:18:41 -0500590 .scr_read = mv_scr_read,
591 .scr_write = mv_scr_write,
592
593 .port_start = mv_port_start,
594 .port_stop = mv_port_stop,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500595};
596
Arjan van de Ven98ac62d2005-11-28 10:06:23 +0100597static const struct ata_port_info mv_port_info[] = {
Brett Russ20f733e2005-09-01 18:26:17 -0400598 { /* chip_504x */
Jeff Garzikcca39742006-08-24 03:19:22 -0400599 .flags = MV_COMMON_FLAGS,
Brett Russ31961942005-09-30 01:36:00 -0400600 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400601 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500602 .port_ops = &mv5_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400603 },
604 { /* chip_508x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400605 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
Brett Russ31961942005-09-30 01:36:00 -0400606 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400607 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500608 .port_ops = &mv5_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400609 },
Jeff Garzik47c2b672005-11-12 21:13:17 -0500610 { /* chip_5080 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400611 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500612 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400613 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500614 .port_ops = &mv5_ops,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500615 },
Brett Russ20f733e2005-09-01 18:26:17 -0400616 { /* chip_604x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400617 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Brett Russ31961942005-09-30 01:36:00 -0400618 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400619 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500620 .port_ops = &mv6_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400621 },
622 { /* chip_608x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400623 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
624 MV_FLAG_DUAL_HC,
Brett Russ31961942005-09-30 01:36:00 -0400625 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400626 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500627 .port_ops = &mv6_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400628 },
Jeff Garzike4e7b892006-01-31 12:18:41 -0500629 { /* chip_6042 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400630 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500631 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400632 .udma_mask = ATA_UDMA6,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500633 .port_ops = &mv_iie_ops,
634 },
635 { /* chip_7042 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400636 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500637 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400638 .udma_mask = ATA_UDMA6,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500639 .port_ops = &mv_iie_ops,
640 },
Brett Russ20f733e2005-09-01 18:26:17 -0400641};
642
/*
 * PCI device ID table: maps each supported Marvell (and OEM-rebadged)
 * controller to its board index in mv_port_info[].  Terminated by an
 * empty entry, as required by the PCI core.
 */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
672
/*
 * PCI driver glue: mv_init_one() probes each matching device;
 * removal is handled by the generic libata PCI helper.
 */
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
679
/* Chip-family hardware quirk hooks for Gen-I (50xx) controllers */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
688
/* Chip-family hardware quirk hooks for Gen-II/IIE (60xx/70xx) controllers */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
697
/*
 * module options
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
702
703
Jeff Garzikd88184f2007-02-26 01:26:06 -0500704/* move to PCI layer or libata core? */
705static int pci_go_64(struct pci_dev *pdev)
706{
707 int rc;
708
709 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
710 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
711 if (rc) {
712 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
713 if (rc) {
714 dev_printk(KERN_ERR, &pdev->dev,
715 "64-bit DMA enable failed\n");
716 return rc;
717 }
718 }
719 } else {
720 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
721 if (rc) {
722 dev_printk(KERN_ERR, &pdev->dev,
723 "32-bit DMA enable failed\n");
724 return rc;
725 }
726 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
727 if (rc) {
728 dev_printk(KERN_ERR, &pdev->dev,
729 "32-bit consistent DMA enable failed\n");
730 return rc;
731 }
732 }
733
734 return rc;
735}
736
Jeff Garzikddef9bb2006-02-02 16:17:06 -0500737/*
Brett Russ20f733e2005-09-01 18:26:17 -0400738 * Functions
739 */
740
/*
 * writelfl - write a register, then read it back to flush the write
 * past any PCI posting buffers before the caller proceeds.
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
746
/* Return the MMIO base of SATA host controller @hc within chip base @base. */
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}
751
/* Map a chip-wide port number to the host controller (HC) that owns it. */
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}
756
/* Map a chip-wide port number to its index within the owning HC. */
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
761
/* Return the MMIO base of the HC that owns chip-wide port @port. */
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}
767
/*
 * Return the MMIO base of port @port: the owning HC's base, past the
 * HC arbiter block, plus the per-port register window.
 */
static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
774
/* Convenience wrapper: per-port MMIO base for a libata port. */
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}
779
Jeff Garzikcca39742006-08-24 03:19:22 -0400780static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400781{
Jeff Garzikcca39742006-08-24 03:19:22 -0400782 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400783}
784
/*
 * Intentionally empty libata irq_clear hook; presumably interrupt
 * causes are cleared elsewhere in this driver — NOTE(review): confirm
 * against the interrupt handler.
 */
static void mv_irq_clear(struct ata_port *ap)
{
}
788
/**
 *      mv_set_edma_ptrs - Program EDMA request/response queue registers
 *      @port_mmio: per-port MMIO base
 *      @hpriv: host private data (for errata flags)
 *      @pp: port private data holding queue DMA addresses and indices
 *
 *      Writes the CRQB/CRPB base addresses and the software in/out
 *      pointer indices into the port's EDMA registers.  On chips with
 *      the XX42A0 erratum, the out-pointer registers must also carry
 *      the low queue base address bits.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* request queue base must be 1KB aligned */
	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	/* response queue base must be 256B aligned */
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
828
/**
 *      mv_start_dma - Enable eDMA engine
 *      @ap: ATA channel being started
 *      @port_mmio: per-port MMIO base address
 *      @pp: port private data
 *
 *      If the engine is not already marked enabled in our cached state,
 *      clear any stale EDMA/HC/FIS interrupt causes, (re)write the EDMA
 *      configuration and queue pointers, then set EDMA_EN.  Verify the
 *      local cache of the eDMA state is accurate with a WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp)
{
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
				ap->host->iomap[MV_PRIMARY_BAR], hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			(CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			/* write 0s only to this port's bits */
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(ap, hpriv, port_mmio);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}
874
/**
 *      __mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Issue EDMA_DS if the engine is marked enabled in our cached
 *      state, then poll up to ~100ms for the hardware EDMA_EN bit to
 *      clear.  Verify the local cache of the eDMA state is accurate
 *      with a WARN_ON.  Returns 0 on success, -EIO if the engine did
 *      not stop.  Caller must hold ap->host->lock (see mv_stop_dma()).
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		/* cached state says "already stopped"; hardware must agree */
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
917
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400918static int mv_stop_dma(struct ata_port *ap)
919{
920 unsigned long flags;
921 int rc;
922
923 spin_lock_irqsave(&ap->host->lock, flags);
924 rc = __mv_stop_dma(ap);
925 spin_unlock_irqrestore(&ap->host->lock, flags);
926
927 return rc;
928}
929
#ifdef ATA_DEBUG
/*
 * Debug helper: hex-dump @bytes of MMIO space starting at @start,
 * four 32-bit words per line.
 */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	/* unsigned to match @bytes — avoids a signed/unsigned comparison */
	unsigned b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
944
/*
 * Debug helper: hex-dump the first @bytes of @pdev's PCI config space,
 * four 32-bit dwords per line.  Compiles to an empty function unless
 * ATA_DEBUG is defined.
 */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	/* unsigned to match @bytes — avoids a signed/unsigned comparison */
	unsigned b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
/*
 * Debug helper: dump PCI config space, global chip registers, each
 * HC's registers, and each port's EDMA + SATA interface registers.
 * A negative @port means "all ports/HCs".  No-op unless ATA_DEBUG.
 */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		/* dump everything */
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		/* dump just the HC/port pair that owns @port */
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
1005
Brett Russ20f733e2005-09-01 18:26:17 -04001006static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1007{
1008 unsigned int ofs;
1009
1010 switch (sc_reg_in) {
1011 case SCR_STATUS:
1012 case SCR_CONTROL:
1013 case SCR_ERROR:
1014 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1015 break;
1016 case SCR_ACTIVE:
1017 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1018 break;
1019 default:
1020 ofs = 0xffffffffU;
1021 break;
1022 }
1023 return ofs;
1024}
1025
Tejun Heoda3dbb12007-07-16 14:29:40 +09001026static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -04001027{
1028 unsigned int ofs = mv_scr_offset(sc_reg_in);
1029
Tejun Heoda3dbb12007-07-16 14:29:40 +09001030 if (ofs != 0xffffffffU) {
1031 *val = readl(mv_ap_base(ap) + ofs);
1032 return 0;
1033 } else
1034 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001035}
1036
Tejun Heoda3dbb12007-07-16 14:29:40 +09001037static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -04001038{
1039 unsigned int ofs = mv_scr_offset(sc_reg_in);
1040
Tejun Heoda3dbb12007-07-16 14:29:40 +09001041 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -04001042 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001043 return 0;
1044 } else
1045 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001046}
1047
/*
 * mv_edma_cfg - program the port's EDMA configuration register.
 *
 * Builds a non-NCQ configuration word, adding the chip-generation
 * specific bits (Gen-I burst mask, Gen-II burst/buffer settings,
 * Gen-IIE queue-cache/early-completion/cut-through bits), and writes
 * it with a posted-write flush.
 */
static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
{
	u32 cfg;

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1071
/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.  One coherent DMA chunk is carved into three
 *      consecutive regions: the CRQB request ring, the CRPB response
 *      ring, and the ePRD scatter/gather table.  All allocations are
 *      device-managed (devm/dmam), so there is no explicit free path.
 *
 *      Returns 0 on success, -ENOMEM on allocation failure, or the
 *      error from ata_pad_alloc().
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	unsigned long flags;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	/* program EDMA config + queue pointers under the host lock so we
	 * don't race the interrupt handler
	 */
	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, hpriv, port_mmio);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}
1144
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.  Memory itself is device-managed
 *      (allocated in mv_port_start() with devm/dmam), so only the DMA
 *      engine needs explicit shutdown here.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
}
1158
/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.  Each ePRD entry
 *      can describe at most 64KB and must not cross a 64KB boundary,
 *      so DMA segments are split as needed.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			/* clamp so this ePRD doesn't cross a 64KB boundary */
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	/* mark the final ePRD so the hardware knows where the table ends */
	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
1202
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001203static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001204{
Mark Lord559eeda2006-05-19 16:40:15 -04001205 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001206 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001207 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001208}
1209
/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.  The shadow-register writes packed into
 *      the CRQB are order-sensitive; the command register must be
 *      written last.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this*/

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accomodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1301
/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.  Gen-IIE chips use a different CRQB layout
 *      (struct mv_crqb_iie) that carries the full taskfile in four
 *      packed 32-bit words instead of per-register command words.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
						   what we use as our tag */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1370
/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(ap, port_mmio, pp);

	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	/* advance software producer index, wrapping via the mask */
	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1417
/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *      @qc: active command, or NULL; when non-NULL the computed error
 *           mask is attached to it, otherwise to the port's eh_info
 *
 *      In most cases, just clear the interrupt and move on.  However,
 *      some cases require an eDMA reset, which is done right before
 *      the COMRESET in mv_phy_reset().  The SERR case requires a
 *      clear of pending errors in the SATA SERROR register.  Finally,
 *      if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */

	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		/* hot plug/unplug event: force a hard reset */
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_HARDRESET;
	}

	if (IS_GEN_I(hpriv)) {
		/* 50xx parts use a different freeze mask and self-disable bit */
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	/* freeze (full EH) for serious causes, otherwise just abort */
	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
1522
1523static void mv_intr_pio(struct ata_port *ap)
1524{
1525 struct ata_queued_cmd *qc;
1526 u8 ata_status;
1527
1528 /* ignore spurious intr if drive still BUSY */
1529 ata_status = readb(ap->ioaddr.status_addr);
1530 if (unlikely(ata_status & ATA_BUSY))
1531 return;
1532
1533 /* get active ATA command */
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001534 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001535 if (unlikely(!qc)) /* no active tag */
1536 return;
1537 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1538 return;
1539
1540 /* and finally, complete the ATA command */
1541 qc->err_mask |= ac_err_mask(ata_status);
1542 ata_qc_complete(qc);
1543}
1544
/**
 *      mv_intr_edma - Handle EDMA completion interrupts on the port
 *      @ap: ATA channel to manipulate
 *
 *      Walk the EDMA response queue from our cached consumer index up
 *      to the hardware producer index, completing each finished
 *      command (or diverting to mv_err_intr() if a response carries
 *      error bits), then write the new consumer index back to the
 *      hardware out-pointer register.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else if (IS_GEN_II(hpriv))
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_6) & 0x3f;

		else /* IS_GEN_IIE */
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_7) & 0x3f;

		qc = ata_qc_from_tag(ap, tag);

		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
		 * bits (WARNING: might not necessarily be associated
		 * with this command), which -should- be clear
		 * if all is well
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if (unlikely(status & 0xff)) {
			/* note: out-pointer is NOT written back on this
			 * early return; EH will resynchronize the queue
			 */
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1616
Brett Russ05b308e2005-10-05 17:08:53 -04001617/**
1618 * mv_host_intr - Handle all interrupts on the given host controller
Jeff Garzikcca39742006-08-24 03:19:22 -04001619 * @host: host specific structure
Brett Russ05b308e2005-10-05 17:08:53 -04001620 * @relevant: port error bits relevant to this host controller
1621 * @hc: which host controller we're to look at
1622 *
1623 * Read then write clear the HC interrupt status then walk each
1624 * port connected to the HC and see if it needs servicing. Port
1625 * success ints are reported in the HC interrupt status reg, the
1626 * port error ints are reported in the higher level main
1627 * interrupt status register and thus are passed in via the
1628 * 'relevant' argument.
1629 *
1630 * LOCKING:
1631 * Inherited from caller.
1632 */
Jeff Garzikcca39742006-08-24 03:19:22 -04001633static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
Brett Russ20f733e2005-09-01 18:26:17 -04001634{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001635 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
Brett Russ20f733e2005-09-01 18:26:17 -04001636 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
Brett Russ20f733e2005-09-01 18:26:17 -04001637 u32 hc_irq_cause;
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001638 int port, port0;
Brett Russ20f733e2005-09-01 18:26:17 -04001639
Jeff Garzik35177262007-02-24 21:26:42 -05001640 if (hc == 0)
Brett Russ20f733e2005-09-01 18:26:17 -04001641 port0 = 0;
Jeff Garzik35177262007-02-24 21:26:42 -05001642 else
Brett Russ20f733e2005-09-01 18:26:17 -04001643 port0 = MV_PORTS_PER_HC;
Brett Russ20f733e2005-09-01 18:26:17 -04001644
1645 /* we'll need the HC success int register in most cases */
1646 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001647 if (!hc_irq_cause)
1648 return;
1649
1650 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001651
1652 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001653 hc, relevant, hc_irq_cause);
Brett Russ20f733e2005-09-01 18:26:17 -04001654
1655 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
Jeff Garzikcca39742006-08-24 03:19:22 -04001656 struct ata_port *ap = host->ports[port];
Mark Lord63af2a52006-03-29 09:50:31 -05001657 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001658 int have_err_bits, hard_port, shift;
Jeff Garzik55d8ca42006-03-29 19:43:31 -05001659
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001660 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
Jeff Garzika2c91a82005-11-17 05:44:44 -05001661 continue;
1662
Brett Russ31961942005-09-30 01:36:00 -04001663 shift = port << 1; /* (port * 2) */
Brett Russ20f733e2005-09-01 18:26:17 -04001664 if (port >= MV_PORTS_PER_HC) {
1665 shift++; /* skip bit 8 in the HC Main IRQ reg */
1666 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001667 have_err_bits = ((PORT0_ERR << shift) & relevant);
1668
1669 if (unlikely(have_err_bits)) {
1670 struct ata_queued_cmd *qc;
1671
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001672 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001673 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1674 continue;
1675
1676 mv_err_intr(ap, qc);
1677 continue;
Brett Russ20f733e2005-09-01 18:26:17 -04001678 }
Jeff Garzik8b260242005-11-12 12:32:50 -05001679
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001680 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1681
1682 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1683 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1684 mv_intr_edma(ap);
1685 } else {
1686 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1687 mv_intr_pio(ap);
Brett Russ20f733e2005-09-01 18:26:17 -04001688 }
1689 }
1690 VPRINTK("EXIT\n");
1691}
1692
/* Handle a PCI-level error interrupt: log it, dump all registers,
 * write-clear the PCI IRQ cause, then freeze every port that is
 * still online so EH can recover them.
 */
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	/* clear the cause register so the interrupt does not re-fire */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			/* describe the cause on the first port only */
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
1732
/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      Returns IRQ_HANDLED if any interrupt was serviced, else
 *      IRQ_NONE.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	u32 irq_stat, irq_mask;

	spin_lock(&host->lock);
	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
	irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	if (unlikely(irq_stat & PCI_ERR)) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	/* dispatch each host controller that has pending bits */
	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1785
Jeff Garzikc9d39132005-11-13 17:47:51 -05001786static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1787{
1788 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1789 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1790
1791 return hc_mmio + ofs;
1792}
1793
1794static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1795{
1796 unsigned int ofs;
1797
1798 switch (sc_reg_in) {
1799 case SCR_STATUS:
1800 case SCR_ERROR:
1801 case SCR_CONTROL:
1802 ofs = sc_reg_in * sizeof(u32);
1803 break;
1804 default:
1805 ofs = 0xffffffffU;
1806 break;
1807 }
1808 return ofs;
1809}
1810
Tejun Heoda3dbb12007-07-16 14:29:40 +09001811static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001812{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001813 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1814 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001815 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1816
Tejun Heoda3dbb12007-07-16 14:29:40 +09001817 if (ofs != 0xffffffffU) {
1818 *val = readl(addr + ofs);
1819 return 0;
1820 } else
1821 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001822}
1823
Tejun Heoda3dbb12007-07-16 14:29:40 +09001824static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001825{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001826 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1827 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001828 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1829
Tejun Heoda3dbb12007-07-16 14:29:40 +09001830 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001831 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001832 return 0;
1833 } else
1834 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001835}
1836
Jeff Garzik522479f2005-11-12 22:14:02 -05001837static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1838{
Jeff Garzik522479f2005-11-12 22:14:02 -05001839 int early_5080;
1840
Auke Kok44c10132007-06-08 15:46:36 -07001841 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
Jeff Garzik522479f2005-11-12 22:14:02 -05001842
1843 if (!early_5080) {
1844 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1845 tmp |= (1 << 0);
1846 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1847 }
1848
1849 mv_reset_pci_bus(pdev, mmio);
1850}
1851
/* Reset the 50xx flash controller by writing its control register.
 * 0x0fcfffff matches Marvell's vendor driver; exact bit meanings are
 * undocumented here -- see the chip datasheet.
 */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1856
Jeff Garzik47c2b672005-11-12 21:13:17 -05001857static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001858 void __iomem *mmio)
1859{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001860 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1861 u32 tmp;
1862
1863 tmp = readl(phy_mmio + MV5_PHY_MODE);
1864
1865 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1866 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001867}
1868
/* Enable LED signalling on 50xx parts via the GPIO port control and
 * expansion-ROM BAR control registers.
 */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): "|= ~(1 << 0)" sets every bit EXCEPT bit 0,
	 * while mv5_reset_bus() does "|= (1 << 0)".  Possibly intended
	 * to be "&= ~(1 << 0)" -- confirm against the 50xx datasheet
	 * before changing.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1881
/* Apply 50xx PHY errata workarounds to @port, then restore the
 * pre-emphasis/amplitude values previously saved by mv5_read_preamp().
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	/* pre-emphasis (12:11) and amplitude (7:5) field mask */
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		/* 50XXB0 errata: tweak LT mode and PHY control */
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* rewrite PHY mode with the saved signal parameters */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1907
Jeff Garzikc9d39132005-11-13 17:47:51 -05001908
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Disable EDMA, reset the channel, and zero out the per-port EDMA
 * register set for @port on a 50xx part.  Offsets are relative to
 * the port's register base (see ZERO above).
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1935
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Zero the HC-level interrupt/coalescing registers for @hc and
 * program the HC config register at offset 0x20 (magic values taken
 * from the vendor driver).
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
1954
1955static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1956 unsigned int n_hc)
1957{
1958 unsigned int hc, port;
1959
1960 for (hc = 0; hc < n_hc; hc++) {
1961 for (port = 0; port < MV_PORTS_PER_HC; port++)
1962 mv5_reset_hc_port(hpriv, mmio,
1963 (hc * MV_PORTS_PER_HC) + port);
1964
1965 mv5_reset_one_hc(hpriv, mmio, hc);
1966 }
1967
1968 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05001969}
1970
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
/* Reset the chip's PCI interface: clear the high byte group of
 * MV_PCI_MODE, then zero the timers, masks and error-report
 * registers (offsets relative to the chip base, see ZERO above).
 */
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	/* per-chip IRQ cause/mask offsets come from host private data */
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
1996
1997static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1998{
1999 u32 tmp;
2000
2001 mv5_reset_flash(hpriv, mmio);
2002
2003 tmp = readl(mmio + MV_GPIO_PORT_CTL);
2004 tmp &= 0x3;
2005 tmp |= (1 << 5) | (1 << 6);
2006 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2007}
2008
/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @hpriv: host private data (unused here, kept for the ops
 *              signature shared with mv5_reset_hc)
 *      @mmio: base address of the HBA
 *      @n_hc: number of host controllers (unused here)
 *
 *      This routine only applies to 6xxx parts.  Returns 0 on
 *      success, 1 if any step of the reset sequence times out.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* wait up to ~1ms for the PCI master to drain */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
2072
Jeff Garzik47c2b672005-11-12 21:13:17 -05002073static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002074 void __iomem *mmio)
2075{
2076 void __iomem *port_mmio;
2077 u32 tmp;
2078
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002079 tmp = readl(mmio + MV_RESET_CFG);
2080 if ((tmp & (1 << 0)) == 0) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002081 hpriv->signal[idx].amps = 0x7 << 8;
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002082 hpriv->signal[idx].pre = 0x1 << 5;
2083 return;
2084 }
2085
2086 port_mmio = mv_port_base(mmio, idx);
2087 tmp = readl(port_mmio + PHY_MODE2);
2088
2089 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2090 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2091}
2092
/* Enable LED signalling on 60xx parts: set GPIO port control bits
 * 6:5 (same bits mv6_reset_flash() sets), everything else cleared.
 */
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
2097
/* Apply 60xx/IIE PHY errata workarounds to @port and restore the
 * signal parameters saved by mv6_read_preamp().  Register values are
 * from Marvell's vendor driver and errata documents.
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	/* NOTE(review): mode2 and mode4 fixes key off the SAME flag
	 * set -- presumably intentional; verify against the 60x1
	 * errata list if ever touched.
	 */
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		/* pulse bit 31 with bit 16 cleared, then restore both */
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* 60X1B2 needs register 0x310 preserved across the write */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
2163
/* Assert then clear ATA_RST on @port_no's EDMA engine, run the
 * per-generation PHY errata hook, and give the reset time to
 * propagate.  Gen II parts additionally force gen2i speed in the
 * SATA interface control register while reset is asserted.
 */
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	/* 50xx parts need extra settle time after reset */
	if (IS_GEN_I(hpriv))
		mdelay(1);
}
2190
/**
 *	mv_phy_reset - Perform eDMA reset followed by COMRESET
 *	@ap: ATA channel to manipulate
 *	@class: out-parameter receiving the detected device class
 *	@deadline: jiffies value after which we give up waiting
 *
 *	Part of this is taken from __sata_phy_reset and modified to
 *	not sleep since this routine gets called from interrupt level.
 *
 *	LOCKING:
 *	Inherited from caller.  This is coded to safe to call at
 *	interrupt level, i.e. it does not sleep.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			 unsigned long deadline)
{
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;		/* COMRESET retries for the Gen-II errata below */
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
	{
		/* NOTE: these locals deliberately shadow the outer sstatus */
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* Issue COMRESET via SControl: 0x301 asserts, 0x300 releases
	 * (presumably driving the SControl DET field -- standard SATA
	 * COMRESET sequencing; confirm against the SATA spec).
	 */
comreset_retry:
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
	msleep(1);

	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
	msleep(20);

	/* poll SStatus until the link reports device-present or no-device */
	do {
		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* no device on the link: report and bail */
	if (ata_link_offline(&ap->link)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		msleep(500);
		if (retry-- <= 0)
			break;
		if (time_after(jiffies, deadline))
			break;
	}

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap->link.device, 1, NULL);

	/* clear any stale EDMA error-cause bits left by the reset */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* EDMA must have been stopped by the caller before resetting */
	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}
2294
Tejun Heocc0680a2007-08-06 18:36:23 +09002295static int mv_prereset(struct ata_link *link, unsigned long deadline)
Jeff Garzik22374672005-11-17 10:59:48 -05002296{
Tejun Heocc0680a2007-08-06 18:36:23 +09002297 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002298 struct mv_port_priv *pp = ap->private_data;
Tejun Heocc0680a2007-08-06 18:36:23 +09002299 struct ata_eh_context *ehc = &link->eh_context;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002300 int rc;
Jeff Garzik0ea9e172007-07-13 17:06:45 -04002301
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002302 rc = mv_stop_dma(ap);
2303 if (rc)
2304 ehc->i.action |= ATA_EH_HARDRESET;
2305
2306 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2307 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2308 ehc->i.action |= ATA_EH_HARDRESET;
2309 }
2310
2311 /* if we're about to do hardreset, nothing more to do */
2312 if (ehc->i.action & ATA_EH_HARDRESET)
2313 return 0;
2314
Tejun Heocc0680a2007-08-06 18:36:23 +09002315 if (ata_link_online(link))
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002316 rc = ata_wait_ready(ap, deadline);
2317 else
2318 rc = -ENODEV;
2319
2320 return rc;
Jeff Garzik22374672005-11-17 10:59:48 -05002321}
2322
/**
 *	mv_hardreset - libata hardreset hook
 *	@link: link to reset
 *	@class: out-parameter receiving detected device class
 *	@deadline: jiffies limit passed through to the phy reset
 *
 *	Stops EDMA, hard-resets the channel, then performs the
 *	COMRESET/classify sequence.  Always reports success; any
 *	link problems surface through *class.
 */
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	/* quiesce the engine before touching reset bits */
	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	return 0;
}
2338
/**
 *	mv_postreset - libata postreset hook
 *	@link: link that was just reset
 *	@classes: device classes detected by the reset
 *
 *	Prints link status, clears accumulated SError bits, and if a
 *	device is present re-establishes the device-control register.
 */
static void mv_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serr;

	/* print link status */
	sata_print_link_status(link);

	/* clear SError (write-one-to-clear via read-back of current bits) */
	sata_scr_read(link, SCR_ERROR, &serr);
	sata_scr_write_flush(link, SCR_ERROR, serr);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
}
2360
/* libata error-handler entry point: run standard EH with our
 * prereset/hardreset/postreset hooks and the stock softreset.
 */
static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}
2366
/* Post-internal-command hook: make sure EDMA is stopped after an
 * internal (EH-issued) command completes.
 */
static void mv_post_int_cmd(struct ata_queued_cmd *qc)
{
	mv_stop_dma(qc->ap);
}
2371
2372static void mv_eh_freeze(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04002373{
Tejun Heo0d5ff562007-02-01 15:06:36 +09002374 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002375 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2376 u32 tmp, mask;
2377 unsigned int shift;
Brett Russ31961942005-09-30 01:36:00 -04002378
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002379 /* FIXME: handle coalescing completion events properly */
Brett Russ31961942005-09-30 01:36:00 -04002380
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002381 shift = ap->port_no * 2;
2382 if (hc > 0)
2383 shift++;
Brett Russ31961942005-09-30 01:36:00 -04002384
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002385 mask = 0x3 << shift;
Brett Russ31961942005-09-30 01:36:00 -04002386
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002387 /* disable assertion of portN err, done events */
2388 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2389 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2390}
2391
/**
 *	mv_eh_thaw - libata thaw hook
 *	@ap: port to thaw
 *
 *	Clears any latched EDMA error and pending per-HC interrupt
 *	causes for this port, then re-enables the port's err/done
 *	bits in the main IRQ mask register.
 */
static void mv_eh_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	/* each port owns a 2-bit err/done pair; second-HC ports are
	 * shifted up one extra bit and renumbered 0-3 within their HC
	 */
	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
2424
/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void  __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup: the taskfile shadow registers live at
	 * 32-bit strides from the shadow-block base
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
2473
/**
 *	mv_chip_id - Identify chip generation and select errata workarounds
 *	@host: ATA host being probed
 *	@board_idx: board table index from the PCI match entry
 *
 *	Sets hpriv->ops, generation flags, and per-revision errata flags
 *	from the board index and PCI revision, and records which set of
 *	IRQ cause/mask register offsets to use (PCI vs PCIe).
 *
 *	RETURNS: 0 on success, 1 on an invalid board index.
 */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* fall through: 7042 shares all remaining setup with 6042 */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		dev_printk(KERN_ERR, &pdev->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	/* PCIe chips keep their IRQ cause/mask registers at different
	 * offsets than parallel-PCI/PCI-X chips
	 */
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs	= PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs	= PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
2609
/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @host: ATA host to initialize
 *      @board_idx: controller index
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* snapshot phy signal settings before resetting the controllers */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);	/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);
		unsigned int offset = port_mmio - mmio;

		mv_port_init(&ap->ioaddr, port_mmio);

		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	/* and unmask interrupt generation for host regs */
	writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);

	if (IS_GEN_I(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + hpriv->irq_cause_ofs),
		readl(mmio + hpriv->irq_mask_ofs));

done:
	return rc;
}
2705
/**
 *      mv_print_info - Dump key info to kernel log for perusal.
 *      @host: ATA host to print info about
 *
 *      FIXME: complete this.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Read the PCI sub-class byte to report whether the chip
	 * presents itself as a SCSI or RAID controller
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
2747
/**
 *      mv_init_one - handle a positive probe of a Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	/* both allocations are device-managed; no explicit free needed */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts: fall back to INTx if MSI is disabled or fails */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
2810
/* module entry point: register the PCI driver */
static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}
2815
/* module exit point: unregister the PCI driver */
static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}
2820
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

/* read-only module parameter (0444); checked once at probe time */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);