blob: d15caf32045efcdb698661210168acf551e052f1 [file] [log] [blame]
Brett Russ20f733e2005-09-01 18:26:17 -04001/*
2 * sata_mv.c - Marvell SATA support
3 *
Jeff Garzik8b260242005-11-12 12:32:50 -05004 * Copyright 2005: EMC Corporation, all rights reserved.
Jeff Garzike2b1be52005-11-18 14:04:23 -05005 * Copyright 2005 Red Hat, Inc. All rights reserved.
Brett Russ20f733e2005-09-01 18:26:17 -04006 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
Jeff Garzik4a05e202007-05-24 23:40:15 -040024/*
25 sata_mv TODO list:
26
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember a couple workarounds (one related to PCI-X)
30 are still needed.
31
Jeff Garzik4a05e202007-05-24 23:40:15 -040032 4) Add NCQ support (easy to intermediate, once new-EH support appears)
33
34 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
35
36 6) Add port multiplier support (intermediate)
37
Jeff Garzik4a05e202007-05-24 23:40:15 -040038 8) Develop a low-power-consumption strategy, and implement it.
39
40 9) [Experiment, low priority] See if ATAPI can be supported using
41 "unknown FIS" or "vendor-specific FIS" support, or something creative
42 like that.
43
44 10) [Experiment, low priority] Investigate interrupt coalescing.
45 Quite often, especially with PCI Message Signalled Interrupts (MSI),
46 the overhead reduced by interrupt mitigation is quite often not
47 worth the latency cost.
48
49 11) [Experiment, Marvell value added] Is it possible to use target
50 mode to cross-connect two Linux boxes with Marvell cards? If so,
51 creating LibATA target mode support would be very interesting.
52
53 Target mode, for those without docs, is the ability to directly
54 connect two SATA controllers.
55
56 13) Verify that 7042 is fully supported. I only have a 6042.
57
58*/
59
60
Brett Russ20f733e2005-09-01 18:26:17 -040061#include <linux/kernel.h>
62#include <linux/module.h>
63#include <linux/pci.h>
64#include <linux/init.h>
65#include <linux/blkdev.h>
66#include <linux/delay.h>
67#include <linux/interrupt.h>
Brett Russ20f733e2005-09-01 18:26:17 -040068#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050069#include <linux/device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040070#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050071#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040072#include <scsi/scsi_device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040073#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040074
75#define DRV_NAME "sata_mv"
Jeff Garzik6c087722007-10-12 00:16:23 -040076#define DRV_VERSION "1.01"
Brett Russ20f733e2005-09-01 18:26:17 -040077
/*
 * Chip register offsets, bit definitions, and driver-wide constants.
 * All offsets are relative to the controller's memory BAR unless the
 * surrounding comment says otherwise.
 */
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	/* CRQB (command request block) field layout */
	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	/* CRPB (command response block) field layout */
	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	/* PCIe flavor of the same pair, used on 7042-class parts */
	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	/* main-cause bits the interrupt handler deliberately ignores */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	/* error bits that freeze the port (Gen II/IIE) */
	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,
	/* ditto, Gen I variant */
	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
};
337
/* Chip-generation predicates, keyed off the MV_HP_GEN_* bits in hp_flags */
#define IS_GEN_I(hpriv)		((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500341
/* DMA-related constants, kept as unsigned values */
enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill-sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
356
/*
 * Supported chip families; used as the index into mv_port_info[]
 * (via the driver_data field of mv_pci_tbl entries).
 */
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};
366
/* Command ReQuest Block: 32B; hardware-defined layout, little-endian fields */
struct mv_crqb {
	__le32			sg_addr;	/* SG (ePRD) table address, low 32 bits */
	__le32			sg_addr_hi;	/* SG (ePRD) table address, high 32 bits */
	__le16			ctrl_flags;	/* CRQB_FLAG_READ, tag, etc. */
	__le16			ata_cmd[11];	/* encoded ATA taskfile writes */
};
374
/* Gen-IIE (6042/7042) variant of the Command ReQuest Block */
struct mv_crqb_iie {
	__le32			addr;		/* SG table address, low 32 bits */
	__le32			addr_hi;	/* SG table address, high 32 bits */
	__le32			flags;		/* request flags */
	__le32			len;		/* transfer length */
	__le32			ata_cmd[4];	/* packed ATA taskfile */
};
382
/* Command ResPonse Block: 8B; hardware-defined layout, little-endian fields */
struct mv_crpb {
	__le16			id;		/* response id/tag */
	__le16			flags;		/* status in CRPB_FLAG_STATUS_SHIFT bits */
	__le32			tmstmp;		/* timestamp */
};
389
/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG.
 * 16B entry, must be 16B-aligned (see MV_SG_TBL_SZ above).
 */
struct mv_sg {
	__le32			addr;		/* segment address, low 32 bits */
	__le32			flags_size;	/* length + EPRD_FLAG_END_OF_TBL */
	__le32			addr_hi;	/* segment address, high 32 bits */
	__le32			reserved;
};
397
/* Per-port private data: EDMA queue memory and bookkeeping */
struct mv_port_priv {
	struct mv_crqb		*crqb;		/* command request queue (CPU view) */
	dma_addr_t		crqb_dma;	/* ... and its DMA address */
	struct mv_crpb		*crpb;		/* command response queue (CPU view) */
	dma_addr_t		crpb_dma;	/* ... and its DMA address */
	struct mv_sg		*sg_tbl;	/* scatter/gather (ePRD) table */
	dma_addr_t		sg_tbl_dma;	/* ... and its DMA address */

	unsigned int		req_idx;	/* next request queue slot */
	unsigned int		resp_idx;	/* next response queue slot */

	u32			pp_flags;	/* MV_PP_FLAG_* bits */
};
411
/* Saved per-port PHY signal parameters (amplitude / pre-emphasis) */
struct mv_port_signal {
	u32			amps;
	u32			pre;
};
416
/* Per-host private data shared by all ports on one controller */
struct mv_host_priv {
	u32			hp_flags;	/* MV_HP_* flags (generation, errata) */
	struct mv_port_signal	signal[8];	/* per-port PHY signal settings */
	const struct mv_hw_ops	*ops;		/* chip-family hardware hooks */
	/* PCI vs PCIe register offsets, selected at init (see MV_HP_PCIE) */
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
};
425
/* Chip-family-specific hardware hooks; one instance per generation */
struct mv_hw_ops {
	/* apply per-port PHY errata workarounds */
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	/* read the saved PHY preamp/signal settings for port idx */
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	/* reset n_hc host controllers; returns 0 on success */
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};
437
/* Forward declarations: libata callbacks, then chip-specific helpers */
static void mv_irq_clear(struct ata_port *ap);
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_post_int_cmd(struct ata_queued_cmd *qc);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

/* Gen-I (50xx) hardware hooks */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

/* Gen-II/IIE (60xx/7042) hardware hooks and shared helpers */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq);
static int __mv_stop_dma(struct ata_port *ap);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500479
/* SCSI host template for Gen-I (50xx) chips */
static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	/* each s/g entry may be split in two by mv_fill_sg, hence the /2 */
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
497
/* SCSI host template for Gen-II/IIE chips.
 * NOTE(review): currently field-for-field identical to mv5_sht; kept as
 * a separate template per chip family.
 */
static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	/* each s/g entry may be split in two by mv_fill_sg, hence the /2 */
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
515
/* libata port operations for Gen-I (50xx): uses the mv5_* SCR accessors */
static const struct ata_port_operations mv5_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
543
/* libata port operations for Gen-II (60xx): adds dev_config hook and
 * uses the generic mv_scr_* accessors
 */
static const struct ata_port_operations mv6_ops = {
	.dev_config		= mv6_dev_config,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
572
/* libata port operations for Gen-IIE (6042/7042): uses the IIE-format
 * CRQB via mv_qc_prep_iie
 */
static const struct ata_port_operations mv_iie_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
600
/* Per-family port attributes; indexed by enum chip_type */
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
646
/*
 * PCI IDs handled by this driver, each mapped to a board index
 * (chip_504x/chip_5080/... selects an entry in mv_port_info[]).
 * Order matters for devices sharing a vendor: more specific IDs first.
 */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
676
/* PCI glue: probe via mv_init_one(), teardown via generic libata remove. */
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
683
/* Chip-specific low-level hardware methods for the 50xx family (mv5_*). */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
692
/* Chip-specific low-level hardware methods for the 60xx/70xx family (mv6_*). */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
701
/*
 * module options
 */
/* NOTE(review): presumably exposed via module_param() elsewhere in this
 * file — confirm before documenting externally. */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
707
Jeff Garzikd88184f2007-02-26 01:26:06 -0500708/* move to PCI layer or libata core? */
709static int pci_go_64(struct pci_dev *pdev)
710{
711 int rc;
712
713 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
714 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
715 if (rc) {
716 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
717 if (rc) {
718 dev_printk(KERN_ERR, &pdev->dev,
719 "64-bit DMA enable failed\n");
720 return rc;
721 }
722 }
723 } else {
724 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
725 if (rc) {
726 dev_printk(KERN_ERR, &pdev->dev,
727 "32-bit DMA enable failed\n");
728 return rc;
729 }
730 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
731 if (rc) {
732 dev_printk(KERN_ERR, &pdev->dev,
733 "32-bit consistent DMA enable failed\n");
734 return rc;
735 }
736 }
737
738 return rc;
739}
740
Jeff Garzikddef9bb2006-02-02 16:17:06 -0500741/*
Brett Russ20f733e2005-09-01 18:26:17 -0400742 * Functions
743 */
744
/*
 * Write a register, then read it back so the write is flushed past any
 * PCI posted-write buffering before the caller proceeds.
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
750
Brett Russ20f733e2005-09-01 18:26:17 -0400751static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
752{
753 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
754}
755
Jeff Garzikc9d39132005-11-13 17:47:51 -0500756static inline unsigned int mv_hc_from_port(unsigned int port)
757{
758 return port >> MV_PORT_HC_SHIFT;
759}
760
761static inline unsigned int mv_hardport_from_port(unsigned int port)
762{
763 return port & MV_PORT_MASK;
764}
765
766static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
767 unsigned int port)
768{
769 return mv_hc_base(base, mv_hc_from_port(port));
770}
771
Brett Russ20f733e2005-09-01 18:26:17 -0400772static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
773{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500774 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500775 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500776 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400777}
778
779static inline void __iomem *mv_ap_base(struct ata_port *ap)
780{
Tejun Heo0d5ff562007-02-01 15:06:36 +0900781 return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
Brett Russ20f733e2005-09-01 18:26:17 -0400782}
783
Jeff Garzikcca39742006-08-24 03:19:22 -0400784static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400785{
Jeff Garzikcca39742006-08-24 03:19:22 -0400786 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400787}
788
/* Intentionally empty libata hook: this driver acknowledges its own IRQ
 * cause registers in the EDMA start path (see mv_start_dma()). */
static void mv_irq_clear(struct ata_port *ap)
{
}
792
/**
 * mv_set_edma_ptrs - program EDMA request/response queue pointer registers
 * @port_mmio: per-port MMIO base
 * @hpriv: host private data (consulted for errata flags)
 * @pp: port private data holding queue DMA addresses and soft indices
 *
 * Loads the hardware IN/OUT pointers for the command request queue
 * (CRQB) and command response queue (CRPB) from the driver's cached
 * req_idx/resp_idx.  On chips with the XX42A0 errata the low 32 bits
 * of the queue base must also be written into the pointer registers.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* CRQB ring must be 1KB-aligned */
	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	/* CRPB ring must be 256-byte-aligned */
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
832
/**
 * mv_start_dma - Enable eDMA engine
 * @ap: ATA channel to manipulate
 * @port_mmio: per-port MMIO base address
 * @pp: port private data
 * @protocol: taskfile protocol of the command about to be issued
 *
 * If the engine is already running but in the wrong mode (NCQ vs.
 * non-NCQ), it is stopped first.  When (re)enabling: clear stale EDMA
 * error and host-controller interrupt causes, reconfigure EDMA for the
 * requested mode, program the queue pointers, then set EDMA_EN.
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		/* mode mismatch: stop so we can re-enable in the new mode */
		if (want_ncq != using_ncq)
			__mv_stop_dma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
				ap->host->iomap[MV_PRIMARY_BAR], hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			   (CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}
885
/**
 * __mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Issues EDMA_DS if the engine is marked enabled, then polls up to
 * ~100ms (1000 x 100us) for EDMA_EN to clear.  Verify the local cache
 * of the eDMA state is accurate with a WARN_ON.
 *
 * Returns 0 on success, -EIO if the engine refused to stop.
 *
 * LOCKING:
 * Inherited from caller; caller must hold the host lock (see
 * mv_stop_dma() for the locked wrapper).
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active. The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
928
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400929static int mv_stop_dma(struct ata_port *ap)
930{
931 unsigned long flags;
932 int rc;
933
934 spin_lock_irqsave(&ap->host->lock, flags);
935 rc = __mv_stop_dma(ap);
936 spin_unlock_irqrestore(&ap->host->lock, flags);
937
938 return rc;
939}
940
#ifdef ATA_DEBUG
/* Debug helper: hex-dump @bytes of MMIO space at @start, four 32-bit
 * words per output line. */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	unsigned off = 0;

	while (off < bytes) {
		int col;

		DPRINTK("%p: ", start + off);
		for (col = 0; col < 4 && off < bytes; col++) {
			printk("%08x ", readl(start + off));
			off += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
955
/* Debug helper: hex-dump the first @bytes of @pdev's PCI config space,
 * four dwords per line.  Compiles to an empty body without ATA_DEBUG. */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	unsigned off = 0;

	while (off < bytes) {
		int col;
		u32 dw;

		DPRINTK("%02x: ", off);
		for (col = 0; col < 4 && off < bytes; col++) {
			(void) pci_read_config_dword(pdev, off, &dw);
			printk("%08x ", dw);
			off += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
972static void mv_dump_all_regs(void __iomem *mmio_base, int port,
973 struct pci_dev *pdev)
974{
975#ifdef ATA_DEBUG
Jeff Garzik8b260242005-11-12 12:32:50 -0500976 void __iomem *hc_base = mv_hc_base(mmio_base,
Brett Russ31961942005-09-30 01:36:00 -0400977 port >> MV_PORT_HC_SHIFT);
978 void __iomem *port_base;
979 int start_port, num_ports, p, start_hc, num_hcs, hc;
980
981 if (0 > port) {
982 start_hc = start_port = 0;
983 num_ports = 8; /* shld be benign for 4 port devs */
984 num_hcs = 2;
985 } else {
986 start_hc = port >> MV_PORT_HC_SHIFT;
987 start_port = port;
988 num_ports = num_hcs = 1;
989 }
Jeff Garzik8b260242005-11-12 12:32:50 -0500990 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
Brett Russ31961942005-09-30 01:36:00 -0400991 num_ports > 1 ? num_ports - 1 : start_port);
992
993 if (NULL != pdev) {
994 DPRINTK("PCI config space regs:\n");
995 mv_dump_pci_cfg(pdev, 0x68);
996 }
997 DPRINTK("PCI regs:\n");
998 mv_dump_mem(mmio_base+0xc00, 0x3c);
999 mv_dump_mem(mmio_base+0xd00, 0x34);
1000 mv_dump_mem(mmio_base+0xf00, 0x4);
1001 mv_dump_mem(mmio_base+0x1d00, 0x6c);
1002 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
Dan Alonid220c372006-04-10 23:20:22 -07001003 hc_base = mv_hc_base(mmio_base, hc);
Brett Russ31961942005-09-30 01:36:00 -04001004 DPRINTK("HC regs (HC %i):\n", hc);
1005 mv_dump_mem(hc_base, 0x1c);
1006 }
1007 for (p = start_port; p < start_port + num_ports; p++) {
1008 port_base = mv_port_base(mmio_base, p);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001009 DPRINTK("EDMA regs (port %i):\n", p);
Brett Russ31961942005-09-30 01:36:00 -04001010 mv_dump_mem(port_base, 0x54);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001011 DPRINTK("SATA regs (port %i):\n", p);
Brett Russ31961942005-09-30 01:36:00 -04001012 mv_dump_mem(port_base+0x300, 0x60);
1013 }
1014#endif
1015}
1016
Brett Russ20f733e2005-09-01 18:26:17 -04001017static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1018{
1019 unsigned int ofs;
1020
1021 switch (sc_reg_in) {
1022 case SCR_STATUS:
1023 case SCR_CONTROL:
1024 case SCR_ERROR:
1025 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1026 break;
1027 case SCR_ACTIVE:
1028 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1029 break;
1030 default:
1031 ofs = 0xffffffffU;
1032 break;
1033 }
1034 return ofs;
1035}
1036
Tejun Heoda3dbb12007-07-16 14:29:40 +09001037static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -04001038{
1039 unsigned int ofs = mv_scr_offset(sc_reg_in);
1040
Tejun Heoda3dbb12007-07-16 14:29:40 +09001041 if (ofs != 0xffffffffU) {
1042 *val = readl(mv_ap_base(ap) + ofs);
1043 return 0;
1044 } else
1045 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001046}
1047
Tejun Heoda3dbb12007-07-16 14:29:40 +09001048static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -04001049{
1050 unsigned int ofs = mv_scr_offset(sc_reg_in);
1051
Tejun Heoda3dbb12007-07-16 14:29:40 +09001052 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -04001053 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001054 return 0;
1055 } else
1056 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001057}
1058
Mark Lordf2738272008-01-26 18:32:29 -05001059static void mv6_dev_config(struct ata_device *adev)
1060{
1061 /*
1062 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1063 * See mv_qc_prep() for more info.
1064 */
1065 if (adev->flags & ATA_DFLAG_NCQ)
1066 if (adev->max_sectors > ATA_MAX_SECTORS)
1067 adev->max_sectors = ATA_MAX_SECTORS;
1068}
1069
/**
 * mv_edma_cfg - program the EDMA configuration register for one port
 * @pp: port private data (NCQ-enabled flag is cached here)
 * @hpriv: host private data (chip generation checks)
 * @port_mmio: per-port MMIO base
 * @want_ncq: non-zero to configure EDMA for NCQ operation
 *
 * Builds the EDMA_CFG value from chip-generation-specific bits plus the
 * NCQ bit, writes it, and keeps pp->pp_flags' MV_PP_FLAG_NCQ_EN in sync.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq)
{
	u32 cfg;

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1099
/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.  One coherent chunk is carved into the CRQB ring,
 * the CRPB ring, and the ePRD (scatter/gather) table, in that order.
 * Allocations use devm_*/dmam_* so cleanup is automatic on detach.
 *
 * Returns 0 on success, negative errno on allocation failure.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	unsigned long flags;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	/* hold the host lock while touching EDMA config/pointer registers */
	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(pp, hpriv, port_mmio, 0);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only. Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}
1172
/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.  The DMA memory itself was allocated
 * with devm_*/dmam_* helpers in mv_port_start(), so it is released
 * automatically; only the engine needs stopping here.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
}
1186
/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.  Each DMA segment is
 * split so that no ePRD entry crosses a 64 KiB boundary (the inner
 * while loop clamps len to 0x10000 - offset); a zero in the low 16
 * bits of flags_size encodes a full 64 KiB.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			/* clamp so this entry ends at the 64 KiB boundary */
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
1230
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001231static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001232{
Mark Lord559eeda2006-05-19 16:40:15 -04001233 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001234 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001235 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001236}
1237
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA. Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.  The CRQB slot is selected by the software
 * request index (pp->req_idx); the taskfile is packed one shadow
 * register at a time via mv_crqb_pack_cmd().
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* non-DMA protocols are issued through the normal libata path */
	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accomodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command. So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ. NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux. If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1328
/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA. Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.  Gen-IIE CRQBs pack the whole taskfile into
 * four 32-bit words instead of per-register command words.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* non-DMA protocols are issued through the normal libata path */
	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1396
/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	/* (re)enable EDMA for the protocol being issued */
	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	pp->req_idx++;

	/* new producer index, shifted into register position */
	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1443
/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *      @qc: affected queued command, or NULL if none is active
 *
 *      In most cases, just clear the interrupt and move on.  However,
 *      some cases require an eDMA reset, which is done right before
 *      the COMRESET in mv_phy_reset().  The SERR case requires a
 *      clear of pending errors in the SATA SERROR register.  Finally,
 *      if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */

	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_HARDRESET;
	}

	/* Gen I (50xx) uses a different freeze mask and self-disable bit */
	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp = ap->private_data;
			/* hardware turned EDMA off; keep our cache in sync */
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			/* NOTE(review): plain '=' discards any AC_ERR_DEV /
			 * AC_ERR_ATA_BUS bits accumulated above — looks like
			 * '|=' was intended; confirm before changing.
			 */
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	/* attribute the error to the active command if there is one */
	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
1548
/* Handle a PIO-mode device interrupt: read the ATA status register
 * (which also acks the interrupt) and complete the active command.
 */
static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}
1570
/* Drain the EDMA response queue: complete every command the hardware
 * has finished since our cached consumer index, then (once) write the
 * new consumer index back so the hardware can reuse those slots.
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			/* error path: hand off to EH without bumping the
			 * consumer index for this entry
			 */
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	/* single write-back of the final consumer index (out_index ==
	 * in_index here, since that is the loop exit condition)
	 */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1636
/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @relevant: port error bits relevant to this host controller
 *      @hc: which host controller we're to look at
 *
 *      Read then write clear the HC interrupt status then walk each
 *      port connected to the HC and see if it needs servicing.  Port
 *      success ints are reported in the HC interrupt status reg, the
 *      port error ints are reported in the higher level main
 *      interrupt status register and thus are passed in via the
 *      'relevant' argument.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0;

	/* each HC serves MV_PORTS_PER_HC consecutive ports */
	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	/* ack everything we are about to service */
	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		struct ata_port *ap = host->ports[port];
		/* NOTE(review): pp is fetched from ap before the !ap check
		 * below — if ap can really be NULL this dereferences it;
		 * verify whether the check is vestigial or the fetch should
		 * move after it.
		 */
		struct mv_port_priv *pp = ap->private_data;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			/* polled commands are handled by their issuer */
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port);	/* range 0..3 */

		/* dispatch on the mode the port is currently running in */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}
1712
/* Handle a PCI-level error: log and clear the cause register, then
 * freeze every port that still has a device attached so libata EH
 * can recover them.
 */
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	/* cause register offset differs per chip family */
	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	/* ack the error */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			/* record the cause only on the first affected port */
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
1752
/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	u32 irq_stat, irq_mask;

	spin_lock(&host->lock);
	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
	irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	/* PCI errors supersede per-HC servicing */
	if (unlikely(irq_stat & PCI_ERR)) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1805
Jeff Garzikc9d39132005-11-13 17:47:51 -05001806static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1807{
1808 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1809 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1810
1811 return hc_mmio + ofs;
1812}
1813
1814static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1815{
1816 unsigned int ofs;
1817
1818 switch (sc_reg_in) {
1819 case SCR_STATUS:
1820 case SCR_ERROR:
1821 case SCR_CONTROL:
1822 ofs = sc_reg_in * sizeof(u32);
1823 break;
1824 default:
1825 ofs = 0xffffffffU;
1826 break;
1827 }
1828 return ofs;
1829}
1830
Tejun Heoda3dbb12007-07-16 14:29:40 +09001831static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001832{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001833 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1834 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001835 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1836
Tejun Heoda3dbb12007-07-16 14:29:40 +09001837 if (ofs != 0xffffffffU) {
1838 *val = readl(addr + ofs);
1839 return 0;
1840 } else
1841 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001842}
1843
Tejun Heoda3dbb12007-07-16 14:29:40 +09001844static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001845{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001846 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1847 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001848 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1849
Tejun Heoda3dbb12007-07-16 14:29:40 +09001850 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001851 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001852 return 0;
1853 } else
1854 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001855}
1856
/* Gen I (50xx) PCI bus reset.  On all but the earliest 5080 stepping,
 * set bit 0 of the expansion-ROM BAR control register first, then do
 * the common PCI bus reset.
 */
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	int early_5080;

	/* revision-0 5080 parts lack this register */
	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}
1871
/* Restore the Gen I flash-control register to its reset value. */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1876
/* Capture the BIOS-programmed PHY pre-emphasis and amplitude values
 * for one Gen I port, so later errata fixups can restore them.
 */
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}
1888
/* Enable activity LEDs on Gen I (50xx) parts via the GPIO port
 * control and expansion-ROM BAR control registers.
 */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): '|= ~(1 << 0)' sets bits 31:1 and leaves bit 0 as
	 * read — '&= ~(1 << 0)' (clear bit 0, undoing mv5_reset_bus) looks
	 * like the intent; confirm against the 50xx spec before changing.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1901
/* Apply Gen I PHY errata fixups to one port, then reprogram the
 * pre-emphasis/amplitude values saved earlier by mv5_read_preamp().
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	/* 50XXB0 errata: adjust LT mode and PHY control */
	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* clear the pre/amps field, then restore the saved values */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1927
Jeff Garzikc9d39132005-11-13 17:47:51 -05001928
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Bring one Gen I port to a known state: disable EDMA, reset the
 * channel, then zero/initialize every EDMA register (offsets are
 * relative to the port's MMIO base).
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1955
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Reset the shared (per-HC) registers of one Gen I host controller:
 * clear the coalescing/IRQ registers and reprogram the HC config
 * register at offset 0x20.
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
1974
1975static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1976 unsigned int n_hc)
1977{
1978 unsigned int hc, port;
1979
1980 for (hc = 0; hc < n_hc; hc++) {
1981 for (port = 0; port < MV_PORTS_PER_HC; port++)
1982 mv5_reset_hc_port(hpriv, mmio,
1983 (hc * MV_PORTS_PER_HC) + port);
1984
1985 mv5_reset_one_hc(hpriv, mmio, hc);
1986 }
1987
1988 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05001989}
1990
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
/* Common PCI-side reset: mask/clear all PCI interrupt and error
 * registers and restore the PCI mode register's default timer field.
 */
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	/* cause/mask register offsets are chip-family specific */
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
2016
/* Gen II flash reset: do the common Gen I flash reset, then set
 * bits 5 and 6 of the GPIO port control register.
 */
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
2028
/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @hpriv: host private structure (unused here; kept for op symmetry)
 *      @mmio: base address of the HBA
 *      @n_hc: number of host controllers (unused here)
 *
 *      This routine only applies to 6xxx parts.  Returns 0 on
 *      success, 1 if any step of the reset sequence times out.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* wait (up to ~1ms) for the PCI master to go idle */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
2092
/* Capture the PHY pre-emphasis/amplitude settings for one Gen II port.
 * If the reset-config register says the BIOS values are invalid
 * (bit 0 clear), fall back to fixed defaults; otherwise read them
 * from the port's PHY_MODE2 register.
 */
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		/* defaults when no valid BIOS programming is present */
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
2112
/* Enable activity LEDs on Gen II parts (bits 5 and 6 of GPIO control). */
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
2117
/* Apply Gen II/IIE PHY errata fixups to one port, then restore the
 * pre-emphasis/amplitude values captured by mv6_read_preamp().
 * Which fixups run depends on the chip-stepping flags in hpriv.
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	/* pulse bits 16/31 of PHY_MODE2 with settle delays */
	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* B2 parts: preserve register 0x310 across the update */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
2183
/*
 * mv_channel_reset - hard-reset one channel's EDMA engine and PHY
 * @hpriv: host private data (selects chip generation and errata hooks)
 * @mmio: base address of the host registers
 * @port_no: channel to reset
 *
 * Asserts ATA_RST in the EDMA command register, re-applies the gen2i
 * interface settings on Gen-II parts (the reset disturbs them), waits
 * for propagation, de-asserts the reset, then runs the per-chip PHY
 * errata fixups.
 */
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	/* assert channel reset (writelfl = write + read-back flush) */
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);	/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	/* extra settle delay on Gen-I — presumably an errata; no rationale
	 * recorded in the source
	 */
	if (IS_GEN_I(hpriv))
		mdelay(1);
}
2210
/**
 * mv_phy_reset - Perform eDMA reset followed by COMRESET
 * @ap: ATA channel to manipulate
 * @class: out: resulting device class (ATA_DEV_NONE when link offline)
 * @deadline: jiffies value after which polling/retrying gives up
 *
 * Issues a COMRESET through SControl, polls SStatus until the link
 * either fully establishes (DET == 3) or reports no device (DET == 0),
 * applies a Gen-II errata retry, then waits for the device's taskfile
 * Status to leave the 0x80/0x7f "not ready" values before reading the
 * device signature.
 *
 * LOCKING:
 * Inherited from caller.  NOTE(review): the historical comment claimed
 * this routine does not sleep, but the current code calls msleep(), so
 * it must not be invoked from interrupt/atomic context.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			unsigned long deadline)
{
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;		/* COMRESET retries for the Gen-II errata */
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
	{
		/* intentionally shadows the outer sstatus; DEBUG-only scope */
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* Issue COMRESET via SControl (0x301 asserts DET, 0x300 releases) */
comreset_retry:
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
	msleep(1);

	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
	msleep(20);

	/* poll until the PHY settles: DET==3 (device+phy) or DET==0 (none) */
	do {
		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	if (ata_link_offline(&ap->link)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		msleep(500);
		if (retry-- <= 0)
			break;
		if (time_after(jiffies, deadline))
			break;
	}

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap->link.device, 1, NULL);

	/* clear any EDMA error-cause bits latched during the reset */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* eDMA must be stopped here; starting it again is the caller's job */
	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}
2314
Tejun Heocc0680a2007-08-06 18:36:23 +09002315static int mv_prereset(struct ata_link *link, unsigned long deadline)
Jeff Garzik22374672005-11-17 10:59:48 -05002316{
Tejun Heocc0680a2007-08-06 18:36:23 +09002317 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002318 struct mv_port_priv *pp = ap->private_data;
Tejun Heocc0680a2007-08-06 18:36:23 +09002319 struct ata_eh_context *ehc = &link->eh_context;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002320 int rc;
Jeff Garzik0ea9e172007-07-13 17:06:45 -04002321
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002322 rc = mv_stop_dma(ap);
2323 if (rc)
2324 ehc->i.action |= ATA_EH_HARDRESET;
2325
2326 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2327 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2328 ehc->i.action |= ATA_EH_HARDRESET;
2329 }
2330
2331 /* if we're about to do hardreset, nothing more to do */
2332 if (ehc->i.action & ATA_EH_HARDRESET)
2333 return 0;
2334
Tejun Heocc0680a2007-08-06 18:36:23 +09002335 if (ata_link_online(link))
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002336 rc = ata_wait_ready(ap, deadline);
2337 else
2338 rc = -ENODEV;
2339
2340 return rc;
Jeff Garzik22374672005-11-17 10:59:48 -05002341}
2342
/*
 * mv_hardreset - libata ->hardreset hook
 * @link: link to reset
 * @class: out: device class determined by mv_phy_reset()
 * @deadline: deadline passed through to mv_phy_reset()
 *
 * Stops eDMA, resets the channel hardware, then performs COMRESET and
 * device classification.  Always returns 0; failure shows up as
 * *class == ATA_DEV_NONE.
 */
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	/* NOTE(review): mv_stop_dma()'s return value is ignored here —
	 * presumably acceptable because the channel is hard-reset next;
	 * confirm against mv_channel_reset() semantics.
	 */
	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	return 0;
}
2358
Tejun Heocc0680a2007-08-06 18:36:23 +09002359static void mv_postreset(struct ata_link *link, unsigned int *classes)
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002360{
Tejun Heocc0680a2007-08-06 18:36:23 +09002361 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002362 u32 serr;
2363
2364 /* print link status */
Tejun Heocc0680a2007-08-06 18:36:23 +09002365 sata_print_link_status(link);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002366
2367 /* clear SError */
Tejun Heocc0680a2007-08-06 18:36:23 +09002368 sata_scr_read(link, SCR_ERROR, &serr);
2369 sata_scr_write_flush(link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002370
2371 /* bail out if no device is present */
2372 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2373 DPRINTK("EXIT, no device\n");
2374 return;
2375 }
2376
2377 /* set up device control */
2378 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2379}
2380
/* libata error-handler entry point: run standard EH with our reset hooks */
static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}
2386
/* after an internal (EH-issued) command: make sure eDMA is stopped */
static void mv_post_int_cmd(struct ata_queued_cmd *qc)
{
	mv_stop_dma(qc->ap);
}
2391
/*
 * mv_eh_freeze - libata ->freeze hook: mask this port's interrupts
 * @ap: port to freeze
 *
 * Clears the port's err/done bits in the chip's main IRQ mask so it
 * stops asserting interrupts while EH runs.
 */
static void mv_eh_freeze(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	/* host controller unit: ports 0-3 live on HC 0, ports 4-7 on HC 1 */
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	u32 tmp, mask;
	unsigned int shift;

	/* FIXME: handle coalescing completion events properly */

	/* each port owns two adjacent bits (err, done) in the main IRQ
	 * mask; ports on the second HC are offset by one extra bit
	 */
	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;

	mask = 0x3 << shift;

	/* disable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
2411
/*
 * mv_eh_thaw - libata ->thaw hook: clear stale events and unmask IRQs
 * @ap: port to thaw
 *
 * Mirror of mv_eh_freeze(): clears latched EDMA errors and pending
 * per-HC interrupt causes for this port, then re-enables its err/done
 * bits in the main IRQ mask.
 */
static void mv_eh_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	/* host controller unit: ports 0-3 live on HC 0, ports 4-7 on HC 1 */
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	/* same bit layout as in mv_eh_freeze(); hc_port_no is the port's
	 * index within its own HC (0-3)
	 */
	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
2444
/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup: the taskfile shadow registers are laid out
	 * one per 32-bit word starting at SHD_BLK_OFS
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions
	 * (SError is write-1-to-clear, so write back what we read)
	 */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
2493
/**
 * mv_chip_id - select chip-specific ops and errata flags
 * @host: ATA host being initialized
 * @board_idx: board table index (chip_504x, chip_608x, chip_7042, ...)
 *
 * Picks the Gen-I/II/IIE operations vector, records which silicon
 * errata workarounds apply for this PCI revision, and selects PCI vs
 * PCIe interrupt register offsets.
 *
 * Returns 0 on success, 1 on an invalid @board_idx.
 */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			/* unknown stepping: apply the newest known workaround */
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* FALLTHROUGH: chip_7042 shares all chip_6042 setup below */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		dev_printk(KERN_ERR, &pdev->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	/* PCIe parts use a different block of interrupt cause/mask regs */
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
2629
/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 * @board_idx: controller index
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * Returns 0 on success, or the error from mv_chip_id() /
 * the chip's reset_hc hook.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask: keep the chip quiet during bring-up */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	/* identify the chip and set up hpriv->ops + errata flags */
	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* capture PHY pre-emphasis/amplitude before resetting anything,
	 * so phy_errata can restore the factory values later
	 */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		/* same gen2i interface setup as in mv_channel_reset() */
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);	/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);
		unsigned int offset = port_mmio - mmio;

		mv_port_init(&ap->ioaddr, port_mmio);

		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	/* and unmask interrupt generation for host regs */
	writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);

	/* Gen-I uses a reduced set of main IRQ mask bits */
	if (IS_GEN_I(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + hpriv->irq_cause_ofs),
		readl(mmio + hpriv->irq_mask_ofs));

done:
	return rc;
}
2725
Brett Russ05b308e2005-10-05 17:08:53 -04002726/**
2727 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002728 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002729 *
2730 * FIXME: complete this.
2731 *
2732 * LOCKING:
2733 * Inherited from caller.
2734 */
Tejun Heo4447d352007-04-17 23:44:08 +09002735static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002736{
Tejun Heo4447d352007-04-17 23:44:08 +09002737 struct pci_dev *pdev = to_pci_dev(host->dev);
2738 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07002739 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002740 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002741
2742 /* Use this to determine the HW stepping of the chip so we know
2743 * what errata to workaround
2744 */
Brett Russ31961942005-09-30 01:36:00 -04002745 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2746 if (scc == 0)
2747 scc_s = "SCSI";
2748 else if (scc == 0x01)
2749 scc_s = "RAID";
2750 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002751 scc_s = "?";
2752
2753 if (IS_GEN_I(hpriv))
2754 gen = "I";
2755 else if (IS_GEN_II(hpriv))
2756 gen = "II";
2757 else if (IS_GEN_IIE(hpriv))
2758 gen = "IIE";
2759 else
2760 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002761
Jeff Garzika9524a72005-10-30 14:39:11 -05002762 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002763 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2764 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002765 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2766}
2767
/**
 * mv_init_one - handle a positive probe of a Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * Allocates the ATA host and driver-private data, maps BARs,
 * initializes the adapter, enables interrupts (MSI if requested and
 * available, else INTx), and activates the host.
 *
 * All resources are devm-/pcim-managed, so error paths can simply
 * return without explicit cleanup.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;	/* print the banner only once */
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	/* set 64-bit DMA mask if the platform supports it */
	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts: fall back to INTx when MSI is off or fails */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
2830
/* module entry point: register the PCI driver */
static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}
2835
/* module exit point: unregister the PCI driver */
static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}
2840
/* module metadata, device table, and parameters */
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

/* 0444: read-only in sysfs; consulted once at probe time (mv_init_one) */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);