blob: d40c41c5f16c52a184cf26d5c42cad1ba34e1aef [file] [log] [blame]
Brett Russ20f733e2005-09-01 18:26:17 -04001/*
2 * sata_mv.c - Marvell SATA support
3 *
Jeff Garzik8b260242005-11-12 12:32:50 -05004 * Copyright 2005: EMC Corporation, all rights reserved.
Jeff Garzike2b1be52005-11-18 14:04:23 -05005 * Copyright 2005 Red Hat, Inc. All rights reserved.
Brett Russ20f733e2005-09-01 18:26:17 -04006 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
Jeff Garzik4a05e202007-05-24 23:40:15 -040024/*
25 sata_mv TODO list:
26
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember a couple workarounds (one related to PCI-X)
30 are still needed.
31
Jeff Garzik4a05e202007-05-24 23:40:15 -040032 4) Add NCQ support (easy to intermediate, once new-EH support appears)
33
34 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
35
36 6) Add port multiplier support (intermediate)
37
38 7) Test and verify 3.0 Gbps support
39
40 8) Develop a low-power-consumption strategy, and implement it.
41
42 9) [Experiment, low priority] See if ATAPI can be supported using
43 "unknown FIS" or "vendor-specific FIS" support, or something creative
44 like that.
45
46 10) [Experiment, low priority] Investigate interrupt coalescing.
47 Quite often, especially with PCI Message Signalled Interrupts (MSI),
48 the overhead reduced by interrupt mitigation is quite often not
49 worth the latency cost.
50
51 11) [Experiment, Marvell value added] Is it possible to use target
52 mode to cross-connect two Linux boxes with Marvell cards? If so,
53 creating LibATA target mode support would be very interesting.
54
55 Target mode, for those without docs, is the ability to directly
56 connect two SATA controllers.
57
58 13) Verify that 7042 is fully supported. I only have a 6042.
59
60*/
61
62
Brett Russ20f733e2005-09-01 18:26:17 -040063#include <linux/kernel.h>
64#include <linux/module.h>
65#include <linux/pci.h>
66#include <linux/init.h>
67#include <linux/blkdev.h>
68#include <linux/delay.h>
69#include <linux/interrupt.h>
Brett Russ20f733e2005-09-01 18:26:17 -040070#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050071#include <linux/device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040072#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050073#include <scsi/scsi_cmnd.h>
Brett Russ20f733e2005-09-01 18:26:17 -040074#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040075
76#define DRV_NAME "sata_mv"
Jeff Garzik8bc3fc42007-05-21 20:26:38 -040077#define DRV_VERSION "0.81"
Brett Russ20f733e2005-09-01 18:26:17 -040078
79enum {
80 /* BAR's are enumerated in terms of pci_resource_start() terms */
81 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
82 MV_IO_BAR = 2, /* offset 0x18: IO space */
83 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
84
85 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
86 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
87
88 MV_PCI_REG_BASE = 0,
89 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
Mark Lord615ab952006-05-19 16:24:56 -040090 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
91 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
92 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
93 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
94 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
95
Brett Russ20f733e2005-09-01 18:26:17 -040096 MV_SATAHC0_REG_BASE = 0x20000,
Jeff Garzik522479f2005-11-12 22:14:02 -050097 MV_FLASH_CTL = 0x1046c,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -050098 MV_GPIO_PORT_CTL = 0x104f0,
99 MV_RESET_CFG = 0x180d8,
Brett Russ20f733e2005-09-01 18:26:17 -0400100
101 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
102 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
103 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
104 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
105
Brett Russ31961942005-09-30 01:36:00 -0400106 MV_MAX_Q_DEPTH = 32,
107 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
108
109 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
110 * CRPB needs alignment on a 256B boundary. Size == 256B
111 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
112 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
113 */
114 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
115 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
116 MV_MAX_SG_CT = 176,
117 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
118 MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
119
Brett Russ20f733e2005-09-01 18:26:17 -0400120 MV_PORTS_PER_HC = 4,
121 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
122 MV_PORT_HC_SHIFT = 2,
Brett Russ31961942005-09-30 01:36:00 -0400123 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
Brett Russ20f733e2005-09-01 18:26:17 -0400124 MV_PORT_MASK = 3,
125
126 /* Host Flags */
127 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
128 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400129 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400130 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
131 ATA_FLAG_PIO_POLLING,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500132 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
Brett Russ20f733e2005-09-01 18:26:17 -0400133
Brett Russ31961942005-09-30 01:36:00 -0400134 CRQB_FLAG_READ = (1 << 0),
135 CRQB_TAG_SHIFT = 1,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400136 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
137 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
Brett Russ31961942005-09-30 01:36:00 -0400138 CRQB_CMD_ADDR_SHIFT = 8,
139 CRQB_CMD_CS = (0x2 << 11),
140 CRQB_CMD_LAST = (1 << 15),
141
142 CRPB_FLAG_STATUS_SHIFT = 8,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400143 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
144 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
Brett Russ31961942005-09-30 01:36:00 -0400145
146 EPRD_FLAG_END_OF_TBL = (1 << 31),
147
Brett Russ20f733e2005-09-01 18:26:17 -0400148 /* PCI interface registers */
149
Brett Russ31961942005-09-30 01:36:00 -0400150 PCI_COMMAND_OFS = 0xc00,
151
Brett Russ20f733e2005-09-01 18:26:17 -0400152 PCI_MAIN_CMD_STS_OFS = 0xd30,
153 STOP_PCI_MASTER = (1 << 2),
154 PCI_MASTER_EMPTY = (1 << 3),
155 GLOB_SFT_RST = (1 << 4),
156
Jeff Garzik522479f2005-11-12 22:14:02 -0500157 MV_PCI_MODE = 0xd00,
158 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
159 MV_PCI_DISC_TIMER = 0xd04,
160 MV_PCI_MSI_TRIGGER = 0xc38,
161 MV_PCI_SERR_MASK = 0xc28,
162 MV_PCI_XBAR_TMOUT = 0x1d04,
163 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
164 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
165 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
166 MV_PCI_ERR_COMMAND = 0x1d50,
167
168 PCI_IRQ_CAUSE_OFS = 0x1d58,
169 PCI_IRQ_MASK_OFS = 0x1d5c,
Brett Russ20f733e2005-09-01 18:26:17 -0400170 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
171
172 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
173 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
174 PORT0_ERR = (1 << 0), /* shift by port # */
175 PORT0_DONE = (1 << 1), /* shift by port # */
176 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
177 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
178 PCI_ERR = (1 << 18),
179 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
180 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500181 PORTS_0_3_COAL_DONE = (1 << 8),
182 PORTS_4_7_COAL_DONE = (1 << 17),
Brett Russ20f733e2005-09-01 18:26:17 -0400183 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
184 GPIO_INT = (1 << 22),
185 SELF_INT = (1 << 23),
186 TWSI_INT = (1 << 24),
187 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500188 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
Jeff Garzik8b260242005-11-12 12:32:50 -0500189 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
Brett Russ20f733e2005-09-01 18:26:17 -0400190 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
191 HC_MAIN_RSVD),
Jeff Garzikfb621e22007-02-25 04:19:45 -0500192 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
193 HC_MAIN_RSVD_5),
Brett Russ20f733e2005-09-01 18:26:17 -0400194
195 /* SATAHC registers */
196 HC_CFG_OFS = 0,
197
198 HC_IRQ_CAUSE_OFS = 0x14,
Brett Russ31961942005-09-30 01:36:00 -0400199 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
Brett Russ20f733e2005-09-01 18:26:17 -0400200 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
201 DEV_IRQ = (1 << 8), /* shift by port # */
202
203 /* Shadow block registers */
Brett Russ31961942005-09-30 01:36:00 -0400204 SHD_BLK_OFS = 0x100,
205 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
Brett Russ20f733e2005-09-01 18:26:17 -0400206
207 /* SATA registers */
208 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
209 SATA_ACTIVE_OFS = 0x350,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500210 PHY_MODE3 = 0x310,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500211 PHY_MODE4 = 0x314,
212 PHY_MODE2 = 0x330,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500213 MV5_PHY_MODE = 0x74,
214 MV5_LT_MODE = 0x30,
215 MV5_PHY_CTL = 0x0C,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500216 SATA_INTERFACE_CTL = 0x050,
217
218 MV_M2_PREAMP_MASK = 0x7e0,
Brett Russ20f733e2005-09-01 18:26:17 -0400219
220 /* Port registers */
221 EDMA_CFG_OFS = 0,
Brett Russ31961942005-09-30 01:36:00 -0400222 EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */
223 EDMA_CFG_NCQ = (1 << 5),
224 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
225 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
226 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
Brett Russ20f733e2005-09-01 18:26:17 -0400227
228 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
229 EDMA_ERR_IRQ_MASK_OFS = 0xc,
230 EDMA_ERR_D_PAR = (1 << 0),
231 EDMA_ERR_PRD_PAR = (1 << 1),
232 EDMA_ERR_DEV = (1 << 2),
233 EDMA_ERR_DEV_DCON = (1 << 3),
234 EDMA_ERR_DEV_CON = (1 << 4),
235 EDMA_ERR_SERR = (1 << 5),
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400236 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
237 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
Brett Russ20f733e2005-09-01 18:26:17 -0400238 EDMA_ERR_BIST_ASYNC = (1 << 8),
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400239 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
Brett Russ20f733e2005-09-01 18:26:17 -0400240 EDMA_ERR_CRBQ_PAR = (1 << 9),
241 EDMA_ERR_CRPB_PAR = (1 << 10),
242 EDMA_ERR_INTRL_PAR = (1 << 11),
243 EDMA_ERR_IORDY = (1 << 12),
244 EDMA_ERR_LNK_CTRL_RX = (0xf << 13),
245 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),
246 EDMA_ERR_LNK_DATA_RX = (0xf << 17),
247 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21),
248 EDMA_ERR_LNK_DATA_TX = (0x1f << 26),
249 EDMA_ERR_TRANS_PROTO = (1 << 31),
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400250 EDMA_ERR_OVERRUN_5 = (1 << 5),
251 EDMA_ERR_UNDERRUN_5 = (1 << 6),
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400252 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
253 EDMA_ERR_PRD_PAR |
254 EDMA_ERR_DEV_DCON |
255 EDMA_ERR_DEV_CON |
256 EDMA_ERR_SERR |
257 EDMA_ERR_SELF_DIS |
258 EDMA_ERR_CRBQ_PAR |
259 EDMA_ERR_CRPB_PAR |
260 EDMA_ERR_INTRL_PAR |
261 EDMA_ERR_IORDY |
262 EDMA_ERR_LNK_CTRL_RX_2 |
263 EDMA_ERR_LNK_DATA_RX |
264 EDMA_ERR_LNK_DATA_TX |
265 EDMA_ERR_TRANS_PROTO,
266 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
267 EDMA_ERR_PRD_PAR |
268 EDMA_ERR_DEV_DCON |
269 EDMA_ERR_DEV_CON |
270 EDMA_ERR_OVERRUN_5 |
271 EDMA_ERR_UNDERRUN_5 |
272 EDMA_ERR_SELF_DIS_5 |
273 EDMA_ERR_CRBQ_PAR |
274 EDMA_ERR_CRPB_PAR |
275 EDMA_ERR_INTRL_PAR |
276 EDMA_ERR_IORDY,
Brett Russ20f733e2005-09-01 18:26:17 -0400277
Brett Russ31961942005-09-30 01:36:00 -0400278 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
279 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400280
281 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
282 EDMA_REQ_Q_PTR_SHIFT = 5,
283
284 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
285 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
286 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400287 EDMA_RSP_Q_PTR_SHIFT = 3,
288
Brett Russ20f733e2005-09-01 18:26:17 -0400289 EDMA_CMD_OFS = 0x28,
290 EDMA_EN = (1 << 0),
291 EDMA_DS = (1 << 1),
292 ATA_RST = (1 << 2),
293
Jeff Garzikc9d39132005-11-13 17:47:51 -0500294 EDMA_IORDY_TMOUT = 0x34,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500295 EDMA_ARB_CFG = 0x38,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500296
Brett Russ31961942005-09-30 01:36:00 -0400297 /* Host private flags (hp_flags) */
298 MV_HP_FLAG_MSI = (1 << 0),
Jeff Garzik47c2b672005-11-12 21:13:17 -0500299 MV_HP_ERRATA_50XXB0 = (1 << 1),
300 MV_HP_ERRATA_50XXB2 = (1 << 2),
301 MV_HP_ERRATA_60X1B2 = (1 << 3),
302 MV_HP_ERRATA_60X1C0 = (1 << 4),
Jeff Garzike4e7b892006-01-31 12:18:41 -0500303 MV_HP_ERRATA_XX42A0 = (1 << 5),
304 MV_HP_50XX = (1 << 6),
305 MV_HP_GEN_IIE = (1 << 7),
Brett Russ20f733e2005-09-01 18:26:17 -0400306
Brett Russ31961942005-09-30 01:36:00 -0400307 /* Port private flags (pp_flags) */
308 MV_PP_FLAG_EDMA_EN = (1 << 0),
309 MV_PP_FLAG_EDMA_DS_ACT = (1 << 1),
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400310 MV_PP_FLAG_HAD_A_RESET = (1 << 2),
Brett Russ31961942005-09-30 01:36:00 -0400311};
312
Jeff Garzikc9d39132005-11-13 17:47:51 -0500313#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500314#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0)
Jeff Garzike4e7b892006-01-31 12:18:41 -0500315#define IS_GEN_I(hpriv) IS_50XX(hpriv)
316#define IS_GEN_II(hpriv) IS_60XX(hpriv)
317#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500318
Jeff Garzik095fec82005-11-12 09:50:49 -0500319enum {
Jeff Garzikd88184f2007-02-26 01:26:06 -0500320 MV_DMA_BOUNDARY = 0xffffffffU,
Jeff Garzik095fec82005-11-12 09:50:49 -0500321
322 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
323
324 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
325};
326
Jeff Garzik522479f2005-11-12 22:14:02 -0500327enum chip_type {
328 chip_504x,
329 chip_508x,
330 chip_5080,
331 chip_604x,
332 chip_608x,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500333 chip_6042,
334 chip_7042,
Jeff Garzik522479f2005-11-12 22:14:02 -0500335};
336
Brett Russ31961942005-09-30 01:36:00 -0400337/* Command ReQuest Block: 32B */
338struct mv_crqb {
Mark Lorde1469872006-05-22 19:02:03 -0400339 __le32 sg_addr;
340 __le32 sg_addr_hi;
341 __le16 ctrl_flags;
342 __le16 ata_cmd[11];
Brett Russ31961942005-09-30 01:36:00 -0400343};
344
Jeff Garzike4e7b892006-01-31 12:18:41 -0500345struct mv_crqb_iie {
Mark Lorde1469872006-05-22 19:02:03 -0400346 __le32 addr;
347 __le32 addr_hi;
348 __le32 flags;
349 __le32 len;
350 __le32 ata_cmd[4];
Jeff Garzike4e7b892006-01-31 12:18:41 -0500351};
352
Brett Russ31961942005-09-30 01:36:00 -0400353/* Command ResPonse Block: 8B */
354struct mv_crpb {
Mark Lorde1469872006-05-22 19:02:03 -0400355 __le16 id;
356 __le16 flags;
357 __le32 tmstmp;
Brett Russ31961942005-09-30 01:36:00 -0400358};
359
360/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
361struct mv_sg {
Mark Lorde1469872006-05-22 19:02:03 -0400362 __le32 addr;
363 __le32 flags_size;
364 __le32 addr_hi;
365 __le32 reserved;
Brett Russ20f733e2005-09-01 18:26:17 -0400366};
367
368struct mv_port_priv {
Brett Russ31961942005-09-30 01:36:00 -0400369 struct mv_crqb *crqb;
370 dma_addr_t crqb_dma;
371 struct mv_crpb *crpb;
372 dma_addr_t crpb_dma;
373 struct mv_sg *sg_tbl;
374 dma_addr_t sg_tbl_dma;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400375
376 unsigned int req_idx;
377 unsigned int resp_idx;
378
Brett Russ31961942005-09-30 01:36:00 -0400379 u32 pp_flags;
Brett Russ20f733e2005-09-01 18:26:17 -0400380};
381
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500382struct mv_port_signal {
383 u32 amps;
384 u32 pre;
385};
386
Jeff Garzik47c2b672005-11-12 21:13:17 -0500387struct mv_host_priv;
388struct mv_hw_ops {
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500389 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
390 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500391 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
392 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
393 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500394 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
395 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500396 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
397 void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500398};
399
Brett Russ20f733e2005-09-01 18:26:17 -0400400struct mv_host_priv {
Brett Russ31961942005-09-30 01:36:00 -0400401 u32 hp_flags;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500402 struct mv_port_signal signal[8];
Jeff Garzik47c2b672005-11-12 21:13:17 -0500403 const struct mv_hw_ops *ops;
Brett Russ20f733e2005-09-01 18:26:17 -0400404};
405
406static void mv_irq_clear(struct ata_port *ap);
407static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
408static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500409static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
410static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Brett Russ31961942005-09-30 01:36:00 -0400411static int mv_port_start(struct ata_port *ap);
412static void mv_port_stop(struct ata_port *ap);
413static void mv_qc_prep(struct ata_queued_cmd *qc);
Jeff Garzike4e7b892006-01-31 12:18:41 -0500414static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
Tejun Heo9a3d9eb2006-01-23 13:09:36 +0900415static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400416static void mv_error_handler(struct ata_port *ap);
417static void mv_post_int_cmd(struct ata_queued_cmd *qc);
418static void mv_eh_freeze(struct ata_port *ap);
419static void mv_eh_thaw(struct ata_port *ap);
Brett Russ20f733e2005-09-01 18:26:17 -0400420static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
421
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500422static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
423 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500424static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
425static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
426 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500427static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
428 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500429static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
430static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500431
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500432static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
433 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500434static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
435static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
436 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500437static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
438 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500439static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
440static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500441static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
442 unsigned int port_no);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500443
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400444static struct scsi_host_template mv5_sht = {
Brett Russ20f733e2005-09-01 18:26:17 -0400445 .module = THIS_MODULE,
446 .name = DRV_NAME,
447 .ioctl = ata_scsi_ioctl,
448 .queuecommand = ata_scsi_queuecmd,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400449 .can_queue = ATA_DEF_QUEUE,
450 .this_id = ATA_SHT_THIS_ID,
451 .sg_tablesize = MV_MAX_SG_CT,
452 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
453 .emulated = ATA_SHT_EMULATED,
454 .use_clustering = 1,
455 .proc_name = DRV_NAME,
456 .dma_boundary = MV_DMA_BOUNDARY,
457 .slave_configure = ata_scsi_slave_config,
458 .slave_destroy = ata_scsi_slave_destroy,
459 .bios_param = ata_std_bios_param,
460};
461
462static struct scsi_host_template mv6_sht = {
463 .module = THIS_MODULE,
464 .name = DRV_NAME,
465 .ioctl = ata_scsi_ioctl,
466 .queuecommand = ata_scsi_queuecmd,
467 .can_queue = ATA_DEF_QUEUE,
Brett Russ20f733e2005-09-01 18:26:17 -0400468 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikd88184f2007-02-26 01:26:06 -0500469 .sg_tablesize = MV_MAX_SG_CT,
Brett Russ20f733e2005-09-01 18:26:17 -0400470 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
471 .emulated = ATA_SHT_EMULATED,
Jeff Garzikd88184f2007-02-26 01:26:06 -0500472 .use_clustering = 1,
Brett Russ20f733e2005-09-01 18:26:17 -0400473 .proc_name = DRV_NAME,
474 .dma_boundary = MV_DMA_BOUNDARY,
475 .slave_configure = ata_scsi_slave_config,
Tejun Heoccf68c32006-05-31 18:28:09 +0900476 .slave_destroy = ata_scsi_slave_destroy,
Brett Russ20f733e2005-09-01 18:26:17 -0400477 .bios_param = ata_std_bios_param,
Brett Russ20f733e2005-09-01 18:26:17 -0400478};
479
Jeff Garzikc9d39132005-11-13 17:47:51 -0500480static const struct ata_port_operations mv5_ops = {
481 .port_disable = ata_port_disable,
482
483 .tf_load = ata_tf_load,
484 .tf_read = ata_tf_read,
485 .check_status = ata_check_status,
486 .exec_command = ata_exec_command,
487 .dev_select = ata_std_dev_select,
488
Jeff Garzikcffacd82007-03-09 09:46:47 -0500489 .cable_detect = ata_cable_sata,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500490
491 .qc_prep = mv_qc_prep,
492 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900493 .data_xfer = ata_data_xfer,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500494
Jeff Garzikc9d39132005-11-13 17:47:51 -0500495 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900496 .irq_on = ata_irq_on,
497 .irq_ack = ata_irq_ack,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500498
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400499 .error_handler = mv_error_handler,
500 .post_internal_cmd = mv_post_int_cmd,
501 .freeze = mv_eh_freeze,
502 .thaw = mv_eh_thaw,
503
Jeff Garzikc9d39132005-11-13 17:47:51 -0500504 .scr_read = mv5_scr_read,
505 .scr_write = mv5_scr_write,
506
507 .port_start = mv_port_start,
508 .port_stop = mv_port_stop,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500509};
510
511static const struct ata_port_operations mv6_ops = {
Brett Russ20f733e2005-09-01 18:26:17 -0400512 .port_disable = ata_port_disable,
513
514 .tf_load = ata_tf_load,
515 .tf_read = ata_tf_read,
516 .check_status = ata_check_status,
517 .exec_command = ata_exec_command,
518 .dev_select = ata_std_dev_select,
519
Jeff Garzikcffacd82007-03-09 09:46:47 -0500520 .cable_detect = ata_cable_sata,
Brett Russ20f733e2005-09-01 18:26:17 -0400521
Brett Russ31961942005-09-30 01:36:00 -0400522 .qc_prep = mv_qc_prep,
523 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900524 .data_xfer = ata_data_xfer,
Brett Russ20f733e2005-09-01 18:26:17 -0400525
Brett Russ20f733e2005-09-01 18:26:17 -0400526 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900527 .irq_on = ata_irq_on,
528 .irq_ack = ata_irq_ack,
Brett Russ20f733e2005-09-01 18:26:17 -0400529
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400530 .error_handler = mv_error_handler,
531 .post_internal_cmd = mv_post_int_cmd,
532 .freeze = mv_eh_freeze,
533 .thaw = mv_eh_thaw,
534
Brett Russ20f733e2005-09-01 18:26:17 -0400535 .scr_read = mv_scr_read,
536 .scr_write = mv_scr_write,
537
Brett Russ31961942005-09-30 01:36:00 -0400538 .port_start = mv_port_start,
539 .port_stop = mv_port_stop,
Brett Russ20f733e2005-09-01 18:26:17 -0400540};
541
Jeff Garzike4e7b892006-01-31 12:18:41 -0500542static const struct ata_port_operations mv_iie_ops = {
543 .port_disable = ata_port_disable,
544
545 .tf_load = ata_tf_load,
546 .tf_read = ata_tf_read,
547 .check_status = ata_check_status,
548 .exec_command = ata_exec_command,
549 .dev_select = ata_std_dev_select,
550
Jeff Garzikcffacd82007-03-09 09:46:47 -0500551 .cable_detect = ata_cable_sata,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500552
553 .qc_prep = mv_qc_prep_iie,
554 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900555 .data_xfer = ata_data_xfer,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500556
Jeff Garzike4e7b892006-01-31 12:18:41 -0500557 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900558 .irq_on = ata_irq_on,
559 .irq_ack = ata_irq_ack,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500560
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400561 .error_handler = mv_error_handler,
562 .post_internal_cmd = mv_post_int_cmd,
563 .freeze = mv_eh_freeze,
564 .thaw = mv_eh_thaw,
565
Jeff Garzike4e7b892006-01-31 12:18:41 -0500566 .scr_read = mv_scr_read,
567 .scr_write = mv_scr_write,
568
569 .port_start = mv_port_start,
570 .port_stop = mv_port_stop,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500571};
572
Arjan van de Ven98ac62d2005-11-28 10:06:23 +0100573static const struct ata_port_info mv_port_info[] = {
Brett Russ20f733e2005-09-01 18:26:17 -0400574 { /* chip_504x */
Jeff Garzikcca39742006-08-24 03:19:22 -0400575 .flags = MV_COMMON_FLAGS,
Brett Russ31961942005-09-30 01:36:00 -0400576 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400577 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500578 .port_ops = &mv5_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400579 },
580 { /* chip_508x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400581 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
Brett Russ31961942005-09-30 01:36:00 -0400582 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400583 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500584 .port_ops = &mv5_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400585 },
Jeff Garzik47c2b672005-11-12 21:13:17 -0500586 { /* chip_5080 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400587 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500588 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400589 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500590 .port_ops = &mv5_ops,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500591 },
Brett Russ20f733e2005-09-01 18:26:17 -0400592 { /* chip_604x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400593 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Brett Russ31961942005-09-30 01:36:00 -0400594 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400595 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500596 .port_ops = &mv6_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400597 },
598 { /* chip_608x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400599 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
600 MV_FLAG_DUAL_HC,
Brett Russ31961942005-09-30 01:36:00 -0400601 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400602 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500603 .port_ops = &mv6_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400604 },
Jeff Garzike4e7b892006-01-31 12:18:41 -0500605 { /* chip_6042 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400606 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500607 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400608 .udma_mask = ATA_UDMA6,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500609 .port_ops = &mv_iie_ops,
610 },
611 { /* chip_7042 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400612 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500613 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400614 .udma_mask = ATA_UDMA6,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500615 .port_ops = &mv_iie_ops,
616 },
Brett Russ20f733e2005-09-01 18:26:17 -0400617};
618
Jeff Garzik3b7d6972005-11-10 11:04:11 -0500619static const struct pci_device_id mv_pci_tbl[] = {
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400620 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
621 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
622 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
623 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
Brett Russ20f733e2005-09-01 18:26:17 -0400624
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400625 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
626 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
627 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
628 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
629 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
Jeff Garzik29179532005-11-11 08:08:03 -0500630
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400631 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
632
Florian Attenbergerd9f9c6b2007-07-02 17:09:29 +0200633 /* Adaptec 1430SA */
634 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
635
Olof Johanssone93f09d2007-01-18 18:39:59 -0600636 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
637
Morrison, Tom6a3d5862007-03-06 02:38:10 -0800638 /* add Marvell 7042 support */
639 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
640
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400641 { } /* terminate list */
Brett Russ20f733e2005-09-01 18:26:17 -0400642};
643
644static struct pci_driver mv_pci_driver = {
645 .name = DRV_NAME,
646 .id_table = mv_pci_tbl,
647 .probe = mv_init_one,
648 .remove = ata_pci_remove_one,
649};
650
Jeff Garzik47c2b672005-11-12 21:13:17 -0500651static const struct mv_hw_ops mv5xxx_ops = {
652 .phy_errata = mv5_phy_errata,
653 .enable_leds = mv5_enable_leds,
654 .read_preamp = mv5_read_preamp,
655 .reset_hc = mv5_reset_hc,
Jeff Garzik522479f2005-11-12 22:14:02 -0500656 .reset_flash = mv5_reset_flash,
657 .reset_bus = mv5_reset_bus,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500658};
659
660static const struct mv_hw_ops mv6xxx_ops = {
661 .phy_errata = mv6_phy_errata,
662 .enable_leds = mv6_enable_leds,
663 .read_preamp = mv6_read_preamp,
664 .reset_hc = mv6_reset_hc,
Jeff Garzik522479f2005-11-12 22:14:02 -0500665 .reset_flash = mv6_reset_flash,
666 .reset_bus = mv_reset_pci_bus,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500667};
668
Brett Russ20f733e2005-09-01 18:26:17 -0400669/*
Jeff Garzikddef9bb2006-02-02 16:17:06 -0500670 * module options
671 */
672static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
673
674
Jeff Garzikd88184f2007-02-26 01:26:06 -0500675/* move to PCI layer or libata core? */
676static int pci_go_64(struct pci_dev *pdev)
677{
678 int rc;
679
680 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
681 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
682 if (rc) {
683 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
684 if (rc) {
685 dev_printk(KERN_ERR, &pdev->dev,
686 "64-bit DMA enable failed\n");
687 return rc;
688 }
689 }
690 } else {
691 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
692 if (rc) {
693 dev_printk(KERN_ERR, &pdev->dev,
694 "32-bit DMA enable failed\n");
695 return rc;
696 }
697 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
698 if (rc) {
699 dev_printk(KERN_ERR, &pdev->dev,
700 "32-bit consistent DMA enable failed\n");
701 return rc;
702 }
703 }
704
705 return rc;
706}
707
Jeff Garzikddef9bb2006-02-02 16:17:06 -0500708/*
Brett Russ20f733e2005-09-01 18:26:17 -0400709 * Functions
710 */
711
/* writelfl - register write followed by a read-back of the same register,
 * forcing the write past any PCI posting buffers before returning.
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
717
/* Base address of host controller @hc's register block within @base. */
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}
722
/* Host-controller number that owns global port number @port. */
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}
727
/* Port index within its host controller for global port number @port. */
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
732
/* Register base of the host controller that owns global port @port. */
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}
738
/* Per-port register base: HC base, then past the arbiter block, then the
 * hard-port's slot within that HC.
 */
static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
745
/* Per-port register base for an ata_port, via the host's primary BAR. */
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}
750
Jeff Garzikcca39742006-08-24 03:19:22 -0400751static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400752{
Jeff Garzikcca39742006-08-24 03:19:22 -0400753 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400754}
755
/* Intentionally empty ->irq_clear hook; this driver has nothing to do
 * here (presumably interrupt causes are acked in the interrupt/error
 * paths instead -- confirm against the interrupt handler).
 */
static void mv_irq_clear(struct ata_port *ap)
{
}
759
/**
 *      mv_set_edma_ptrs - program EDMA request/response queue registers
 *      @port_mmio: per-port register base
 *      @hpriv: host private data (for errata flags)
 *      @pp: port private data holding queue DMA addresses and sw indices
 *
 *      Writes the CRQB/CRPB queue base addresses and in/out pointers,
 *      resyncing the hardware pointers with the driver's software
 *      req_idx/resp_idx.  Chips with the XX42A0 erratum need the full
 *      low 32 address bits rewritten along with the index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* CRQB ring must be 1KB aligned */
	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	/* CRPB ring must be 256B aligned */
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
799
/**
 *      mv_start_dma - Enable eDMA engine
 *      @base: port base address
 *      @hpriv: host private data (passed through to mv_set_edma_ptrs)
 *      @pp: port private data
 *
 *      If EDMA is not already enabled per our cached state: clear any
 *      pending EDMA error/event bits, resync the hardware queue
 *      pointers, then set the enable bit.  Verify the local cache of
 *      the eDMA state is accurate with a WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
			 struct mv_port_priv *pp)
{
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		/* clear EDMA event indicators, if any */
		writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(base, hpriv, pp);

		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}
825
/**
 *      mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Request EDMA disable (the disable bit self-clears), then poll up
 *      to ~100ms for the enable bit to drop.  Verify the local cache of
 *      the eDMA state is accurate with a WARN_ON.  Returns 0 on success
 *      or -EIO if the engine refused to stop.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		/* cached state says already off -- check hardware agrees */
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	/* reg holds the last value read; EDMA_EN still set means timeout */
	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
868
#ifdef ATA_DEBUG
/* Debug helper: hex-dump @bytes of MMIO space starting at @start,
 * four 32-bit words per output line.
 */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	unsigned offset = 0;

	while (offset < bytes) {
		int col;

		DPRINTK("%p: ", start + offset);
		for (col = 0; col < 4 && offset < bytes; col++) {
			printk("%08x ", readl(start + offset));
			offset += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
883
/* Debug helper: hex-dump @bytes of @pdev's PCI config space, four
 * 32-bit dwords per output line.  Compiles to a no-op unless ATA_DEBUG.
 */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	unsigned offset = 0;
	u32 dw;

	while (offset < bytes) {
		int col;

		DPRINTK("%02x: ", offset);
		for (col = 0; col < 4 && offset < bytes; col++) {
			(void) pci_read_config_dword(pdev, offset, &dw);
			printk("%08x ", dw);
			offset += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
900static void mv_dump_all_regs(void __iomem *mmio_base, int port,
901 struct pci_dev *pdev)
902{
903#ifdef ATA_DEBUG
Jeff Garzik8b260242005-11-12 12:32:50 -0500904 void __iomem *hc_base = mv_hc_base(mmio_base,
Brett Russ31961942005-09-30 01:36:00 -0400905 port >> MV_PORT_HC_SHIFT);
906 void __iomem *port_base;
907 int start_port, num_ports, p, start_hc, num_hcs, hc;
908
909 if (0 > port) {
910 start_hc = start_port = 0;
911 num_ports = 8; /* shld be benign for 4 port devs */
912 num_hcs = 2;
913 } else {
914 start_hc = port >> MV_PORT_HC_SHIFT;
915 start_port = port;
916 num_ports = num_hcs = 1;
917 }
Jeff Garzik8b260242005-11-12 12:32:50 -0500918 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
Brett Russ31961942005-09-30 01:36:00 -0400919 num_ports > 1 ? num_ports - 1 : start_port);
920
921 if (NULL != pdev) {
922 DPRINTK("PCI config space regs:\n");
923 mv_dump_pci_cfg(pdev, 0x68);
924 }
925 DPRINTK("PCI regs:\n");
926 mv_dump_mem(mmio_base+0xc00, 0x3c);
927 mv_dump_mem(mmio_base+0xd00, 0x34);
928 mv_dump_mem(mmio_base+0xf00, 0x4);
929 mv_dump_mem(mmio_base+0x1d00, 0x6c);
930 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
Dan Alonid220c372006-04-10 23:20:22 -0700931 hc_base = mv_hc_base(mmio_base, hc);
Brett Russ31961942005-09-30 01:36:00 -0400932 DPRINTK("HC regs (HC %i):\n", hc);
933 mv_dump_mem(hc_base, 0x1c);
934 }
935 for (p = start_port; p < start_port + num_ports; p++) {
936 port_base = mv_port_base(mmio_base, p);
937 DPRINTK("EDMA regs (port %i):\n",p);
938 mv_dump_mem(port_base, 0x54);
939 DPRINTK("SATA regs (port %i):\n",p);
940 mv_dump_mem(port_base+0x300, 0x60);
941 }
942#endif
943}
944
Brett Russ20f733e2005-09-01 18:26:17 -0400945static unsigned int mv_scr_offset(unsigned int sc_reg_in)
946{
947 unsigned int ofs;
948
949 switch (sc_reg_in) {
950 case SCR_STATUS:
951 case SCR_CONTROL:
952 case SCR_ERROR:
953 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
954 break;
955 case SCR_ACTIVE:
956 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
957 break;
958 default:
959 ofs = 0xffffffffU;
960 break;
961 }
962 return ofs;
963}
964
965static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
966{
967 unsigned int ofs = mv_scr_offset(sc_reg_in);
968
Jeff Garzik35177262007-02-24 21:26:42 -0500969 if (0xffffffffU != ofs)
Brett Russ20f733e2005-09-01 18:26:17 -0400970 return readl(mv_ap_base(ap) + ofs);
Jeff Garzik35177262007-02-24 21:26:42 -0500971 else
Brett Russ20f733e2005-09-01 18:26:17 -0400972 return (u32) ofs;
Brett Russ20f733e2005-09-01 18:26:17 -0400973}
974
975static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
976{
977 unsigned int ofs = mv_scr_offset(sc_reg_in);
978
Jeff Garzik35177262007-02-24 21:26:42 -0500979 if (0xffffffffU != ofs)
Brett Russ20f733e2005-09-01 18:26:17 -0400980 writelfl(val, mv_ap_base(ap) + ofs);
Brett Russ20f733e2005-09-01 18:26:17 -0400981}
982
/**
 *      mv_edma_cfg - program the per-port EDMA configuration register
 *      @ap: ATA channel (currently unused here)
 *      @hpriv: host private data, used to select the chip generation
 *      @pp unused; @port_mmio: per-port register base
 *
 *      Read-modify-write EDMA_CFG for non-NCQ operation, applying the
 *      generation-specific bits below.  Bit meanings follow the inline
 *      comments; numeric bit positions presumably match the Marvell
 *      datasheet -- confirm against the chip functional spec.
 */
static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable eQue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1014
/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.  One coherent DMA chunk is carved into the CRQB
 *      ring, the CRPB ring, and the ePRD (scatter-gather) table, in
 *      that order.  Uses devm/dmam APIs, so no explicit teardown is
 *      needed on the error paths.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	/* program EDMA config and hand the queue addresses to hardware */
	mv_edma_cfg(ap, hpriv, port_mmio);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}
1082
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.  (Memory itself is devm/dmam
 *      managed, so only the EDMA engine needs explicit shutdown.)
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	unsigned long flags;

	spin_lock_irqsave(&ap->host->lock, flags);
	mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);
}
1100
Brett Russ05b308e2005-10-05 17:08:53 -04001101/**
1102 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1103 * @qc: queued command whose SG list to source from
1104 *
1105 * Populate the SG list and mark the last entry.
1106 *
1107 * LOCKING:
1108 * Inherited from caller.
1109 */
Jeff Garzikd88184f2007-02-26 01:26:06 -05001110static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
Brett Russ31961942005-09-30 01:36:00 -04001111{
1112 struct mv_port_priv *pp = qc->ap->private_data;
Jeff Garzikd88184f2007-02-26 01:26:06 -05001113 unsigned int n_sg = 0;
Jeff Garzik972c26b2005-10-18 22:14:54 -04001114 struct scatterlist *sg;
Jeff Garzikd88184f2007-02-26 01:26:06 -05001115 struct mv_sg *mv_sg;
Brett Russ31961942005-09-30 01:36:00 -04001116
Jeff Garzikd88184f2007-02-26 01:26:06 -05001117 mv_sg = pp->sg_tbl;
Jeff Garzik972c26b2005-10-18 22:14:54 -04001118 ata_for_each_sg(sg, qc) {
Jeff Garzikd88184f2007-02-26 01:26:06 -05001119 dma_addr_t addr = sg_dma_address(sg);
1120 u32 sg_len = sg_dma_len(sg);
Brett Russ31961942005-09-30 01:36:00 -04001121
Jeff Garzikd88184f2007-02-26 01:26:06 -05001122 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1123 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1124 mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);
Brett Russ31961942005-09-30 01:36:00 -04001125
Jeff Garzikd88184f2007-02-26 01:26:06 -05001126 if (ata_sg_is_last(sg, qc))
1127 mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
Jeff Garzik972c26b2005-10-18 22:14:54 -04001128
Jeff Garzikd88184f2007-02-26 01:26:06 -05001129 mv_sg++;
1130 n_sg++;
Brett Russ31961942005-09-30 01:36:00 -04001131 }
Jeff Garzikd88184f2007-02-26 01:26:06 -05001132
1133 return n_sg;
Brett Russ31961942005-09-30 01:36:00 -04001134}
1135
Mark Lorde1469872006-05-22 19:02:03 -04001136static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001137{
Mark Lord559eeda2006-05-19 16:40:15 -04001138 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001139 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001140 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001141}
1142
/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* non-DMA protocols are issued via the normal taskfile path */
	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this*/

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* all commands share the single ePRD table at sg_tbl_dma */
	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accomodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	/* remaining shadow registers, command written last as required */
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1234
/**
 *      mv_qc_prep_iie - Host specific command preparation (Gen IIE).
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.  Gen IIE chips use a different CRQB layout
 *      (struct mv_crqb_iie) that packs the taskfile into four words.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* non-DMA protocols are issued via the normal taskfile path */
	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
						   what we use as our tag */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* the IIE CRQB overlays the generic ring slot */
	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1303
/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(port_mmio, hpriv, pp);

	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	/* advance the software producer index... */
	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1351
/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *      @qc: affected command (may be NULL), receives err_mask if set
 *
 *      Decode EDMA_ERR_IRQ_CAUSE into a libata err_mask and EH action,
 *      record it on @qc (or the port's eh_info when no command is
 *      active), clear SError/EDMA causes, then either freeze or abort
 *      the port depending on the generation-specific freeze mask.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(ap, SCR_ERROR, &serr);
		sata_scr_write_flush(ap, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */

	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRBQ_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, ", parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			", dev disconnect" : ", dev connect");
	}

	if (IS_50XX(hpriv)) {
		/* Gen I: its own freeze mask and self-disable bit */
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, ", EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, ", EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(ap, SCR_ERROR, &serr);
			sata_scr_write_flush(ap, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unknown cause: still force the port through EH */
	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
1455
1456static void mv_intr_pio(struct ata_port *ap)
1457{
1458 struct ata_queued_cmd *qc;
1459 u8 ata_status;
1460
1461 /* ignore spurious intr if drive still BUSY */
1462 ata_status = readb(ap->ioaddr.status_addr);
1463 if (unlikely(ata_status & ATA_BUSY))
1464 return;
1465
1466 /* get active ATA command */
1467 qc = ata_qc_from_tag(ap, ap->active_tag);
1468 if (unlikely(!qc)) /* no active tag */
1469 return;
1470 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1471 return;
1472
1473 /* and finally, complete the ATA command */
1474 qc->err_mask |= ac_err_mask(ata_status);
1475 ata_qc_complete(qc);
1476}
1477
/*
 * mv_intr_edma - service completions from the EDMA response queue.
 * @ap: port that raised the CRPB-done interrupt
 *
 * Walks the CRPB response queue from the software read pointer up to
 * the hardware write pointer, completing one qc per consumed entry.
 * If any response entry carries error bits, defers to mv_err_intr()
 * and returns immediately without updating the hardware out-pointer.
 * The out-pointer register is written back exactly once, after the
 * loop, and only if at least one entry was consumed.
 *
 * LOCKING:
 * Inherited from caller (called from interrupt handling with the
 * host lock held).
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;


		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			qc = ata_qc_from_tag(ap, ap->active_tag);

		/* 60xx: get active ATA command via tag, to enable support
		 * for queueing.  this works transparently for queued and
		 * non-queued modes.
		 */
		else {
			unsigned int tag;

			/* the IO ID field is shifted differently on
			 * gen II vs gen IIE parts */
			if (IS_GEN_II(hpriv))
				tag = (le16_to_cpu(pp->crpb[out_index].id)
					>> CRPB_IOID_SHIFT_6) & 0x3f;
			else
				tag = (le16_to_cpu(pp->crpb[out_index].id)
					>> CRPB_IOID_SHIFT_7) & 0x3f;

			qc = ata_qc_from_tag(ap, tag);
		}

		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
		 * bits (WARNING: might not necessarily be associated
		 * with this command), which -should- be clear
		 * if all is well
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if (unlikely(status & 0xff)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1552
Brett Russ05b308e2005-10-05 17:08:53 -04001553/**
1554 * mv_host_intr - Handle all interrupts on the given host controller
Jeff Garzikcca39742006-08-24 03:19:22 -04001555 * @host: host specific structure
Brett Russ05b308e2005-10-05 17:08:53 -04001556 * @relevant: port error bits relevant to this host controller
1557 * @hc: which host controller we're to look at
1558 *
1559 * Read then write clear the HC interrupt status then walk each
1560 * port connected to the HC and see if it needs servicing. Port
1561 * success ints are reported in the HC interrupt status reg, the
1562 * port error ints are reported in the higher level main
1563 * interrupt status register and thus are passed in via the
1564 * 'relevant' argument.
1565 *
1566 * LOCKING:
1567 * Inherited from caller.
1568 */
Jeff Garzikcca39742006-08-24 03:19:22 -04001569static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
Brett Russ20f733e2005-09-01 18:26:17 -04001570{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001571 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
Brett Russ20f733e2005-09-01 18:26:17 -04001572 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
Brett Russ20f733e2005-09-01 18:26:17 -04001573 u32 hc_irq_cause;
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001574 int port, port0;
Brett Russ20f733e2005-09-01 18:26:17 -04001575
Jeff Garzik35177262007-02-24 21:26:42 -05001576 if (hc == 0)
Brett Russ20f733e2005-09-01 18:26:17 -04001577 port0 = 0;
Jeff Garzik35177262007-02-24 21:26:42 -05001578 else
Brett Russ20f733e2005-09-01 18:26:17 -04001579 port0 = MV_PORTS_PER_HC;
Brett Russ20f733e2005-09-01 18:26:17 -04001580
1581 /* we'll need the HC success int register in most cases */
1582 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001583 if (!hc_irq_cause)
1584 return;
1585
1586 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001587
1588 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1589 hc,relevant,hc_irq_cause);
1590
1591 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
Jeff Garzikcca39742006-08-24 03:19:22 -04001592 struct ata_port *ap = host->ports[port];
Mark Lord63af2a52006-03-29 09:50:31 -05001593 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001594 int have_err_bits, hard_port, shift;
Jeff Garzik55d8ca42006-03-29 19:43:31 -05001595
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001596 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
Jeff Garzika2c91a82005-11-17 05:44:44 -05001597 continue;
1598
Brett Russ31961942005-09-30 01:36:00 -04001599 shift = port << 1; /* (port * 2) */
Brett Russ20f733e2005-09-01 18:26:17 -04001600 if (port >= MV_PORTS_PER_HC) {
1601 shift++; /* skip bit 8 in the HC Main IRQ reg */
1602 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001603 have_err_bits = ((PORT0_ERR << shift) & relevant);
1604
1605 if (unlikely(have_err_bits)) {
1606 struct ata_queued_cmd *qc;
1607
1608 qc = ata_qc_from_tag(ap, ap->active_tag);
1609 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1610 continue;
1611
1612 mv_err_intr(ap, qc);
1613 continue;
Brett Russ20f733e2005-09-01 18:26:17 -04001614 }
Jeff Garzik8b260242005-11-12 12:32:50 -05001615
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001616 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1617
1618 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1619 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1620 mv_intr_edma(ap);
1621 } else {
1622 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1623 mv_intr_pio(ap);
Brett Russ20f733e2005-09-01 18:26:17 -04001624 }
1625 }
1626 VPRINTK("EXIT\n");
1627}
1628
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001629static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1630{
1631 struct ata_port *ap;
1632 struct ata_queued_cmd *qc;
1633 struct ata_eh_info *ehi;
1634 unsigned int i, err_mask, printed = 0;
1635 u32 err_cause;
1636
1637 err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS);
1638
1639 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1640 err_cause);
1641
1642 DPRINTK("All regs @ PCI error\n");
1643 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1644
1645 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1646
1647 for (i = 0; i < host->n_ports; i++) {
1648 ap = host->ports[i];
1649 if (!ata_port_offline(ap)) {
1650 ehi = &ap->eh_info;
1651 ata_ehi_clear_desc(ehi);
1652 if (!printed++)
1653 ata_ehi_push_desc(ehi,
1654 "PCI err cause 0x%08x", err_cause);
1655 err_mask = AC_ERR_HOST_BUS;
1656 ehi->action = ATA_EH_HARDRESET;
1657 qc = ata_qc_from_tag(ap, ap->active_tag);
1658 if (qc)
1659 qc->err_mask |= err_mask;
1660 else
1661 ehi->err_mask |= err_mask;
1662
1663 ata_port_freeze(ap);
1664 }
1665 }
1666}
1667
Brett Russ05b308e2005-10-05 17:08:53 -04001668/**
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001669 * mv_interrupt - Main interrupt event handler
Brett Russ05b308e2005-10-05 17:08:53 -04001670 * @irq: unused
1671 * @dev_instance: private data; in this case the host structure
Brett Russ05b308e2005-10-05 17:08:53 -04001672 *
1673 * Read the read only register to determine if any host
1674 * controllers have pending interrupts. If so, call lower level
1675 * routine to handle. Also check for PCI errors which are only
1676 * reported here.
1677 *
Jeff Garzik8b260242005-11-12 12:32:50 -05001678 * LOCKING:
Jeff Garzikcca39742006-08-24 03:19:22 -04001679 * This routine holds the host lock while processing pending
Brett Russ05b308e2005-10-05 17:08:53 -04001680 * interrupts.
1681 */
David Howells7d12e782006-10-05 14:55:46 +01001682static irqreturn_t mv_interrupt(int irq, void *dev_instance)
Brett Russ20f733e2005-09-01 18:26:17 -04001683{
Jeff Garzikcca39742006-08-24 03:19:22 -04001684 struct ata_host *host = dev_instance;
Brett Russ20f733e2005-09-01 18:26:17 -04001685 unsigned int hc, handled = 0, n_hcs;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001686 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
Brett Russ20f733e2005-09-01 18:26:17 -04001687 u32 irq_stat;
1688
Brett Russ20f733e2005-09-01 18:26:17 -04001689 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001690
1691 /* check the cases where we either have nothing pending or have read
1692 * a bogus register value which can indicate HW removal or PCI fault
1693 */
Jeff Garzik35177262007-02-24 21:26:42 -05001694 if (!irq_stat || (0xffffffffU == irq_stat))
Brett Russ20f733e2005-09-01 18:26:17 -04001695 return IRQ_NONE;
Brett Russ20f733e2005-09-01 18:26:17 -04001696
Jeff Garzikcca39742006-08-24 03:19:22 -04001697 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1698 spin_lock(&host->lock);
Brett Russ20f733e2005-09-01 18:26:17 -04001699
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001700 if (unlikely(irq_stat & PCI_ERR)) {
1701 mv_pci_error(host, mmio);
1702 handled = 1;
1703 goto out_unlock; /* skip all other HC irq handling */
1704 }
1705
Brett Russ20f733e2005-09-01 18:26:17 -04001706 for (hc = 0; hc < n_hcs; hc++) {
1707 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1708 if (relevant) {
Jeff Garzikcca39742006-08-24 03:19:22 -04001709 mv_host_intr(host, relevant, hc);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001710 handled = 1;
Brett Russ20f733e2005-09-01 18:26:17 -04001711 }
1712 }
Mark Lord615ab952006-05-19 16:24:56 -04001713
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001714out_unlock:
Jeff Garzikcca39742006-08-24 03:19:22 -04001715 spin_unlock(&host->lock);
Brett Russ20f733e2005-09-01 18:26:17 -04001716
1717 return IRQ_RETVAL(handled);
1718}
1719
Jeff Garzikc9d39132005-11-13 17:47:51 -05001720static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1721{
1722 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1723 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1724
1725 return hc_mmio + ofs;
1726}
1727
1728static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1729{
1730 unsigned int ofs;
1731
1732 switch (sc_reg_in) {
1733 case SCR_STATUS:
1734 case SCR_ERROR:
1735 case SCR_CONTROL:
1736 ofs = sc_reg_in * sizeof(u32);
1737 break;
1738 default:
1739 ofs = 0xffffffffU;
1740 break;
1741 }
1742 return ofs;
1743}
1744
1745static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
1746{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001747 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1748 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001749 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1750
1751 if (ofs != 0xffffffffU)
Tejun Heo0d5ff562007-02-01 15:06:36 +09001752 return readl(addr + ofs);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001753 else
1754 return (u32) ofs;
1755}
1756
1757static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1758{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001759 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1760 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001761 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1762
1763 if (ofs != 0xffffffffU)
Tejun Heo0d5ff562007-02-01 15:06:36 +09001764 writelfl(val, addr + ofs);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001765}
1766
Jeff Garzik522479f2005-11-12 22:14:02 -05001767static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1768{
1769 u8 rev_id;
1770 int early_5080;
1771
1772 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
1773
1774 early_5080 = (pdev->device == 0x5080) && (rev_id == 0);
1775
1776 if (!early_5080) {
1777 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1778 tmp |= (1 << 0);
1779 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1780 }
1781
1782 mv_reset_pci_bus(pdev, mmio);
1783}
1784
/* 5xxx: program the flash controller config register with the value
 * used by this driver (also reused by mv6_reset_flash). */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1789
Jeff Garzik47c2b672005-11-12 21:13:17 -05001790static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001791 void __iomem *mmio)
1792{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001793 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1794 u32 tmp;
1795
1796 tmp = readl(phy_mmio + MV5_PHY_MODE);
1797
1798 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1799 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001800}
1801
/* 5xxx: LED setup -- zero GPIO port control, then update the
 * expansion-ROM BAR control register. */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): this ORs in every bit except bit 0 (0xfffffffe);
	 * if the intent was to CLEAR bit 0 (set by mv5_reset_bus), it
	 * would read "tmp &= ~(1 << 0)" -- confirm against chip docs
	 * before changing, as this matches the long-standing behavior. */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1814
/* 50xx: apply PHY errata workarounds for @port and program the saved
 * pre-emphasis/amplitude values captured by mv5_read_preamp(). */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	/* PHY mode bits replaced by the saved signal values below */
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		/* 50XXB0 errata: tweak LT mode and PHY control first */
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* merge the saved pre-emphasis/amplitude into PHY mode */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1840
Jeff Garzikc9d39132005-11-13 17:47:51 -05001841
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* 5xxx: disable EDMA, reset the channel, then zero out the port's
 * EDMA registers and restore the default config/timeout values.
 * Register order follows the Marvell vendor driver. */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1868
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* 5xxx: reset one host controller's shared registers -- zero several
 * HC registers and rewrite the register at offset 0x20 with a fixed
 * bit pattern (keep bits in 0x1c1c1c1c, set 0x03030303). */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
1887
1888static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1889 unsigned int n_hc)
1890{
1891 unsigned int hc, port;
1892
1893 for (hc = 0; hc < n_hc; hc++) {
1894 for (port = 0; port < MV_PORTS_PER_HC; port++)
1895 mv5_reset_hc_port(hpriv, mmio,
1896 (hc * MV_PORTS_PER_HC) + port);
1897
1898 mv5_reset_one_hc(hpriv, mmio, hc);
1899 }
1900
1901 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05001902}
1903
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
/* Common PCI bus reset: clear the mode bits in MV_PCI_MODE bits 23:16,
 * then zero out the PCI timer/trigger/mask/error registers and restore
 * the crossbar timeout default. */
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
1927
1928static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1929{
1930 u32 tmp;
1931
1932 mv5_reset_flash(hpriv, mmio);
1933
1934 tmp = readl(mmio + MV_GPIO_PORT_CTL);
1935 tmp &= 0x3;
1936 tmp |= (1 << 5) | (1 << 6);
1937 writel(tmp, mmio + MV_GPIO_PORT_CTL);
1938}
1939
/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @hpriv: host private data (unused; present to match the ops signature)
 * @mmio: base address of the HBA
 * @n_hc: number of host controllers (unused; present to match the ops
 *	signature)
 *
 * This routine only applies to 6xxx parts.  It flushes the PCI
 * master, asserts GLOB_SFT_RST, then deasserts it while re-enabling
 * the PCI master, polling after each step.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * Returns 0 on success, 1 if any step fails to take effect.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* wait up to ~1ms for the PCI master to go idle */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t) {
			break;
		}
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
2004
Jeff Garzik47c2b672005-11-12 21:13:17 -05002005static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002006 void __iomem *mmio)
2007{
2008 void __iomem *port_mmio;
2009 u32 tmp;
2010
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002011 tmp = readl(mmio + MV_RESET_CFG);
2012 if ((tmp & (1 << 0)) == 0) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002013 hpriv->signal[idx].amps = 0x7 << 8;
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002014 hpriv->signal[idx].pre = 0x1 << 5;
2015 return;
2016 }
2017
2018 port_mmio = mv_port_base(mmio, idx);
2019 tmp = readl(port_mmio + PHY_MODE2);
2020
2021 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2022 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2023}
2024
/* 6xxx: LED setup -- write 0x60 (bits 5 and 6) to GPIO port control. */
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
2029
/* 60xx: apply PHY errata workarounds for @port and restore the saved
 * pre-emphasis/amplitude values captured by mv6_read_preamp(). */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	/* both workarounds apply to the same chip revisions */
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		/* pulse bit 31 (with bit 16 cleared), then clear both */
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* 60X1B2: save register 0x310 across the PHY_MODE4 update */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
2095
/* Hard-reset one SATA channel: assert ATA_RST in the EDMA command
 * register, fix up the interface control register on 60xx parts, then
 * deassert the reset and run the per-chip PHY errata hook. */
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_60XX(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_50XX(hpriv))
		mdelay(1);
}
2122
Jeff Garzikc9d39132005-11-13 17:47:51 -05002123/**
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002124 * mv_phy_reset - Perform eDMA reset followed by COMRESET
Jeff Garzikc9d39132005-11-13 17:47:51 -05002125 * @ap: ATA channel to manipulate
2126 *
2127 * Part of this is taken from __sata_phy_reset and modified to
2128 * not sleep since this routine gets called from interrupt level.
2129 *
2130 * LOCKING:
2131 * Inherited from caller. This is coded to safe to call at
2132 * interrupt level, i.e. it does not sleep.
2133 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002134static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2135 unsigned long deadline)
Jeff Garzikc9d39132005-11-13 17:47:51 -05002136{
2137 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikcca39742006-08-24 03:19:22 -04002138 struct mv_host_priv *hpriv = ap->host->private_data;
Jeff Garzikc9d39132005-11-13 17:47:51 -05002139 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzik22374672005-11-17 10:59:48 -05002140 int retry = 5;
2141 u32 sstatus;
Jeff Garzikc9d39132005-11-13 17:47:51 -05002142
2143 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002144
Jeff Garzik095fec82005-11-12 09:50:49 -05002145 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
Brett Russ31961942005-09-30 01:36:00 -04002146 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
2147 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
Brett Russ20f733e2005-09-01 18:26:17 -04002148
Jeff Garzik22374672005-11-17 10:59:48 -05002149 /* Issue COMRESET via SControl */
2150comreset_retry:
Tejun Heo81952c52006-05-15 20:57:47 +09002151 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002152 msleep(1);
Jeff Garzik22374672005-11-17 10:59:48 -05002153
Tejun Heo81952c52006-05-15 20:57:47 +09002154 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002155 msleep(20);
Jeff Garzik22374672005-11-17 10:59:48 -05002156
Brett Russ31961942005-09-30 01:36:00 -04002157 do {
Tejun Heo81952c52006-05-15 20:57:47 +09002158 sata_scr_read(ap, SCR_STATUS, &sstatus);
Andres Salomon62f1d0e2006-09-11 08:51:05 -04002159 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
Brett Russ31961942005-09-30 01:36:00 -04002160 break;
Jeff Garzik22374672005-11-17 10:59:48 -05002161
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002162 msleep(1);
Jeff Garzikc5d3e452007-07-11 18:30:50 -04002163 } while (time_before(jiffies, deadline));
Brett Russ20f733e2005-09-01 18:26:17 -04002164
Jeff Garzik22374672005-11-17 10:59:48 -05002165 /* work around errata */
2166 if (IS_60XX(hpriv) &&
2167 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2168 (retry-- > 0))
2169 goto comreset_retry;
Jeff Garzik095fec82005-11-12 09:50:49 -05002170
2171 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
Brett Russ31961942005-09-30 01:36:00 -04002172 "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
2173 mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
2174
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002175 if (ata_port_offline(ap)) {
2176 *class = ATA_DEV_NONE;
Brett Russ20f733e2005-09-01 18:26:17 -04002177 return;
2178 }
2179
Jeff Garzik22374672005-11-17 10:59:48 -05002180 /* even after SStatus reflects that device is ready,
2181 * it seems to take a while for link to be fully
2182 * established (and thus Status no longer 0x80/0x7F),
2183 * so we poll a bit for that, here.
2184 */
2185 retry = 20;
2186 while (1) {
2187 u8 drv_stat = ata_check_status(ap);
2188 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2189 break;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002190 msleep(500);
Jeff Garzik22374672005-11-17 10:59:48 -05002191 if (retry-- <= 0)
2192 break;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002193 if (time_after(jiffies, deadline))
2194 break;
Jeff Garzik22374672005-11-17 10:59:48 -05002195 }
2196
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002197 /* FIXME: if we passed the deadline, the following
2198 * code probably produces an invalid result
2199 */
Brett Russ20f733e2005-09-01 18:26:17 -04002200
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002201 /* finally, read device signature from TF registers */
2202 *class = ata_dev_try_classify(ap, 0, NULL);
Jeff Garzik095fec82005-11-12 09:50:49 -05002203
2204 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2205
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002206 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
Jeff Garzik095fec82005-11-12 09:50:49 -05002207
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002208 VPRINTK("EXIT\n");
Brett Russ20f733e2005-09-01 18:26:17 -04002209}
2210
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002211static int mv_prereset(struct ata_port *ap, unsigned long deadline)
Jeff Garzik22374672005-11-17 10:59:48 -05002212{
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002213 struct mv_port_priv *pp = ap->private_data;
2214 struct ata_eh_context *ehc = &ap->eh_context;
2215 int rc;
2216
2217 rc = mv_stop_dma(ap);
2218 if (rc)
2219 ehc->i.action |= ATA_EH_HARDRESET;
2220
2221 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2222 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2223 ehc->i.action |= ATA_EH_HARDRESET;
2224 }
2225
2226 /* if we're about to do hardreset, nothing more to do */
2227 if (ehc->i.action & ATA_EH_HARDRESET)
2228 return 0;
2229
2230 if (ata_port_online(ap))
2231 rc = ata_wait_ready(ap, deadline);
2232 else
2233 rc = -ENODEV;
2234
2235 return rc;
Jeff Garzik22374672005-11-17 10:59:48 -05002236}
2237
/* libata hardreset hook: quiesce EDMA, hard-reset the channel, then
 * bring the PHY back up via COMRESET and classify the device into
 * *class.  Always returns 0. */
static int mv_hardreset(struct ata_port *ap, unsigned int *class,
			unsigned long deadline)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	return 0;
}
2252
/* libata postreset hook: report link status, clear SError, and -- if
 * a device was detected -- program the device control register. */
static void mv_postreset(struct ata_port *ap, unsigned int *classes)
{
	u32 serr;

	/* print link status */
	sata_print_link_status(ap);

	/* clear SError */
	sata_scr_read(ap, SCR_ERROR, &serr);
	sata_scr_write_flush(ap, SCR_ERROR, serr);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
}
2273
/* libata error-handler hook: run standard EH with this driver's
 * prereset/hardreset/postreset methods and the generic softreset. */
static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}
2279
/* libata post-internal-command hook: ensure EDMA is stopped after an
 * internal command completes. */
static void mv_post_int_cmd(struct ata_queued_cmd *qc)
{
	mv_stop_dma(qc->ap);
}
2284
/*
 * mv_eh_freeze - libata ->freeze hook: mask this port's interrupts.
 * @ap: port to freeze
 *
 * Clears this port's err/done bits in the chip's main IRQ mask so the
 * port stops asserting interrupts while EH owns it.  Each port owns a
 * 2-bit field in the main mask; ports 4-7 live on the second host
 * controller (hc 1), whose fields sit one bit higher.
 */
static void mv_eh_freeze(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	/* ports 0-3 are on hc 0, ports 4-7 on hc 1 */
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	u32 tmp, mask;
	unsigned int shift;

	/* FIXME: handle coalescing completion events properly */

	/* two mask bits (err, done) per port */
	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;

	mask = 0x3 << shift;

	/* disable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	/* writelfl flushes the posted write so the mask takes effect now */
	writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
2304
/*
 * mv_eh_thaw - libata ->thaw hook: re-enable this port's interrupts.
 * @ap: port to thaw
 *
 * Mirror of mv_eh_freeze(): discards any error/irq state that latched
 * while the port was frozen, then re-enables the port's err/done bits
 * in the main IRQ mask.  Cause bits must be cleared BEFORE unmasking,
 * or a stale event would fire immediately.
 */
static void mv_eh_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	/* ports 0-3 are on hc 0, ports 4-7 on hc 1 */
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		/* hc-relative port number for the per-hc cause register */
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	/* writelfl flushes the posted write so the unmask takes effect now */
	writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
2337
/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup: the taskfile shadow registers live in a
	 * contiguous block of 32-bit slots starting at SHD_BLK_OFS.
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	/* SError is write-1-to-clear: write back what we read */
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
2386
/*
 * mv_chip_id - identify the chip generation and record errata flags.
 * @host: ATA host being probed
 * @board_idx: board type selected from the PCI ID table
 *
 * Selects the per-generation hardware ops vector (mv5xxx_ops for
 * Gen-I 50xx parts, mv6xxx_ops for 60xx/x042) and translates the PCI
 * revision ID into MV_HP_ERRATA_* workaround flags.  Unknown revisions
 * get the newest known workaround set for that family, with a warning.
 *
 * Returns 0 on success, 1 on an invalid board index.
 */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 rev_id;
	u32 hp_flags = hpriv->hp_flags;

	/* HW stepping determines which errata workarounds to apply */
	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	switch(board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (rev_id) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (rev_id) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;

		switch (rev_id) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
	case chip_6042:
		/* Gen-IIE parts use the 6xxx ops plus the IIE feature flag */
		hpriv->ops = &mv6xxx_ops;

		hp_flags |= MV_HP_GEN_IIE;

		switch (rev_id) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;

	return 0;
}
2485
/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @host: ATA host to initialize
 *      @board_idx: controller index
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask: keep everything quiet during init */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	/* identify chip generation and pick hpriv->ops */
	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* sample per-port PHY state before resetting the controllers */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_60XX(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	/* set up shadow registers and clear per-port irq state */
	for (port = 0; port < host->n_ports; port++) {
		void __iomem *port_mmio = mv_port_base(mmio, port);
		mv_port_init(&host->ports[port]->ioaddr, port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);

	/* Gen-I (50xx) chips use a different main-irq mask set */
	if (IS_50XX(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));

done:
	return rc;
}
2575
Brett Russ05b308e2005-10-05 17:08:53 -04002576/**
2577 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002578 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002579 *
2580 * FIXME: complete this.
2581 *
2582 * LOCKING:
2583 * Inherited from caller.
2584 */
Tejun Heo4447d352007-04-17 23:44:08 +09002585static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002586{
Tejun Heo4447d352007-04-17 23:44:08 +09002587 struct pci_dev *pdev = to_pci_dev(host->dev);
2588 struct mv_host_priv *hpriv = host->private_data;
Brett Russ31961942005-09-30 01:36:00 -04002589 u8 rev_id, scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002590 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002591
2592 /* Use this to determine the HW stepping of the chip so we know
2593 * what errata to workaround
2594 */
2595 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
2596
2597 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2598 if (scc == 0)
2599 scc_s = "SCSI";
2600 else if (scc == 0x01)
2601 scc_s = "RAID";
2602 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002603 scc_s = "?";
2604
2605 if (IS_GEN_I(hpriv))
2606 gen = "I";
2607 else if (IS_GEN_II(hpriv))
2608 gen = "II";
2609 else if (IS_GEN_IIE(hpriv))
2610 gen = "IIE";
2611 else
2612 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002613
Jeff Garzika9524a72005-10-30 14:39:11 -05002614 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002615 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2616 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002617 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2618}
2619
/**
 *      mv_init_one - handle a positive probe of a Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      Allocates the ATA host and driver-private data, maps the
 *      primary BAR, initializes the adapter, enables interrupts
 *      (MSI if requested and available, else INTx) and activates
 *      the host.  All resources are devres-managed, so error paths
 *      simply return.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	/* print the driver banner once, on first probe */
	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		/* keep the device enabled so devres doesn't disable it */
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	/* set 64-bit DMA mask if supported, else fall back to 32-bit */
	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);	/* MSI failed: force legacy INTx on */

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
2682
/* Module init: register the PCI driver; probing happens via mv_init_one(). */
static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}
2687
/* Module exit: unregister the PCI driver; devres releases all resources. */
static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}
2692
/* Module metadata and entry points. */
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

/* msi=1 requests PCI MSI; probe falls back to legacy INTx if MSI fails. */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);