blob: c957e6e54ba1b61e493386dd92cb5a1a4ebebddd [file] [log] [blame]
Brett Russ20f733e2005-09-01 18:26:17 -04001/*
2 * sata_mv.c - Marvell SATA support
3 *
Jeff Garzik8b260242005-11-12 12:32:50 -05004 * Copyright 2005: EMC Corporation, all rights reserved.
Jeff Garzike2b1be52005-11-18 14:04:23 -05005 * Copyright 2005 Red Hat, Inc. All rights reserved.
Brett Russ20f733e2005-09-01 18:26:17 -04006 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
Jeff Garzik4a05e202007-05-24 23:40:15 -040024/*
25 sata_mv TODO list:
26
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember a couple workarounds (one related to PCI-X)
30 are still needed.
31
32 2) Convert to LibATA new EH. Required for hotplug, NCQ, and sane
33 probing/error handling in general. MUST HAVE.
34
35 3) Add hotplug support (easy, once new-EH support appears)
36
37 4) Add NCQ support (easy to intermediate, once new-EH support appears)
38
39 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
40
41 6) Add port multiplier support (intermediate)
42
43 7) Test and verify 3.0 Gbps support
44
45 8) Develop a low-power-consumption strategy, and implement it.
46
47 9) [Experiment, low priority] See if ATAPI can be supported using
48 "unknown FIS" or "vendor-specific FIS" support, or something creative
49 like that.
50
51 10) [Experiment, low priority] Investigate interrupt coalescing.
52 Quite often, especially with PCI Message Signalled Interrupts (MSI),
 53    the overhead reduced by interrupt mitigation is not
54 worth the latency cost.
55
56 11) [Experiment, Marvell value added] Is it possible to use target
57 mode to cross-connect two Linux boxes with Marvell cards? If so,
58 creating LibATA target mode support would be very interesting.
59
60 Target mode, for those without docs, is the ability to directly
61 connect two SATA controllers.
62
 63    12) Verify that 7042 is fully supported.  I only have a 6042.
64
65*/
66
67
Brett Russ20f733e2005-09-01 18:26:17 -040068#include <linux/kernel.h>
69#include <linux/module.h>
70#include <linux/pci.h>
71#include <linux/init.h>
72#include <linux/blkdev.h>
73#include <linux/delay.h>
74#include <linux/interrupt.h>
Brett Russ20f733e2005-09-01 18:26:17 -040075#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050076#include <linux/device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040077#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050078#include <scsi/scsi_cmnd.h>
Brett Russ20f733e2005-09-01 18:26:17 -040079#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040080
81#define DRV_NAME "sata_mv"
Jeff Garzik8bc3fc42007-05-21 20:26:38 -040082#define DRV_VERSION "0.81"
Brett Russ20f733e2005-09-01 18:26:17 -040083
/*
 * Chip register offsets, register bit definitions, and driver flag values.
 * Offsets are relative to the chip's BAR 0 MMIO window unless a comment
 * says otherwise; per-port registers are relative to mv_port_base().
 */
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_USE_Q_DEPTH		= ATA_DEF_QUEUE,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	MV_COMMON_FLAGS		= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
				   ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING),
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	/* Command ReQuest Block control-word fields */
	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,

	/* NOTE(review): 1 << 31 overflows signed int (technically UB);
	 * (1U << 31) would be cleaner but changes the enum constant's type.
	 */
	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS		= 0x1d58,
	PCI_IRQ_MASK_OFS		= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	/* Main interrupt cause/mask; the _5 variants are for 50xx parts */
	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0,			/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),		/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),		/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),		/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),
	EDMA_ERR_PRD_PAR	= (1 << 1),
	EDMA_ERR_DEV		= (1 << 2),
	EDMA_ERR_DEV_DCON	= (1 << 3),
	EDMA_ERR_DEV_CON	= (1 << 4),
	EDMA_ERR_SERR		= (1 << 5),
	EDMA_ERR_SELF_DIS	= (1 << 7),
	EDMA_ERR_BIST_ASYNC	= (1 << 8),
	EDMA_ERR_CRBQ_PAR	= (1 << 9),
	EDMA_ERR_CRPB_PAR	= (1 << 10),
	EDMA_ERR_INTRL_PAR	= (1 << 11),
	EDMA_ERR_IORDY		= (1 << 12),
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* NOTE(review): signed 1<<31 */
	/* union of the error bits treated as fatal by the IRQ handler */
	EDMA_ERR_FATAL		= (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
				   EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
				   EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
				   EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
				   EDMA_ERR_LNK_DATA_RX |
				   EDMA_ERR_LNK_DATA_TX |
				   EDMA_ERR_TRANS_PROTO),

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,
	EDMA_EN			= (1 << 0),
	EDMA_DS			= (1 << 1),
	ATA_RST			= (1 << 2),

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_50XX		= (1 << 6),
	MV_HP_GEN_IIE		= (1 << 7),

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),
	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
};
292
/* Chip-generation tests on mv_host_priv::hp_flags (see MV_HP_* above). */
#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX)
#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0)
#define IS_GEN_I(hpriv) IS_50XX(hpriv)
#define IS_GEN_II(hpriv) IS_60XX(hpriv)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500298
/* Unsigned 32-bit masks; kept in a separate enum from the signed constants. */
enum {
	MV_DMA_BOUNDARY		= 0xffffffffU,

	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,	/* request queue: 1KB aligned */

	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,	/* response queue: 256B aligned */
};
306
/* Board index: selects an mv_port_info[] entry; used as pci_device_id data. */
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};
316
/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;	/* SG (ePRD) table address, low 32 bits */
	__le32			sg_addr_hi;	/* SG (ePRD) table address, high 32 bits */
	__le16			ctrl_flags;	/* CRQB_* flag/tag fields */
	__le16			ata_cmd[11];	/* ATA command, 16-bit words */
};
324
/* Command ReQuest Block for Gen-IIE chips (built by mv_qc_prep_iie): 32B */
struct mv_crqb_iie {
	__le32			addr;		/* low 32 bits */
	__le32			addr_hi;	/* high 32 bits */
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];	/* ATA command, packed in 32-bit words */
};
332
/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;	/* command tag — presumably matches CRQB tag; verify */
	__le16			flags;	/* status lives at CRPB_FLAG_STATUS_SHIFT */
	__le32			tmstmp;	/* timestamp */
};
339
/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;		/* buffer address, low 32 bits */
	__le32			flags_size;	/* byte count + flags (EPRD_FLAG_END_OF_TBL) */
	__le32			addr_hi;	/* buffer address, high 32 bits */
	__le32			reserved;
};
347
/*
 * Per-port driver state: the CRQB/CRPB rings and the SG (ePRD) table,
 * each with its kernel-virtual pointer and bus (DMA) address.
 */
struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;
	u32			pp_flags;	/* MV_PP_FLAG_* (eDMA state cache) */
};
357
/* Per-port PHY signal settings, filled in by mv_hw_ops::read_preamp. */
struct mv_port_signal {
	u32			amps;	/* amplitude */
	u32			pre;	/* pre-emphasis */
};
362
struct mv_host_priv;

/*
 * Chip-family-specific hardware hooks; one table per generation
 * (see mv5xxx_ops / mv6xxx_ops below).
 */
struct mv_hw_ops {
	/* apply per-port PHY errata workarounds */
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	/* read PHY preamp/amplitude settings into hpriv->signal[idx] */
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	/* reset @n_hc host controllers; returns 0 on success */
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};
375
/* Host-wide driver state: chip/errata flags, per-port PHY data, hw hooks. */
struct mv_host_priv {
	u32			hp_flags;	/* MV_HP_* flags */
	struct mv_port_signal	signal[8];	/* one entry per possible port */
	const struct mv_hw_ops	*ops;		/* generation-specific hooks */
};
381
382static void mv_irq_clear(struct ata_port *ap);
383static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
384static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500385static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
386static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Brett Russ20f733e2005-09-01 18:26:17 -0400387static void mv_phy_reset(struct ata_port *ap);
Jeff Garzik22374672005-11-17 10:59:48 -0500388static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
Brett Russ31961942005-09-30 01:36:00 -0400389static int mv_port_start(struct ata_port *ap);
390static void mv_port_stop(struct ata_port *ap);
391static void mv_qc_prep(struct ata_queued_cmd *qc);
Jeff Garzike4e7b892006-01-31 12:18:41 -0500392static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
Tejun Heo9a3d9eb2006-01-23 13:09:36 +0900393static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
Brett Russ31961942005-09-30 01:36:00 -0400394static void mv_eng_timeout(struct ata_port *ap);
Brett Russ20f733e2005-09-01 18:26:17 -0400395static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
396
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500397static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
398 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500399static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
400static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
401 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500402static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
403 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500404static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
405static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500406
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500407static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
408 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500409static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
410static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
411 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500412static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
413 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500414static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
415static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500416static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
417 unsigned int port_no);
418static void mv_stop_and_reset(struct ata_port *ap);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500419
/* SCSI host template: mostly generic libata glue plus our queue/SG limits. */
static struct scsi_host_template mv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= MV_USE_Q_DEPTH,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
437
/* Port ops for Gen-I (50xx) chips: SCRs accessed via mv5_scr_read/write. */
static const struct ata_port_operations mv5_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,
	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
466
/* Port ops for Gen-II (60xx) chips: differs from mv5_ops only in SCR access. */
static const struct ata_port_operations mv6_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,
	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
495
/* Port ops for Gen-IIE (6042/7042) chips: same as mv6_ops but with the
 * IIE-format command request blocks (mv_qc_prep_iie).
 */
static const struct ata_port_operations mv_iie_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,
	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
524
/* Per-board capabilities, indexed by enum chip_type. */
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				   MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_iie_ops,
	},
};
570
/* Supported PCI IDs; driver_data is the chip_type index into mv_port_info. */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	/* add Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	{ }			/* terminate list */
};
592
/* PCI glue: probe via mv_init_one, teardown via the generic libata helper. */
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
599
/* Hardware hooks for Gen-I (50xx) chips. */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
608
/* Hardware hooks for Gen-II/IIE (60xx/7042) chips. */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
617
/*
 * module options
 */
/* NOTE(review): presumably registered with module_param() elsewhere — verify */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
622
623
Jeff Garzikd88184f2007-02-26 01:26:06 -0500624/* move to PCI layer or libata core? */
625static int pci_go_64(struct pci_dev *pdev)
626{
627 int rc;
628
629 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
630 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
631 if (rc) {
632 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
633 if (rc) {
634 dev_printk(KERN_ERR, &pdev->dev,
635 "64-bit DMA enable failed\n");
636 return rc;
637 }
638 }
639 } else {
640 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
641 if (rc) {
642 dev_printk(KERN_ERR, &pdev->dev,
643 "32-bit DMA enable failed\n");
644 return rc;
645 }
646 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
647 if (rc) {
648 dev_printk(KERN_ERR, &pdev->dev,
649 "32-bit consistent DMA enable failed\n");
650 return rc;
651 }
652 }
653
654 return rc;
655}
656
Jeff Garzikddef9bb2006-02-02 16:17:06 -0500657/*
Brett Russ20f733e2005-09-01 18:26:17 -0400658 * Functions
659 */
660
/* Write a register, then read it back to flush PCI posted writes. */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
666
/* MMIO base of SATA host controller (HC) number @hc. */
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}
671
/* Map a global port number (0-7) to its host controller: port / 4. */
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}
676
/* Map a global port number (0-7) to its index within its HC: port % 4. */
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
681
/* MMIO base of the host controller that owns @port. */
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}
687
Brett Russ20f733e2005-09-01 18:26:17 -0400688static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
689{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500690 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500691 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500692 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400693}
694
/* Per-port register base for @ap, via the host's BAR 0 mapping. */
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}
699
Jeff Garzikcca39742006-08-24 03:19:22 -0400700static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400701{
Jeff Garzikcca39742006-08-24 03:19:22 -0400702 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400703}
704
/* libata ->irq_clear hook: empty — no per-port IRQ clearing is done here. */
static void mv_irq_clear(struct ata_port *ap)
{
}
708
/**
 *      mv_start_dma - Enable eDMA engine
 *      @base: port base address
 *      @pp: port private data
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
{
	if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
		/* set the enable bit and remember we did so */
		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	/* cross-check cached state against the hardware register */
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}
728
/**
 * mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i;

	if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		/* Cache says EDMA is already off; warn if HW disagrees */
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
  	}

	/* now properly wait for the eDMA to stop */
	/* poll up to 1000 * 100us = 100ms for EDMA_EN to drop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(EDMA_EN & reg)) {
			break;
		}
		udelay(100);
	}

	/* 'reg' holds the last value read; EDMA_EN still set means timeout */
	if (EDMA_EN & reg) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		/* FIXME: Consider doing a reset here to recover */
	}
}
769
#ifdef ATA_DEBUG
/* Debug helper: hex-dump 'bytes' of MMIO space, four 32-bit words per
 * row, each row prefixed with its starting address.
 */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int off = 0;

	while (off < bytes) {
		int col;

		DPRINTK("%p: ", start + off);
		for (col = 0; off < bytes && col < 4; col++) {
			printk("%08x ", readl(start + off));
			off += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
784
/* Debug helper: hex-dump the first 'bytes' of the device's PCI config
 * space, four dwords per row.  Body compiles away without ATA_DEBUG.
 */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int off = 0;

	while (off < bytes) {
		int col;

		DPRINTK("%02x: ", off);
		for (col = 0; off < bytes && col < 4; col++) {
			u32 dw;

			(void) pci_read_config_dword(pdev, off, &dw);
			printk("%08x ", dw);
			off += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
/* Debug helper: dump PCI config space plus the PCI, HC and per-port
 * register blocks, either for one port (port >= 0) or for everything
 * (port < 0).  Body compiles away unless ATA_DEBUG is defined.
 */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		/* negative port: dump all HCs and ports */
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n",p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n",p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
845
Brett Russ20f733e2005-09-01 18:26:17 -0400846static unsigned int mv_scr_offset(unsigned int sc_reg_in)
847{
848 unsigned int ofs;
849
850 switch (sc_reg_in) {
851 case SCR_STATUS:
852 case SCR_CONTROL:
853 case SCR_ERROR:
854 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
855 break;
856 case SCR_ACTIVE:
857 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
858 break;
859 default:
860 ofs = 0xffffffffU;
861 break;
862 }
863 return ofs;
864}
865
866static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
867{
868 unsigned int ofs = mv_scr_offset(sc_reg_in);
869
Jeff Garzik35177262007-02-24 21:26:42 -0500870 if (0xffffffffU != ofs)
Brett Russ20f733e2005-09-01 18:26:17 -0400871 return readl(mv_ap_base(ap) + ofs);
Jeff Garzik35177262007-02-24 21:26:42 -0500872 else
Brett Russ20f733e2005-09-01 18:26:17 -0400873 return (u32) ofs;
Brett Russ20f733e2005-09-01 18:26:17 -0400874}
875
876static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
877{
878 unsigned int ofs = mv_scr_offset(sc_reg_in);
879
Jeff Garzik35177262007-02-24 21:26:42 -0500880 if (0xffffffffU != ofs)
Brett Russ20f733e2005-09-01 18:26:17 -0400881 writelfl(val, mv_ap_base(ap) + ofs);
Brett Russ20f733e2005-09-01 18:26:17 -0400882}
883
/**
 * mv_edma_cfg - Set up chip-generation-specific EDMA configuration
 * @hpriv: host private data, used for the IS_GEN_* chip checks
 * @port_mmio: port register base address
 *
 * Read-modify-write the port's EDMA config register for non-NCQ
 * operation, applying the bit settings appropriate to Gen I,
 * Gen II or Gen IIE chips.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable equeue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
914
/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the
 * error from ata_pad_alloc().  Allocations use the devm_/dmam_
 * managed APIs, so no explicit cleanup is needed on failure.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	/* one chunk of coherent DMA memory, carved into three tables below */
	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	mv_edma_cfg(hpriv, port_mmio);

	/* program the request/response queue base addresses; the
	 * "(x >> 16) >> 16" form extracts the high dword without a
	 * 64-bit shift warning when dma_addr_t is 32 bits wide.
	 */
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	/* XX42A0 erratum: out-pointer registers want the full low dword */
	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crqb_dma & 0xffffffff,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crpb_dma & 0xffffffff,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}
1001
Brett Russ05b308e2005-10-05 17:08:53 -04001002/**
1003 * mv_port_stop - Port specific cleanup/stop routine.
1004 * @ap: ATA channel to manipulate
1005 *
1006 * Stop DMA, cleanup port memory.
1007 *
1008 * LOCKING:
Jeff Garzikcca39742006-08-24 03:19:22 -04001009 * This routine uses the host lock to protect the DMA stop.
Brett Russ05b308e2005-10-05 17:08:53 -04001010 */
Brett Russ31961942005-09-30 01:36:00 -04001011static void mv_port_stop(struct ata_port *ap)
1012{
Brett Russafb0edd2005-10-05 17:08:42 -04001013 unsigned long flags;
Brett Russ31961942005-09-30 01:36:00 -04001014
Jeff Garzikcca39742006-08-24 03:19:22 -04001015 spin_lock_irqsave(&ap->host->lock, flags);
Brett Russ31961942005-09-30 01:36:00 -04001016 mv_stop_dma(ap);
Jeff Garzikcca39742006-08-24 03:19:22 -04001017 spin_unlock_irqrestore(&ap->host->lock, flags);
Brett Russ31961942005-09-30 01:36:00 -04001018}
1019
Brett Russ05b308e2005-10-05 17:08:53 -04001020/**
1021 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1022 * @qc: queued command whose SG list to source from
1023 *
1024 * Populate the SG list and mark the last entry.
1025 *
1026 * LOCKING:
1027 * Inherited from caller.
1028 */
Jeff Garzikd88184f2007-02-26 01:26:06 -05001029static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
Brett Russ31961942005-09-30 01:36:00 -04001030{
1031 struct mv_port_priv *pp = qc->ap->private_data;
Jeff Garzikd88184f2007-02-26 01:26:06 -05001032 unsigned int n_sg = 0;
Jeff Garzik972c26b2005-10-18 22:14:54 -04001033 struct scatterlist *sg;
Jeff Garzikd88184f2007-02-26 01:26:06 -05001034 struct mv_sg *mv_sg;
Brett Russ31961942005-09-30 01:36:00 -04001035
Jeff Garzikd88184f2007-02-26 01:26:06 -05001036 mv_sg = pp->sg_tbl;
Jeff Garzik972c26b2005-10-18 22:14:54 -04001037 ata_for_each_sg(sg, qc) {
Jeff Garzikd88184f2007-02-26 01:26:06 -05001038 dma_addr_t addr = sg_dma_address(sg);
1039 u32 sg_len = sg_dma_len(sg);
Brett Russ31961942005-09-30 01:36:00 -04001040
Jeff Garzikd88184f2007-02-26 01:26:06 -05001041 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1042 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1043 mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);
Brett Russ31961942005-09-30 01:36:00 -04001044
Jeff Garzikd88184f2007-02-26 01:26:06 -05001045 if (ata_sg_is_last(sg, qc))
1046 mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
Jeff Garzik972c26b2005-10-18 22:14:54 -04001047
Jeff Garzikd88184f2007-02-26 01:26:06 -05001048 mv_sg++;
1049 n_sg++;
Brett Russ31961942005-09-30 01:36:00 -04001050 }
Jeff Garzikd88184f2007-02-26 01:26:06 -05001051
1052 return n_sg;
Brett Russ31961942005-09-30 01:36:00 -04001053}
1054
Mark Lorda6432432006-05-19 16:36:36 -04001055static inline unsigned mv_inc_q_index(unsigned index)
Brett Russ31961942005-09-30 01:36:00 -04001056{
Mark Lorda6432432006-05-19 16:36:36 -04001057 return (index + 1) & MV_MAX_Q_DEPTH_MASK;
Brett Russ31961942005-09-30 01:36:00 -04001058}
1059
Mark Lorde1469872006-05-22 19:02:03 -04001060static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001061{
Mark Lord559eeda2006-05-19 16:40:15 -04001062 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001063 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001064 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001065}
1066
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (ATA_PROT_DMA != qc->tf.protocol)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		    >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* point the CRQB slot at this command's ePRD table; the
	 * "(x >> 16) >> 16" form extracts the high dword without a
	 * 64-bit shift warning when dma_addr_t is 32 bits wide
	 */
	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accomodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	/* remaining shadow registers, in the order the CRQB expects;
	 * the command register write is flagged as the last word
	 */
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1158
/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.  Gen IIE chips use a different CRQB layout
 * (struct mv_crqb_iie) than the older generations.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (ATA_PROT_DMA != qc->tf.protocol)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		    >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* Gen IIE CRQB overlays the same queue slots as the older layout */
	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	/* double shift avoids a warning on 32-bit dma_addr_t */
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	/* taskfile registers packed into four little-endian dwords */
	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1226
/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(qc->ap);
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned in_index;
	u32 in_ptr;

	if (ATA_PROT_DMA != qc->tf.protocol) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_dma(qc->ap);
		return ata_qc_issue_prot(qc);
	}

	in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
	in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	in_index = mv_inc_q_index(in_index);	/* now incr producer index */

	/* EDMA may have been turned off by an earlier non-DMA command */
	mv_start_dma(port_mmio, pp);

	/* and write the request in pointer to kick the EDMA to life */
	in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
	in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
	writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1273
/**
 * mv_get_crpb_status - get status from most recently completed cmd
 * @ap: ATA channel to manipulate
 *
 * This routine is for use when the port is in DMA mode, when it
 * will be using the CRPB (command response block) method of
 * returning command completion information.  We check indices
 * are good, grab status, and bump the response consumer index to
 * prove that we're up to date.
 *
 * LOCKING:
 * Inherited from caller.
 */
static u8 mv_get_crpb_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	unsigned out_index;
	u32 out_ptr;
	u8 ata_status;

	out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
	out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* ATA status lives in the upper bits of the CRPB flags word */
	ata_status = le16_to_cpu(pp->crpb[out_index].flags)
		>> CRPB_FLAG_STATUS_SHIFT;

	/* increment our consumer index... */
	out_index = mv_inc_q_index(out_index);

	/* and, until we do NCQ, there should only be 1 CRPB waiting */
	WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
		>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	/* write out our inc'd consumer index so EDMA knows we're caught up */
	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
	out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
	writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* Return ATA status register for completed CRPB */
	return ata_status;
}
1316
/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @reset_allowed: bool: 0 == don't trigger from reset here
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which is done right before
 * the COMRESET in mv_phy_reset().  The SERR case requires a
 * clear of pending errors in the SATA SERROR register.  Finally,
 * if the port disabled DMA, update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, int reset_allowed)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, serr = 0;

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* read-then-write-back clears pending SATA errors in SERROR */
	if (EDMA_ERR_SERR & edma_err_cause) {
		sata_scr_read(ap, SCR_ERROR, &serr);
		sata_scr_write_flush(ap, SCR_ERROR, serr);
	}
	/* hardware self-disabled EDMA; keep our cached state in sync */
	if (EDMA_ERR_SELF_DIS & edma_err_cause) {
		struct mv_port_priv *pp	= ap->private_data;
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	}
	DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
		"SERR: 0x%08x\n", ap->print_id, edma_err_cause, serr);

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* check for fatal here and recover if needed */
	if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
		mv_stop_and_reset(ap);
}
1356
/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	struct ata_queued_cmd *qc;
	u32 hc_irq_cause;
	int shift, port, port0, hard_port, handled;
	unsigned int err_mask;

	/* HC 0 serves the first MV_PORTS_PER_HC system-wide ports */
	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (hc_irq_cause)
		writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc,relevant,hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		u8 ata_status = 0;
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;

		hard_port = mv_hardport_from_port(port); /* range 0..3 */
		handled = 0;	/* ensure ata_status is set if handled++ */

		/* Note that DEV_IRQ might happen spuriously during EDMA,
		 * and should be ignored in such cases.
		 * The cause of this is still under investigation.
		 */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			/* EDMA: check for response queue interrupt */
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
				ata_status = mv_get_crpb_status(ap);
				handled = 1;
			}
		} else {
			/* PIO: check for device (drive) interrupt */
			if ((DEV_IRQ << hard_port) & hc_irq_cause) {
				ata_status = readb(ap->ioaddr.status_addr);
				handled = 1;
				/* ignore spurious intr if drive still BUSY */
				if (ata_status & ATA_BUSY) {
					ata_status = 0;
					handled = 0;
				}
			}
		}

		if (ap && (ap->flags & ATA_FLAG_DISABLED))
			continue;

		err_mask = ac_err_mask(ata_status);

		/* locate this port's error bit in the main IRQ register */
		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		if ((PORT0_ERR << shift) & relevant) {
			mv_err_intr(ap, 1);
			err_mask |= AC_ERR_OTHER;
			handled = 1;
		}

		if (handled) {
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
				VPRINTK("port %u IRQ found for qc, "
					"ata_status 0x%x\n", port,ata_status);
				/* mark qc status appropriately */
				if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
					qc->err_mask |= err_mask;
					ata_qc_complete(qc);
				}
			}
		}
	}
	VPRINTK("EXIT\n");
}
1456
Brett Russ05b308e2005-10-05 17:08:53 -04001457/**
Jeff Garzik8b260242005-11-12 12:32:50 -05001458 * mv_interrupt -
Brett Russ05b308e2005-10-05 17:08:53 -04001459 * @irq: unused
1460 * @dev_instance: private data; in this case the host structure
1461 * @regs: unused
1462 *
1463 * Read the read only register to determine if any host
1464 * controllers have pending interrupts. If so, call lower level
1465 * routine to handle. Also check for PCI errors which are only
1466 * reported here.
1467 *
Jeff Garzik8b260242005-11-12 12:32:50 -05001468 * LOCKING:
Jeff Garzikcca39742006-08-24 03:19:22 -04001469 * This routine holds the host lock while processing pending
Brett Russ05b308e2005-10-05 17:08:53 -04001470 * interrupts.
1471 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv;
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat))
		return IRQ_NONE;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);
	spin_lock(&host->lock);

	/* each host controller owns a slice of bits in the main cause
	 * register; dispatch only the HCs with something pending
	 */
	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled++;
		}
	}

	hpriv = host->private_data;
	if (IS_60XX(hpriv)) {
		/* deal with the interrupt coalescing bits: 60xx-only
		 * cause registers that must be cleared by hand
		 */
		if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
		}
	}

	/* PCI errors are reported only via this top-level handler */
	if (PCI_ERR & irq_stat) {
		printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
		       readl(mmio + PCI_IRQ_CAUSE_OFS));

		DPRINTK("All regs @ PCI error\n");
		mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

		writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
		handled++;
	}
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1523
Jeff Garzikc9d39132005-11-13 17:47:51 -05001524static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1525{
1526 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1527 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1528
1529 return hc_mmio + ofs;
1530}
1531
1532static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1533{
1534 unsigned int ofs;
1535
1536 switch (sc_reg_in) {
1537 case SCR_STATUS:
1538 case SCR_ERROR:
1539 case SCR_CONTROL:
1540 ofs = sc_reg_in * sizeof(u32);
1541 break;
1542 default:
1543 ofs = 0xffffffffU;
1544 break;
1545 }
1546 return ofs;
1547}
1548
1549static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
1550{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001551 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1552 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001553 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1554
1555 if (ofs != 0xffffffffU)
Tejun Heo0d5ff562007-02-01 15:06:36 +09001556 return readl(addr + ofs);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001557 else
1558 return (u32) ofs;
1559}
1560
1561static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1562{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001563 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1564 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001565 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1566
1567 if (ofs != 0xffffffffU)
Tejun Heo0d5ff562007-02-01 15:06:36 +09001568 writelfl(val, addr + ofs);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001569}
1570
/* 50xx PCI bus reset.  On all but the earliest 5080 stepping
 * (device 0x5080, rev 0), bit 0 of the expansion-ROM BAR control
 * register is set before performing the common PCI reset.
 */
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u8 rev_id;
	int early_5080;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	/* earliest 5080 stepping must skip the ROM BAR tweak */
	early_5080 = (pdev->device == 0x5080) && (rev_id == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}
1588
/* Reset the flash controller interface on 50xx chips by writing a
 * fixed value to the flash control register.  (Magic constant from
 * the vendor driver; exact bit meanings not documented here.)
 */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1593
/* Save the current pre-emphasis and amplitude fields from a 50xx
 * channel's PHY mode register into hpriv->signal[idx], so
 * mv5_phy_errata() can restore them after a channel reset.
 */
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}
1605
/* Per its name, enables LED support on 50xx chips: clears the GPIO
 * port control register and rewrites the expansion-ROM BAR control.
 */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): "|= ~(1 << 0)" sets every bit EXCEPT bit 0,
	 * while mv5_reset_bus() uses "|= (1 << 0)".  This looks like it
	 * may have been intended as "&= ~(1 << 0)" (clear bit 0) --
	 * verify against the chip documentation before changing.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1618
/* Apply 50xx PHY errata workarounds to one port and restore the
 * pre-emphasis/amplitude values previously saved by mv5_read_preamp().
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	/* mask covers the pre (12:11) and amps (7:5) fields */
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		/* 50XXB0-only tweaks to LT mode and PHY control */
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* merge the saved pre-emphasis/amplitude into PHY mode */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1644
Jeff Garzikc9d39132005-11-13 17:47:51 -05001645
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Bring one 50xx port to a clean post-reset state: disable EDMA,
 * reset the channel, then zero the EDMA command, queue-pointer and
 * interrupt registers and program the config/IORDY timeout values.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1672
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Reset the shared (per-HC) register state for one 50xx host
 * controller: zero four HC registers, then rewrite bits in HC
 * register 0x20 (exact bit semantics undocumented here; values
 * follow the vendor driver).
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
1691
1692static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1693 unsigned int n_hc)
1694{
1695 unsigned int hc, port;
1696
1697 for (hc = 0; hc < n_hc; hc++) {
1698 for (port = 0; port < MV_PORTS_PER_HC; port++)
1699 mv5_reset_hc_port(hpriv, mmio,
1700 (hc * MV_PORTS_PER_HC) + port);
1701
1702 mv5_reset_one_hc(hpriv, mmio, hc);
1703 }
1704
1705 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05001706}
1707
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
/* Common PCI-side reset used by both chip families: clears the upper
 * byte pair of MV_PCI_MODE, zeroes timers/triggers, masks all main
 * and PCI interrupts, and clears the PCI error latch registers.
 */
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
1731
/* 60xx flash reset: perform the common 50xx flash reset, then set
 * bits 5 and 6 in the GPIO port control register while preserving
 * its low two bits.
 */
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
1743
1744/**
1745 * mv6_reset_hc - Perform the 6xxx global soft reset
1746 * @mmio: base address of the HBA
1747 *
1748 * This routine only applies to 6xxx parts.
1749 *
1750 * LOCKING:
1751 * Inherited from caller.
1752 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* wait up to ~1ms for the PCI master to drain */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t) {
			break;
		}
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;		/* non-zero means reset failed */
		goto done;
	}

	/* set reset: retry the write a few times until the bit sticks */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
1808
/* Save amplitude/pre-emphasis settings for a 60xx channel.  If bit 0
 * of MV_RESET_CFG is clear, fixed default field values are used;
 * otherwise the values are read back from the port's PHY_MODE2.
 */
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		/* defaults when strap/config bit is clear */
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
1828
/* Per its name, enables LED support on 60xx chips via a single
 * write to the GPIO port control register (value from vendor driver).
 */
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
1833
/* Apply 60xx/IIE PHY errata workarounds to one port and restore the
 * amplitude/pre-emphasis values saved by mv6_read_preamp().  The
 * PHY_MODE2/3/4 bit manipulations follow the Marvell vendor driver;
 * several constants are undocumented magic.
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		/* pulse bit 31 with bit 16 cleared, then clear both */
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* 60X1B2 parts: save reg 0x310 around the mode-4 update */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
1899
/* Hard-reset one SATA channel: assert ATA_RST in the EDMA command
 * register, on 60xx chips force the interface config (enable gen2i
 * speed per chip spec), wait for the reset to propagate, clear the
 * reset bit, then apply the per-chip PHY errata fixes.
 */
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_60XX(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	/* 50xx parts need a settle delay after the reset */
	if (IS_50XX(hpriv))
		mdelay(1);
}
1926
/* Stop DMA on a port, hard-reset its channel, then re-run the phy
 * reset in non-sleeping mode (can_sleep == 0), since callers may
 * hold the host lock.
 */
static void mv_stop_and_reset(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	__mv_phy_reset(ap, 0);
}
1938
/* Millisecond delay helper: sleeps when the caller is allowed to
 * sleep, otherwise spins with mdelay() (atomic context).
 */
static inline void __msleep(unsigned int msec, int can_sleep)
{
	if (!can_sleep) {
		mdelay(msec);
		return;
	}
	msleep(msec);
}
1946
1947/**
Jeff Garzik22374672005-11-17 10:59:48 -05001948 * __mv_phy_reset - Perform eDMA reset followed by COMRESET
Jeff Garzikc9d39132005-11-13 17:47:51 -05001949 * @ap: ATA channel to manipulate
1950 *
1951 * Part of this is taken from __sata_phy_reset and modified to
1952 * not sleep since this routine gets called from interrupt level.
1953 *
1954 * LOCKING:
 * Inherited from caller.  This is coded to be safe to call at
 * interrupt level, i.e. it does not sleep.
1957 */
static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
{
	struct mv_port_priv *pp	= ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct ata_taskfile tf;
	struct ata_device *dev = &ap->device[0];
	unsigned long timeout;
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

	DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
	__msleep(1, can_sleep);

	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
	__msleep(20, can_sleep);

	/* poll up to 200ms for the SStatus DET field to settle at
	 * "device present" (3) or "no device" (0)
	 */
	timeout = jiffies + msecs_to_jiffies(200);
	do {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		__msleep(1, can_sleep);
	} while (time_before(jiffies, timeout));

	/* work around errata */
	if (IS_60XX(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

	DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	if (ata_port_online(ap)) {
		ata_port_probe(ap);
	} else {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		ata_port_printk(ap, KERN_INFO,
				"no device found (phy stat %08x)\n", sstatus);
		ata_port_disable(ap);
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		__msleep(500, can_sleep);
		if (retry-- <= 0)
			break;
	}

	/* read the signature registers needed for device classification */
	tf.lbah = readb(ap->ioaddr.lbah_addr);
	tf.lbam = readb(ap->ioaddr.lbam_addr);
	tf.lbal = readb(ap->ioaddr.lbal_addr);
	tf.nsect = readb(ap->ioaddr.nsect_addr);

	dev->class = ata_dev_classify(&tf);
	if (!ata_dev_enabled(dev)) {
		VPRINTK("Port disabled post-sig: No device present.\n");
		ata_port_disable(ap);
	}

	/* clear any stale EDMA error state; EDMA must be re-enabled */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	VPRINTK("EXIT\n");
}
2044
/* Process-context wrapper: phy reset with sleeping delays allowed. */
static void mv_phy_reset(struct ata_port *ap)
{
	__mv_phy_reset(ap, 1);
}
2049
Brett Russ05b308e2005-10-05 17:08:53 -04002050/**
2051 * mv_eng_timeout - Routine called by libata when SCSI times out I/O
2052 * @ap: ATA channel to manipulate
2053 *
2054 * Intent is to clear all pending error conditions, reset the
2055 * chip/bus, fail the command, and move on.
2056 *
2057 * LOCKING:
Jeff Garzikcca39742006-08-24 03:19:22 -04002058 * This routine holds the host lock while failing the command.
Brett Russ05b308e2005-10-05 17:08:53 -04002059 */
Brett Russ31961942005-09-30 01:36:00 -04002060static void mv_eng_timeout(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04002061{
Tejun Heo0d5ff562007-02-01 15:06:36 +09002062 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
Brett Russ31961942005-09-30 01:36:00 -04002063 struct ata_queued_cmd *qc;
Mark Lord2f9719b2006-06-07 12:53:29 -04002064 unsigned long flags;
Brett Russ31961942005-09-30 01:36:00 -04002065
Tejun Heof15a1da2006-05-15 20:57:56 +09002066 ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
Brett Russ31961942005-09-30 01:36:00 -04002067 DPRINTK("All regs @ start of eng_timeout\n");
Tejun Heo0d5ff562007-02-01 15:06:36 +09002068 mv_dump_all_regs(mmio, ap->port_no, to_pci_dev(ap->host->dev));
Brett Russ31961942005-09-30 01:36:00 -04002069
2070 qc = ata_qc_from_tag(ap, ap->active_tag);
2071 printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
Tejun Heo0d5ff562007-02-01 15:06:36 +09002072 mmio, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd);
Brett Russ31961942005-09-30 01:36:00 -04002073
Jeff Garzikcca39742006-08-24 03:19:22 -04002074 spin_lock_irqsave(&ap->host->lock, flags);
Mark Lord9b358e32006-05-19 16:21:03 -04002075 mv_err_intr(ap, 0);
Jeff Garzikc9d39132005-11-13 17:47:51 -05002076 mv_stop_and_reset(ap);
Jeff Garzikcca39742006-08-24 03:19:22 -04002077 spin_unlock_irqrestore(&ap->host->lock, flags);
Brett Russ31961942005-09-30 01:36:00 -04002078
Mark Lord9b358e32006-05-19 16:21:03 -04002079 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
2080 if (qc->flags & ATA_QCFLAG_ACTIVE) {
2081 qc->err_mask |= AC_ERR_TIMEOUT;
2082 ata_eh_qc_complete(qc);
2083 }
Brett Russ31961942005-09-30 01:36:00 -04002084}
2085
Brett Russ05b308e2005-10-05 17:08:53 -04002086/**
2087 * mv_port_init - Perform some early initialization on a single port.
2088 * @port: libata data structure storing shadow register addresses
2089 * @port_mmio: base address of the port
2090 *
2091 * Initialize shadow register mmio addresses, clear outstanding
2092 * interrupts on the port, and unmask interrupts for the future
2093 * start of the port.
2094 *
2095 * LOCKING:
2096 * Inherited from caller.
2097 */
static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup: the taskfile shadow registers are laid out
	 * as consecutive u32 slots starting at the shadow block base
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
2134
/**
 * mv_chip_id - select chip ops and errata flags for this board
 * @host: ATA host being initialized
 * @board_idx: board type index (chip_504x, chip_5080, chip_604x, ...)
 *
 * Reads the PCI revision ID, selects the 50xx or 60xx hardware ops
 * vector, and records per-stepping errata flags in hpriv->hp_flags.
 * Unknown revisions get the newest known workarounds (with a
 * warning).  Returns 0 on success, 1 for an invalid board index.
 */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 rev_id;
	u32 hp_flags = hpriv->hp_flags;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	switch(board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (rev_id) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (rev_id) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;

		switch (rev_id) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
	case chip_6042:
		/* Gen IIE parts use the 60xx ops plus an extra flag */
		hpriv->ops = &mv6xxx_ops;

		hp_flags |= MV_HP_GEN_IIE;

		switch (rev_id) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;

	return 0;
}
2233
/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @host: ATA host to initialize
 *      @board_idx: controller index
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      Returns 0 on success, nonzero from mv_chip_id()/reset_hc() on failure.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask: silence the chip before we start poking it */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	/* Determine chip generation and errata flags; selects hpriv->ops. */
	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* Snapshot per-port PHY preamp/signal settings before resetting,
	 * so phy_errata() below can restore/tune them afterwards.
	 */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	/* Global reset of all host controllers (HCs) on the chip. */
	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_60XX(hpriv)) {
			/* Gen-II parts need the interface control register
			 * massaged before applying PHY errata fixes.
			 */
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	/* Populate libata port I/O address tables for each port. */
	for (port = 0; port < host->n_ports; port++) {
		void __iomem *port_mmio = mv_port_base(mmio, port);
		mv_port_init(&host->ports[port]->ioaddr, port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);

	/* Gen-I (50XX) chips use a different main IRQ mask layout. */
	if (IS_50XX(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));

done:
	return rc;
}
2323
Brett Russ05b308e2005-10-05 17:08:53 -04002324/**
2325 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002326 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002327 *
2328 * FIXME: complete this.
2329 *
2330 * LOCKING:
2331 * Inherited from caller.
2332 */
Tejun Heo4447d352007-04-17 23:44:08 +09002333static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002334{
Tejun Heo4447d352007-04-17 23:44:08 +09002335 struct pci_dev *pdev = to_pci_dev(host->dev);
2336 struct mv_host_priv *hpriv = host->private_data;
Brett Russ31961942005-09-30 01:36:00 -04002337 u8 rev_id, scc;
2338 const char *scc_s;
2339
2340 /* Use this to determine the HW stepping of the chip so we know
2341 * what errata to workaround
2342 */
2343 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
2344
2345 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2346 if (scc == 0)
2347 scc_s = "SCSI";
2348 else if (scc == 0x01)
2349 scc_s = "RAID";
2350 else
2351 scc_s = "unknown";
2352
Jeff Garzika9524a72005-10-30 14:39:11 -05002353 dev_printk(KERN_INFO, &pdev->dev,
2354 "%u slots %u ports %s mode IRQ via %s\n",
Tejun Heo4447d352007-04-17 23:44:08 +09002355 (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002356 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2357}
2358
/**
 *      mv_init_one - handle a positive probe of a Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      Allocates the ATA host and driver-private data, maps the PCI
 *      BAR, initializes the chip, enables MSI/INTx, and activates the
 *      host.  All resources are devres-managed, so error paths may
 *      simply return - cleanup happens automatically.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	/* driver_data carries the board index chosen by the PCI ID table */
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	/* Print the driver version once, on the first probed device only. */
	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		/* BAR busy: pin the device so a later driver can't grab it
		 * in a half-enabled state.
		 */
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	/* Configure 64-bit DMA masks where the platform supports them. */
	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts: prefer MSI when the module param allows it,
	 * fall back to legacy INTx when MSI setup fails or is disabled.
	 */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 &mv_sht);
}
2420
2421static int __init mv_init(void)
2422{
Pavel Roskinb7887192006-08-10 18:13:18 +09002423 return pci_register_driver(&mv_pci_driver);
Brett Russ20f733e2005-09-01 18:26:17 -04002424}
2425
/* Module exit point: detach the PCI driver; devres tears down the rest. */
static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}
2430
/* Module metadata and parameters. */
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

/* 'msi' is read in mv_init_one(); 0444 = world-readable, not writable */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);