blob: 590f2f92b4e0e51f47bb1ab14b952266de0bde6d [file] [log] [blame]
Brett Russ20f733e2005-09-01 18:26:17 -04001/*
2 * sata_mv.c - Marvell SATA support
3 *
Jeff Garzik8b260242005-11-12 12:32:50 -05004 * Copyright 2005: EMC Corporation, all rights reserved.
Jeff Garzike2b1be52005-11-18 14:04:23 -05005 * Copyright 2005 Red Hat, Inc. All rights reserved.
Brett Russ20f733e2005-09-01 18:26:17 -04006 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
Jeff Garzik4a05e202007-05-24 23:40:15 -040024/*
25 sata_mv TODO list:
26
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember a couple workarounds (one related to PCI-X)
30 are still needed.
31
32 2) Convert to LibATA new EH. Required for hotplug, NCQ, and sane
33 probing/error handling in general. MUST HAVE.
34
35 3) Add hotplug support (easy, once new-EH support appears)
36
37 4) Add NCQ support (easy to intermediate, once new-EH support appears)
38
39 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
40
41 6) Add port multiplier support (intermediate)
42
43 7) Test and verify 3.0 Gbps support
44
45 8) Develop a low-power-consumption strategy, and implement it.
46
47 9) [Experiment, low priority] See if ATAPI can be supported using
48 "unknown FIS" or "vendor-specific FIS" support, or something creative
49 like that.
50
51 10) [Experiment, low priority] Investigate interrupt coalescing.
52 Quite often, especially with PCI Message Signalled Interrupts (MSI),
53 the overhead reduced by interrupt mitigation is quite often not
54 worth the latency cost.
55
56 11) [Experiment, Marvell value added] Is it possible to use target
57 mode to cross-connect two Linux boxes with Marvell cards? If so,
58 creating LibATA target mode support would be very interesting.
59
60 Target mode, for those without docs, is the ability to directly
61 connect two SATA controllers.
62
63 13) Verify that 7042 is fully supported. I only have a 6042.
64
65*/
66
67
Brett Russ20f733e2005-09-01 18:26:17 -040068#include <linux/kernel.h>
69#include <linux/module.h>
70#include <linux/pci.h>
71#include <linux/init.h>
72#include <linux/blkdev.h>
73#include <linux/delay.h>
74#include <linux/interrupt.h>
Brett Russ20f733e2005-09-01 18:26:17 -040075#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050076#include <linux/device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040077#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050078#include <scsi/scsi_cmnd.h>
Brett Russ20f733e2005-09-01 18:26:17 -040079#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040080
81#define DRV_NAME "sata_mv"
Jeff Garzik8bc3fc42007-05-21 20:26:38 -040082#define DRV_VERSION "0.81"
Brett Russ20f733e2005-09-01 18:26:17 -040083
/* Register offsets, sizes, and bit definitions for the Marvell SATA chips. */
enum {
	/* BARs are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_USE_Q_DEPTH		= ATA_DEF_QUEUE,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	MV_COMMON_FLAGS		= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
				   ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING),
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	/* Command ReQuest Block control bits */
	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	/* Command ResPonse Block: device status lives in bits 15:8 of flags */
	CRPB_FLAG_STATUS_SHIFT	= 8,

	/* marks the last entry of an ePRD scatter/gather table */
	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0,			/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),		/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),		/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),		/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),
	EDMA_ERR_PRD_PAR	= (1 << 1),
	EDMA_ERR_DEV		= (1 << 2),
	EDMA_ERR_DEV_DCON	= (1 << 3),
	EDMA_ERR_DEV_CON	= (1 << 4),
	EDMA_ERR_SERR		= (1 << 5),
	EDMA_ERR_SELF_DIS	= (1 << 7),
	EDMA_ERR_BIST_ASYNC	= (1 << 8),
	EDMA_ERR_CRBQ_PAR	= (1 << 9),
	EDMA_ERR_CRPB_PAR	= (1 << 10),
	EDMA_ERR_INTRL_PAR	= (1 << 11),
	EDMA_ERR_IORDY		= (1 << 12),
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
	/* NOTE(review): (1 << 31) overflows a signed int; kernel builds
	 * tolerate this, but (1U << 31) would be strictly-conforming.
	 */
	EDMA_ERR_TRANS_PROTO	= (1 << 31),
	EDMA_ERR_FATAL		= (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
				   EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
				   EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
				   EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
				   EDMA_ERR_LNK_DATA_RX |
				   EDMA_ERR_LNK_DATA_TX |
				   EDMA_ERR_TRANS_PROTO),

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,
	EDMA_EN			= (1 << 0),
	EDMA_DS			= (1 << 1),
	ATA_RST			= (1 << 2),

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_50XX		= (1 << 6),
	MV_HP_GEN_IIE		= (1 << 7),

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),
	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
};
292
/* Chip-generation tests on hpriv->hp_flags: Gen I == 50xx, Gen II == 60xx,
 * Gen IIE == the enhanced 6042/7042 parts (MV_HP_GEN_IIE).
 */
#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX)
#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0)
#define IS_GEN_I(hpriv) IS_50XX(hpriv)
#define IS_GEN_II(hpriv) IS_60XX(hpriv)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500298
/* 32-bit values kept in their own enum so the unsigned suffix matters. */
enum {
	MV_DMA_BOUNDARY		= 0xffffffffU,	/* scsi_host_template.dma_boundary */

	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,	/* CRQB queue base: 1KB aligned */

	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,	/* CRPB queue base: 256B aligned */
};
306
/* Board index: selects the mv_port_info[] entry; also used as the
 * driver_data value in mv_pci_tbl[].
 */
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};
316
/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;	/* ePRD (SG) table address, low 32 bits */
	__le32			sg_addr_hi;	/* ePRD (SG) table address, high 32 bits */
	__le16			ctrl_flags;	/* CRQB_FLAG_*/tag/cmd-addr bits */
	__le16			ata_cmd[11];	/* ATA command words */
};
324
/* Command ReQuest Block used by the Gen IIE port ops (mv_qc_prep_iie;
 * chip_6042 / chip_7042 boards).
 */
struct mv_crqb_iie {
	__le32			addr;		/* ePRD table address, low 32 bits */
	__le32			addr_hi;	/* ePRD table address, high 32 bits */
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];	/* ATA command words */
};
332
/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;		/* command id */
	__le16			flags;		/* status in bits 15:8 (CRPB_FLAG_STATUS_SHIFT) */
	__le32			tmstmp;		/* timestamp */
};
339
/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;		/* buffer address, low 32 bits */
	__le32			flags_size;	/* size plus EPRD_FLAG_END_OF_TBL on the last entry */
	__le32			addr_hi;	/* buffer address, high 32 bits */
	__le32			reserved;
};
347
/* Per-port state, hung off ap->private_data: the CRQB/CRPB descriptor
 * queues and the ePRD scatter/gather table, each with both its CPU
 * virtual address and its DMA (bus) address.
 */
struct mv_port_priv {
	struct mv_crqb		*crqb;		/* request queue */
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;		/* response queue */
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;	/* ePRD table */
	dma_addr_t		sg_tbl_dma;
	u32			pp_flags;	/* MV_PP_FLAG_* */
};
357
/* Per-port PHY signal values (amps/pre), captured by the chip's
 * ->read_preamp method and re-applied by ->phy_errata.
 */
struct mv_port_signal {
	u32			amps;
	u32			pre;
};
362
struct mv_host_priv;

/* Chip-generation-specific hardware methods; instantiated below as
 * mv5xxx_ops (Gen I) and mv6xxx_ops (Gen II/IIE).
 */
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);	/* apply per-port PHY errata fixups */
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);	/* capture PHY signal settings */
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);	/* reset n_hc host controllers */
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};
375
/* Host-wide private state shared by all ports of one controller. */
struct mv_host_priv {
	u32			hp_flags;	/* MV_HP_* flags */
	struct mv_port_signal	signal[8];	/* one entry per possible port */
	const struct mv_hw_ops	*ops;		/* chip-generation methods */
};
381
/* Forward declarations: libata hooks and common helpers. */
static void mv_irq_clear(struct ata_port *ap);
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static void mv_phy_reset(struct ata_port *ap);
static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_eng_timeout(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

/* Gen I (50xx) implementations of the mv_hw_ops methods. */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

/* Gen II/IIE (60xx/6042/7042) implementations of the mv_hw_ops methods. */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_stop_and_reset(struct ata_port *ap);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500419
/* SCSI host template shared by every supported chip variant. */
static struct scsi_host_template mv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= MV_USE_Q_DEPTH,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
437
/* Port operations for Gen I (50xx) chips; uses the mv5_* SCR accessors. */
static const struct ata_port_operations mv5_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,
	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
466
/* Port operations for Gen II (60xx) chips. */
static const struct ata_port_operations mv6_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,
	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
495
/* Port operations for Gen IIE (6042/7042) chips; only difference from
 * mv6_ops is the IIE command-request-block format (mv_qc_prep_iie).
 */
static const struct ata_port_operations mv_iie_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,
	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
524
/* Per-board port configuration, indexed by enum chip_type. */
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				   MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_iie_ops,
	},
};
570
/* PCI IDs this driver claims; driver_data is the chip_type index. */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	/* add Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	{ }			/* terminate list */
};
595
/* PCI driver registration: probe via mv_init_one, standard libata remove. */
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
602
/* Gen I (50xx) hardware-method bundle. */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
611
/* Gen II/IIE (60xx/6042/7042) hardware-method bundle. */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
620
/*
 * module options
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
625
626
Jeff Garzikd88184f2007-02-26 01:26:06 -0500627/* move to PCI layer or libata core? */
628static int pci_go_64(struct pci_dev *pdev)
629{
630 int rc;
631
632 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
633 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
634 if (rc) {
635 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
636 if (rc) {
637 dev_printk(KERN_ERR, &pdev->dev,
638 "64-bit DMA enable failed\n");
639 return rc;
640 }
641 }
642 } else {
643 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
644 if (rc) {
645 dev_printk(KERN_ERR, &pdev->dev,
646 "32-bit DMA enable failed\n");
647 return rc;
648 }
649 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
650 if (rc) {
651 dev_printk(KERN_ERR, &pdev->dev,
652 "32-bit consistent DMA enable failed\n");
653 return rc;
654 }
655 }
656
657 return rc;
658}
659
Jeff Garzikddef9bb2006-02-02 16:17:06 -0500660/*
Brett Russ20f733e2005-09-01 18:26:17 -0400661 * Functions
662 */
663
/* Write @data to @addr, then read it back so the write is flushed past
 * any PCI posted-write buffering before the caller proceeds.
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
669
Brett Russ20f733e2005-09-01 18:26:17 -0400670static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
671{
672 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
673}
674
Jeff Garzikc9d39132005-11-13 17:47:51 -0500675static inline unsigned int mv_hc_from_port(unsigned int port)
676{
677 return port >> MV_PORT_HC_SHIFT;
678}
679
680static inline unsigned int mv_hardport_from_port(unsigned int port)
681{
682 return port & MV_PORT_MASK;
683}
684
685static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
686 unsigned int port)
687{
688 return mv_hc_base(base, mv_hc_from_port(port));
689}
690
Brett Russ20f733e2005-09-01 18:26:17 -0400691static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
692{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500693 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500694 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500695 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400696}
697
/* Per-port register base for the port attached to @ap, resolved through
 * the host's primary (memory) BAR mapping.
 */
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}
702
Jeff Garzikcca39742006-08-24 03:19:22 -0400703static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400704{
Jeff Garzikcca39742006-08-24 03:19:22 -0400705 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400706}
707
/* libata ->irq_clear hook: intentionally empty for this hardware. */
static void mv_irq_clear(struct ata_port *ap)
{
}
711
/**
 *      mv_start_dma - Enable eDMA engine
 *      @base: port base address
 *      @pp: port private data
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
{
	/* Only touch the hardware if our cached state says eDMA is off;
	 * the WARN_ON below cross-checks cache against the real register.
	 */
	if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}
731
Brett Russ05b308e2005-10-05 17:08:53 -0400732/**
733 * mv_stop_dma - Disable eDMA engine
734 * @ap: ATA channel to manipulate
735 *
Tejun Heobeec7db2006-02-11 19:11:13 +0900736 * Verify the local cache of the eDMA state is accurate with a
737 * WARN_ON.
Brett Russ05b308e2005-10-05 17:08:53 -0400738 *
739 * LOCKING:
740 * Inherited from caller.
741 */
Brett Russ31961942005-09-30 01:36:00 -0400742static void mv_stop_dma(struct ata_port *ap)
743{
744 void __iomem *port_mmio = mv_ap_base(ap);
745 struct mv_port_priv *pp = ap->private_data;
Brett Russ31961942005-09-30 01:36:00 -0400746 u32 reg;
747 int i;
748
Brett Russafb0edd2005-10-05 17:08:42 -0400749 if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
750 /* Disable EDMA if active. The disable bit auto clears.
Brett Russ31961942005-09-30 01:36:00 -0400751 */
Brett Russ31961942005-09-30 01:36:00 -0400752 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
753 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Brett Russafb0edd2005-10-05 17:08:42 -0400754 } else {
Tejun Heobeec7db2006-02-11 19:11:13 +0900755 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
Brett Russafb0edd2005-10-05 17:08:42 -0400756 }
Jeff Garzik8b260242005-11-12 12:32:50 -0500757
Brett Russ31961942005-09-30 01:36:00 -0400758 /* now properly wait for the eDMA to stop */
759 for (i = 1000; i > 0; i--) {
760 reg = readl(port_mmio + EDMA_CMD_OFS);
761 if (!(EDMA_EN & reg)) {
762 break;
763 }
764 udelay(100);
765 }
766
Brett Russ31961942005-09-30 01:36:00 -0400767 if (EDMA_EN & reg) {
Tejun Heof15a1da2006-05-15 20:57:56 +0900768 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
Brett Russafb0edd2005-10-05 17:08:42 -0400769 /* FIXME: Consider doing a reset here to recover */
Brett Russ31961942005-09-30 01:36:00 -0400770 }
771}
772
#ifdef ATA_DEBUG
/* Hex-dump @bytes of MMIO space starting at @start, four words per line. */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int off = 0;

	while (off < bytes) {
		int col;

		DPRINTK("%p: ", start + off);
		for (col = 0; off < bytes && col < 4; col++) {
			printk("%08x ",readl(start + off));
			off += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
787
/* Hex-dump the first @bytes of @pdev's PCI config space (debug builds only). */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int off = 0;

	while (off < bytes) {
		int col;

		DPRINTK("%02x: ", off);
		for (col = 0; off < bytes && col < 4; col++) {
			u32 dw;

			(void) pci_read_config_dword(pdev, off, &dw);
			printk("%08x ", dw);
			off += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
804static void mv_dump_all_regs(void __iomem *mmio_base, int port,
805 struct pci_dev *pdev)
806{
807#ifdef ATA_DEBUG
Jeff Garzik8b260242005-11-12 12:32:50 -0500808 void __iomem *hc_base = mv_hc_base(mmio_base,
Brett Russ31961942005-09-30 01:36:00 -0400809 port >> MV_PORT_HC_SHIFT);
810 void __iomem *port_base;
811 int start_port, num_ports, p, start_hc, num_hcs, hc;
812
813 if (0 > port) {
814 start_hc = start_port = 0;
815 num_ports = 8; /* shld be benign for 4 port devs */
816 num_hcs = 2;
817 } else {
818 start_hc = port >> MV_PORT_HC_SHIFT;
819 start_port = port;
820 num_ports = num_hcs = 1;
821 }
Jeff Garzik8b260242005-11-12 12:32:50 -0500822 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
Brett Russ31961942005-09-30 01:36:00 -0400823 num_ports > 1 ? num_ports - 1 : start_port);
824
825 if (NULL != pdev) {
826 DPRINTK("PCI config space regs:\n");
827 mv_dump_pci_cfg(pdev, 0x68);
828 }
829 DPRINTK("PCI regs:\n");
830 mv_dump_mem(mmio_base+0xc00, 0x3c);
831 mv_dump_mem(mmio_base+0xd00, 0x34);
832 mv_dump_mem(mmio_base+0xf00, 0x4);
833 mv_dump_mem(mmio_base+0x1d00, 0x6c);
834 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
Dan Alonid220c372006-04-10 23:20:22 -0700835 hc_base = mv_hc_base(mmio_base, hc);
Brett Russ31961942005-09-30 01:36:00 -0400836 DPRINTK("HC regs (HC %i):\n", hc);
837 mv_dump_mem(hc_base, 0x1c);
838 }
839 for (p = start_port; p < start_port + num_ports; p++) {
840 port_base = mv_port_base(mmio_base, p);
841 DPRINTK("EDMA regs (port %i):\n",p);
842 mv_dump_mem(port_base, 0x54);
843 DPRINTK("SATA regs (port %i):\n",p);
844 mv_dump_mem(port_base+0x300, 0x60);
845 }
846#endif
847}
848
Brett Russ20f733e2005-09-01 18:26:17 -0400849static unsigned int mv_scr_offset(unsigned int sc_reg_in)
850{
851 unsigned int ofs;
852
853 switch (sc_reg_in) {
854 case SCR_STATUS:
855 case SCR_CONTROL:
856 case SCR_ERROR:
857 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
858 break;
859 case SCR_ACTIVE:
860 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
861 break;
862 default:
863 ofs = 0xffffffffU;
864 break;
865 }
866 return ofs;
867}
868
869static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
870{
871 unsigned int ofs = mv_scr_offset(sc_reg_in);
872
Jeff Garzik35177262007-02-24 21:26:42 -0500873 if (0xffffffffU != ofs)
Brett Russ20f733e2005-09-01 18:26:17 -0400874 return readl(mv_ap_base(ap) + ofs);
Jeff Garzik35177262007-02-24 21:26:42 -0500875 else
Brett Russ20f733e2005-09-01 18:26:17 -0400876 return (u32) ofs;
Brett Russ20f733e2005-09-01 18:26:17 -0400877}
878
879static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
880{
881 unsigned int ofs = mv_scr_offset(sc_reg_in);
882
Jeff Garzik35177262007-02-24 21:26:42 -0500883 if (0xffffffffU != ofs)
Brett Russ20f733e2005-09-01 18:26:17 -0400884 writelfl(val, mv_ap_base(ap) + ofs);
Brett Russ20f733e2005-09-01 18:26:17 -0400885}
886
Jeff Garzike4e7b892006-01-31 12:18:41 -0500887static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
888{
889 u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
890
891 /* set up non-NCQ EDMA configuration */
Jeff Garzike4e7b892006-01-31 12:18:41 -0500892 cfg &= ~(1 << 9); /* disable equeue */
893
Jeff Garzike728eab2007-02-25 02:53:41 -0500894 if (IS_GEN_I(hpriv)) {
895 cfg &= ~0x1f; /* clear queue depth */
Jeff Garzike4e7b892006-01-31 12:18:41 -0500896 cfg |= (1 << 8); /* enab config burst size mask */
Jeff Garzike728eab2007-02-25 02:53:41 -0500897 }
Jeff Garzike4e7b892006-01-31 12:18:41 -0500898
Jeff Garzike728eab2007-02-25 02:53:41 -0500899 else if (IS_GEN_II(hpriv)) {
900 cfg &= ~0x1f; /* clear queue depth */
Jeff Garzike4e7b892006-01-31 12:18:41 -0500901 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
Jeff Garzike728eab2007-02-25 02:53:41 -0500902 cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
903 }
Jeff Garzike4e7b892006-01-31 12:18:41 -0500904
905 else if (IS_GEN_IIE(hpriv)) {
Jeff Garzike728eab2007-02-25 02:53:41 -0500906 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
907 cfg |= (1 << 22); /* enab 4-entry host queue cache */
Jeff Garzike4e7b892006-01-31 12:18:41 -0500908 cfg &= ~(1 << 19); /* dis 128-entry queue (for now?) */
909 cfg |= (1 << 18); /* enab early completion */
Jeff Garzike728eab2007-02-25 02:53:41 -0500910 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
911 cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */
912 cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
Jeff Garzike4e7b892006-01-31 12:18:41 -0500913 }
914
915 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
916}
917
/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.  One coherent chunk holds the CRQB ring, the
 *      CRPB ring, and the ePRD scatter/gather table, carved out in
 *      that order.  The queue base/pointer registers are programmed
 *      here, but EDMA itself is left disabled.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	int rc;

	/* devm/dmam allocations are released automatically on detach,
	 * which is why the error paths below can just return.
	 */
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	mv_edma_cfg(hpriv, port_mmio);

	/* Split each 64-bit queue base across the HI register and the
	 * low bits of the IN/OUT pointer registers.
	 */
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	/* XX42A0 errata parts want the full low base written to the
	 * OUT/IN pointer registers instead of zero.
	 */
	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crqb_dma & 0xffffffff,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crpb_dma & 0xffffffff,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}
1004
Brett Russ05b308e2005-10-05 17:08:53 -04001005/**
1006 * mv_port_stop - Port specific cleanup/stop routine.
1007 * @ap: ATA channel to manipulate
1008 *
1009 * Stop DMA, cleanup port memory.
1010 *
1011 * LOCKING:
Jeff Garzikcca39742006-08-24 03:19:22 -04001012 * This routine uses the host lock to protect the DMA stop.
Brett Russ05b308e2005-10-05 17:08:53 -04001013 */
Brett Russ31961942005-09-30 01:36:00 -04001014static void mv_port_stop(struct ata_port *ap)
1015{
Brett Russafb0edd2005-10-05 17:08:42 -04001016 unsigned long flags;
Brett Russ31961942005-09-30 01:36:00 -04001017
Jeff Garzikcca39742006-08-24 03:19:22 -04001018 spin_lock_irqsave(&ap->host->lock, flags);
Brett Russ31961942005-09-30 01:36:00 -04001019 mv_stop_dma(ap);
Jeff Garzikcca39742006-08-24 03:19:22 -04001020 spin_unlock_irqrestore(&ap->host->lock, flags);
Brett Russ31961942005-09-30 01:36:00 -04001021}
1022
Brett Russ05b308e2005-10-05 17:08:53 -04001023/**
1024 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1025 * @qc: queued command whose SG list to source from
1026 *
1027 * Populate the SG list and mark the last entry.
1028 *
1029 * LOCKING:
1030 * Inherited from caller.
1031 */
Jeff Garzikd88184f2007-02-26 01:26:06 -05001032static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
Brett Russ31961942005-09-30 01:36:00 -04001033{
1034 struct mv_port_priv *pp = qc->ap->private_data;
Jeff Garzikd88184f2007-02-26 01:26:06 -05001035 unsigned int n_sg = 0;
Jeff Garzik972c26b2005-10-18 22:14:54 -04001036 struct scatterlist *sg;
Jeff Garzikd88184f2007-02-26 01:26:06 -05001037 struct mv_sg *mv_sg;
Brett Russ31961942005-09-30 01:36:00 -04001038
Jeff Garzikd88184f2007-02-26 01:26:06 -05001039 mv_sg = pp->sg_tbl;
Jeff Garzik972c26b2005-10-18 22:14:54 -04001040 ata_for_each_sg(sg, qc) {
Jeff Garzikd88184f2007-02-26 01:26:06 -05001041 dma_addr_t addr = sg_dma_address(sg);
1042 u32 sg_len = sg_dma_len(sg);
Brett Russ31961942005-09-30 01:36:00 -04001043
Jeff Garzikd88184f2007-02-26 01:26:06 -05001044 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1045 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1046 mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);
Brett Russ31961942005-09-30 01:36:00 -04001047
Jeff Garzikd88184f2007-02-26 01:26:06 -05001048 if (ata_sg_is_last(sg, qc))
1049 mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
Jeff Garzik972c26b2005-10-18 22:14:54 -04001050
Jeff Garzikd88184f2007-02-26 01:26:06 -05001051 mv_sg++;
1052 n_sg++;
Brett Russ31961942005-09-30 01:36:00 -04001053 }
Jeff Garzikd88184f2007-02-26 01:26:06 -05001054
1055 return n_sg;
Brett Russ31961942005-09-30 01:36:00 -04001056}
1057
Mark Lorda6432432006-05-19 16:36:36 -04001058static inline unsigned mv_inc_q_index(unsigned index)
Brett Russ31961942005-09-30 01:36:00 -04001059{
Mark Lorda6432432006-05-19 16:36:36 -04001060 return (index + 1) & MV_MAX_Q_DEPTH_MASK;
Brett Russ31961942005-09-30 01:36:00 -04001061}
1062
Mark Lorde1469872006-05-22 19:02:03 -04001063static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001064{
Mark Lord559eeda2006-05-19 16:40:15 -04001065 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001066 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001067 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001068}
1069
/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.  The CRQB slot used is the hardware's
 *      current request-queue producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* non-DMA protocols are issued via the normal libata path */
	if (ATA_PROT_DMA != qc->tf.protocol)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* point the CRQB at this command's ePRD scatter/gather table */
	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accomodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	/* remaining shadow-register writes, command byte last to trigger */
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1161
/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.  Gen IIE chips use a different CRQB layout
 * (struct mv_crqb_iie) where the taskfile is packed into four
 * 32-bit words rather than a sequence of register-write tuples.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* non-DMA protocols are issued via the normal libata path */
	if (ATA_PROT_DMA != qc->tf.protocol)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* reinterpret the shared CRQB slot in Gen IIE format */
	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	/* pack the taskfile registers into the four command words */
	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1229
/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(qc->ap);
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned in_index;
	u32 in_ptr;

	if (ATA_PROT_DMA != qc->tf.protocol) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_dma(qc->ap);
		return ata_qc_issue_prot(qc);
	}

	in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
	in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	in_index = mv_inc_q_index(in_index);	/* now incr producer index */

	mv_start_dma(port_mmio, pp);

	/* and write the request in pointer to kick the EDMA to life */
	in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
	in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
	writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1276
/**
 *      mv_get_crpb_status - get status from most recently completed cmd
 *      @ap: ATA channel to manipulate
 *
 *      This routine is for use when the port is in DMA mode, when it
 *      will be using the CRPB (command response block) method of
 *      returning command completion information.  We check indices
 *      are good, grab status, and bump the response consumer index to
 *      prove that we're up to date.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static u8 mv_get_crpb_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	unsigned out_index;
	u32 out_ptr;
	u8 ata_status;

	/* current response-queue consumer slot, per the hardware */
	out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
	out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* ATA status lives in the high bits of the CRPB flags word */
	ata_status = le16_to_cpu(pp->crpb[out_index].flags)
		>> CRPB_FLAG_STATUS_SHIFT;

	/* increment our consumer index... */
	out_index = mv_inc_q_index(out_index);

	/* and, until we do NCQ, there should only be 1 CRPB waiting */
	WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
		>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	/* write out our inc'd consumer index so EDMA knows we're caught up */
	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
	out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
	writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* Return ATA status register for completed CRPB */
	return ata_status;
}
1319
/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *      @reset_allowed: bool: 0 == don't trigger from reset here
 *
 *      In most cases, just clear the interrupt and move on.  However,
 *      some cases require an eDMA reset, which is done right before
 *      the COMRESET in mv_phy_reset().  The SERR case requires a
 *      clear of pending errors in the SATA SERROR register.  Finally,
 *      if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, int reset_allowed)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, serr = 0;

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* read-then-write-back clears the pending SERROR bits */
	if (EDMA_ERR_SERR & edma_err_cause) {
		sata_scr_read(ap, SCR_ERROR, &serr);
		sata_scr_write_flush(ap, SCR_ERROR, serr);
	}
	/* hardware turned EDMA off on its own; sync our cached state */
	if (EDMA_ERR_SELF_DIS & edma_err_cause) {
		struct mv_port_priv *pp	= ap->private_data;
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	}
	DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
		"SERR: 0x%08x\n", ap->print_id, edma_err_cause, serr);

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* check for fatal here and recover if needed */
	if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
		mv_stop_and_reset(ap);
}
1359
/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @relevant: port error bits relevant to this host controller
 *      @hc: which host controller we're to look at
 *
 *      Read then write clear the HC interrupt status then walk each
 *      port connected to the HC and see if it needs servicing.  Port
 *      success ints are reported in the HC interrupt status reg, the
 *      port error ints are reported in the higher level main
 *      interrupt status register and thus are passed in via the
 *      'relevant' argument.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	struct ata_queued_cmd *qc;
	u32 hc_irq_cause;
	int shift, port, port0, hard_port, handled;
	unsigned int err_mask;

	/* HC 1's ports start at chip-wide port index MV_PORTS_PER_HC */
	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (hc_irq_cause)
		writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc,relevant,hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		u8 ata_status = 0;
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;

		hard_port = mv_hardport_from_port(port); /* range 0..3 */
		handled = 0;	/* ensure ata_status is set if handled++ */

		/* Note that DEV_IRQ might happen spuriously during EDMA,
		 * and should be ignored in such cases.
		 * The cause of this is still under investigation.
		 */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			/* EDMA: check for response queue interrupt */
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
				ata_status = mv_get_crpb_status(ap);
				handled = 1;
			}
		} else {
			/* PIO: check for device (drive) interrupt */
			if ((DEV_IRQ << hard_port) & hc_irq_cause) {
				ata_status = readb(ap->ioaddr.status_addr);
				handled = 1;
				/* ignore spurious intr if drive still BUSY */
				if (ata_status & ATA_BUSY) {
					ata_status = 0;
					handled = 0;
				}
			}
		}

		if (ap && (ap->flags & ATA_FLAG_DISABLED))
			continue;

		err_mask = ac_err_mask(ata_status);

		/* each port owns two bits in the main-cause register;
		 * bit 8 is skipped between HC0's and HC1's groups
		 */
		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		if ((PORT0_ERR << shift) & relevant) {
			mv_err_intr(ap, 1);
			err_mask |= AC_ERR_OTHER;
			handled = 1;
		}

		if (handled) {
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
				VPRINTK("port %u IRQ found for qc, "
					"ata_status 0x%x\n", port,ata_status);
				/* mark qc status appropriately */
				if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
					qc->err_mask |= err_mask;
					ata_qc_complete(qc);
				}
			}
		}
	}
	VPRINTK("EXIT\n");
}
1459
/**
 *      mv_interrupt -
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *      @regs: unused
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv;
	u32 irq_stat;

	/* Read the main cause register first, before taking the lock,
	 * so a shared/spurious IRQ can bail out cheaply below.
	 */
	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 * (all-ones is what a dead/removed PCI device reads back as)
	 */
	if (!irq_stat || (0xffffffffU == irq_stat))
		return IRQ_NONE;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);
	spin_lock(&host->lock);

	/* dispatch each host controller that has pending bits set */
	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled++;
		}
	}

	hpriv = host->private_data;
	if (IS_60XX(hpriv)) {
		/* deal with the interrupt coalescing bits: ack all three
		 * coalescing cause registers (60xx-only feature)
		 */
		if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
		}
	}

	/* PCI errors are reported only through this top-level handler */
	if (PCI_ERR & irq_stat) {
		printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
		       readl(mmio + PCI_IRQ_CAUSE_OFS));

		DPRINTK("All regs @ PCI error\n");
		mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

		/* ack the PCI error cause bits */
		writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
		handled++;
	}
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1526
Jeff Garzikc9d39132005-11-13 17:47:51 -05001527static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1528{
1529 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1530 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1531
1532 return hc_mmio + ofs;
1533}
1534
1535static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1536{
1537 unsigned int ofs;
1538
1539 switch (sc_reg_in) {
1540 case SCR_STATUS:
1541 case SCR_ERROR:
1542 case SCR_CONTROL:
1543 ofs = sc_reg_in * sizeof(u32);
1544 break;
1545 default:
1546 ofs = 0xffffffffU;
1547 break;
1548 }
1549 return ofs;
1550}
1551
/* Read an SCR register on 5xxx parts through the PHY register window.
 * Returns the register value, or 0xffffffffU if @sc_reg_in is not a
 * supported SCR (see mv5_scr_offset()).
 */
static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		return readl(addr + ofs);
	else
		return (u32) ofs;	/* sentinel: unsupported register */
}
1563
/* Write an SCR register on 5xxx parts through the PHY register window.
 * Silently ignores unsupported registers (offset sentinel).
 */
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		writelfl(val, addr + ofs);
}
1573
/* 5xxx PCI bus reset.  On everything except very early 5080 (rev 0)
 * parts, set bit 0 of the expansion-ROM BAR control register before
 * performing the common PCI reset.
 */
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u8 rev_id;
	int early_5080;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	early_5080 = (pdev->device == 0x5080) && (rev_id == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}
1591
1592static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1593{
1594 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1595}
1596
/* Capture the power-up pre-emphasis and amplitude settings for port
 * @idx from the 5xxx PHY mode register, so mv5_phy_errata() can
 * restore them after channel resets.
 */
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}
1608
/* Enable LED outputs on 5xxx parts via GPIO port control and the
 * expansion-ROM BAR control register.
 */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): "|= ~(1 << 0)" sets every bit EXCEPT bit 0,
	 * whereas mv5_reset_bus() does "|= (1 << 0)" on this same
	 * register.  Looks like it may have been meant as (1 << 0) --
	 * confirm against the 5xxx datasheet before changing.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1621
/* Apply 5xxx PHY workarounds to @port and restore the pre-emphasis
 * and amplitude values saved at probe time by mv5_read_preamp().
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	/* pre (bits 12:11) and amps (bits 7:5) fields of MV5_PHY_MODE */
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		/* 50XXB0 workaround: adjust LT mode and PHY control */
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* restore the saved signal settings into the masked fields */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1647
Jeff Garzikc9d39132005-11-13 17:47:51 -05001648
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Hard-reset a single 5xxx port: disable EDMA, reset the channel,
 * then zero/initialize every per-port EDMA register.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/* disable EDMA before touching its registers */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1675
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Reset one 5xxx host controller block: zero its bookkeeping/IRQ
 * registers and program the config word at offset 0x20 (values from
 * the vendor driver; no symbolic names available).
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	/* keep bits in 0x1c1c1c1c, force 0x03030303 on */
	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
1694
1695static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1696 unsigned int n_hc)
1697{
1698 unsigned int hc, port;
1699
1700 for (hc = 0; hc < n_hc; hc++) {
1701 for (port = 0; port < MV_PORTS_PER_HC; port++)
1702 mv5_reset_hc_port(hpriv, mmio,
1703 (hc * MV_PORTS_PER_HC) + port);
1704
1705 mv5_reset_one_hc(hpriv, mmio, hc);
1706 }
1707
1708 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05001709}
1710
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
/* Common PCI-core reset shared by the 5xxx and 6xxx reset paths:
 * clear the PCI mode field and zero all PCI error/IRQ registers.
 */
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;	/* clear bits 23:16 of PCI mode */
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
1734
/* 6xxx flash setup: perform the 5xxx flash programming, then set GPIO
 * port-control bits 5 and 6 while preserving only bits 1:0.
 */
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
1746
/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @mmio: base address of the HBA
 *
 *      This routine only applies to 6xxx parts.  @hpriv and @n_hc are
 *      unused; the signature matches the shared reset_hc hw-ops slot
 *      (see mv5_reset_hc).  Returns 0 on success, 1 on failure.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* wait up to ~1ms for the PCI master to drain */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t) {
			break;
		}
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset: write GLOB_SFT_RST until it reads back set (<=5 tries) */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
1811
/* Capture pre-emphasis/amplitude for port @idx on 6xxx parts.  If the
 * reset-config register reports the sampled values as invalid (bit 0
 * clear), use fixed defaults instead of reading PHY_MODE2.
 */
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		/* defaults: amps = 0x7 (bits 10:8), pre = 0x1 (bits 7:5) */
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
1831
/* Enable LED outputs on 6xxx parts: set GPIO port-control bits 5 and 6
 * (0x60), the same bits mv6_reset_flash() sets.
 */
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
1836
/* Apply 6xxx PHY errata workarounds to @port and restore the saved
 * pre-emphasis/amplitude values (from mv6_read_preamp) into PHY_MODE2.
 * Register values are largely magic numbers from the vendor driver.
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		/* pulse bit 31 with bit 16 cleared, then clear both */
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* 60X1B2: register 0x310 must be saved across the
		 * PHY_MODE4 update and written back afterwards
		 */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
1902
/* Assert ATA_RST on a port's EDMA engine, apply chip-specific PHY
 * errata via the hw-ops vtable, then release the reset.
 */
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_60XX(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_50XX(hpriv))
		mdelay(1);
}
1929
/* Quiesce DMA on @ap, hard-reset its channel, then re-run the PHY
 * reset/COMRESET sequence.  Uses the non-sleeping variant of the PHY
 * reset, so this is callable from interrupt context.
 */
static void mv_stop_and_reset(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	__mv_phy_reset(ap, 0);
}
1941
/* Delay for @msec milliseconds: sleep when process context allows it,
 * otherwise busy-wait with mdelay().
 */
static inline void __msleep(unsigned int msec, int can_sleep)
{
	if (!can_sleep)
		mdelay(msec);
	else
		msleep(msec);
}
1949
/**
 *      __mv_phy_reset - Perform eDMA reset followed by COMRESET
 *      @ap: ATA channel to manipulate
 *      @can_sleep: non-zero if process context (may msleep), zero if
 *                  the caller runs at interrupt level (mdelay only)
 *
 *      Part of this is taken from __sata_phy_reset and modified to
 *      not sleep since this routine gets called from interrupt level.
 *
 *      LOCKING:
 *      Inherited from caller.  This is coded to safe to call at
 *      interrupt level, i.e. it does not sleep.
 */
static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
{
	struct mv_port_priv *pp	= ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct ata_taskfile tf;
	struct ata_device *dev = &ap->device[0];
	unsigned long timeout;
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

	DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	/* Issue COMRESET via SControl: 0x301 asserts DET, 0x300 releases */
comreset_retry:
	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
	__msleep(1, can_sleep);

	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
	__msleep(20, can_sleep);

	/* poll SStatus DET for up to 200ms: 3 = link up, 0 = no device */
	timeout = jiffies + msecs_to_jiffies(200);
	do {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		__msleep(1, can_sleep);
	} while (time_before(jiffies, timeout));

	/* work around errata: 60xx may need the COMRESET reissued if
	 * SStatus settles on anything but these known-good values
	 */
	if (IS_60XX(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

	DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	if (ata_port_online(ap)) {
		ata_port_probe(ap);
	} else {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		ata_port_printk(ap, KERN_INFO,
				"no device found (phy stat %08x)\n", sstatus);
		ata_port_disable(ap);
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		__msleep(500, can_sleep);
		if (retry-- <= 0)
			break;
	}

	/* read the device signature from the shadow registers.  Only the
	 * lba/nsect fields of tf are filled in; presumably
	 * ata_dev_classify() reads only those -- TODO confirm.
	 */
	tf.lbah = readb(ap->ioaddr.lbah_addr);
	tf.lbam = readb(ap->ioaddr.lbam_addr);
	tf.lbal = readb(ap->ioaddr.lbal_addr);
	tf.nsect = readb(ap->ioaddr.nsect_addr);

	dev->class = ata_dev_classify(&tf);
	if (!ata_dev_enabled(dev)) {
		VPRINTK("Port disabled post-sig: No device present.\n");
		ata_port_disable(ap);
	}

	/* ack any latched EDMA errors from the reset */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* EDMA is off after a channel reset; reflect that in pp_flags */
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	VPRINTK("EXIT\n");
}
2047
/* Process-context wrapper for __mv_phy_reset (allowed to sleep). */
static void mv_phy_reset(struct ata_port *ap)
{
	__mv_phy_reset(ap, 1);
}
2052
Brett Russ05b308e2005-10-05 17:08:53 -04002053/**
2054 * mv_eng_timeout - Routine called by libata when SCSI times out I/O
2055 * @ap: ATA channel to manipulate
2056 *
2057 * Intent is to clear all pending error conditions, reset the
2058 * chip/bus, fail the command, and move on.
2059 *
2060 * LOCKING:
Jeff Garzikcca39742006-08-24 03:19:22 -04002061 * This routine holds the host lock while failing the command.
Brett Russ05b308e2005-10-05 17:08:53 -04002062 */
Brett Russ31961942005-09-30 01:36:00 -04002063static void mv_eng_timeout(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04002064{
Tejun Heo0d5ff562007-02-01 15:06:36 +09002065 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
Brett Russ31961942005-09-30 01:36:00 -04002066 struct ata_queued_cmd *qc;
Mark Lord2f9719b2006-06-07 12:53:29 -04002067 unsigned long flags;
Brett Russ31961942005-09-30 01:36:00 -04002068
Tejun Heof15a1da2006-05-15 20:57:56 +09002069 ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
Brett Russ31961942005-09-30 01:36:00 -04002070 DPRINTK("All regs @ start of eng_timeout\n");
Tejun Heo0d5ff562007-02-01 15:06:36 +09002071 mv_dump_all_regs(mmio, ap->port_no, to_pci_dev(ap->host->dev));
Brett Russ31961942005-09-30 01:36:00 -04002072
2073 qc = ata_qc_from_tag(ap, ap->active_tag);
2074 printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
Tejun Heo0d5ff562007-02-01 15:06:36 +09002075 mmio, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd);
Brett Russ31961942005-09-30 01:36:00 -04002076
Jeff Garzikcca39742006-08-24 03:19:22 -04002077 spin_lock_irqsave(&ap->host->lock, flags);
Mark Lord9b358e32006-05-19 16:21:03 -04002078 mv_err_intr(ap, 0);
Jeff Garzikc9d39132005-11-13 17:47:51 -05002079 mv_stop_and_reset(ap);
Jeff Garzikcca39742006-08-24 03:19:22 -04002080 spin_unlock_irqrestore(&ap->host->lock, flags);
Brett Russ31961942005-09-30 01:36:00 -04002081
Mark Lord9b358e32006-05-19 16:21:03 -04002082 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
2083 if (qc->flags & ATA_QCFLAG_ACTIVE) {
2084 qc->err_mask |= AC_ERR_TIMEOUT;
2085 ata_eh_qc_complete(qc);
2086 }
Brett Russ31961942005-09-30 01:36:00 -04002087}
2088
/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup: each taskfile shadow register occupies one
	 * u32 slot in the shadow block
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions:
	 * write SError's current value back to it (presumably
	 * write-1-to-clear semantics -- matches original intent comment),
	 * then ack the EDMA error cause register.
	 */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
2137
/* Determine chip generation and errata from the board index plus the
 * PCI revision ID: select the mv5xxx/mv6xxx hw-ops vtable and set the
 * matching MV_HP_* flags in hpriv.  Returns 0 on success, 1 for an
 * invalid board index.
 */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 rev_id;
	u32 hp_flags = hpriv->hp_flags;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	switch(board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (rev_id) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			/* unknown revs get the newest known workarounds */
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (rev_id) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;

		switch (rev_id) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
	case chip_6042:
		/* Gen IIE parts share the 6xxx ops but get an extra flag */
		hpriv->ops = &mv6xxx_ops;

		hp_flags |= MV_HP_GEN_IIE;

		switch (rev_id) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;

	return 0;
}
2236
/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @host: ATA host to initialize
 *      @board_idx: controller index
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      Returns 0 on success, nonzero (from mv_chip_id() or the
 *      chip-specific reset_hc hook) on failure.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask: silence the chip before we start
	 * poking at it; interrupts are selectively unmasked at the end.
	 */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	/* identify chip flavor/revision; fills in hpriv->ops and hp_flags */
	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* sample per-port PHY state before the global controller reset */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_60XX(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		/* apply chip-specific PHY workarounds for this port */
		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	/* second pass: set up the libata ioaddr tables for each port */
	for (port = 0; port < host->n_ports; port++) {
		void __iomem *port_mmio = mv_port_base(mmio, port);
		mv_port_init(&host->ports[port]->ioaddr, port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);

	/* 50xx chips use a different main-IRQ mask than 60xx/later */
	if (IS_50XX(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));

done:
	return rc;
}
2326
Brett Russ05b308e2005-10-05 17:08:53 -04002327/**
2328 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002329 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002330 *
2331 * FIXME: complete this.
2332 *
2333 * LOCKING:
2334 * Inherited from caller.
2335 */
Tejun Heo4447d352007-04-17 23:44:08 +09002336static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002337{
Tejun Heo4447d352007-04-17 23:44:08 +09002338 struct pci_dev *pdev = to_pci_dev(host->dev);
2339 struct mv_host_priv *hpriv = host->private_data;
Brett Russ31961942005-09-30 01:36:00 -04002340 u8 rev_id, scc;
2341 const char *scc_s;
2342
2343 /* Use this to determine the HW stepping of the chip so we know
2344 * what errata to workaround
2345 */
2346 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
2347
2348 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2349 if (scc == 0)
2350 scc_s = "SCSI";
2351 else if (scc == 0x01)
2352 scc_s = "RAID";
2353 else
2354 scc_s = "unknown";
2355
Jeff Garzika9524a72005-10-30 14:39:11 -05002356 dev_printk(KERN_INFO, &pdev->dev,
2357 "%u slots %u ports %s mode IRQ via %s\n",
Tejun Heo4447d352007-04-17 23:44:08 +09002358 (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002359 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2360}
2361
/**
 *      mv_init_one - handle a positive probe of a Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      Allocates the ATA host and driver-private data, maps the
 *      controller BAR, initializes the chip, and activates the host.
 *      All resources are managed (devm_/pcim_), so error paths need
 *      no explicit cleanup.
 *
 *      Returns 0 on success, negative errno on failure.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	/* announce driver version once, on first probe only */
	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		/* BAR busy: keep the device enabled so the current
		 * owner isn't disturbed, then fail the probe below.
		 */
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	/* set up 64-bit (or fall back to 32-bit) DMA masks */
	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts: prefer MSI if requested and available,
	 * otherwise make sure legacy INTx is on.
	 */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 &mv_sht);
}
2423
2424static int __init mv_init(void)
2425{
Pavel Roskinb7887192006-08-10 18:13:18 +09002426 return pci_register_driver(&mv_pci_driver);
Brett Russ20f733e2005-09-01 18:26:17 -04002427}
2428
/* Module exit point: detach from the PCI core; the core tears down
 * all devices bound to mv_pci_driver.
 */
static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}
2433
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
/* export the PCI ID table so userspace tools can auto-load the module */
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

/* world-readable, not writable after load (0444) */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);