blob: 68c3f22890d29d23fa5941645d15c218f0f51d67 [file] [log] [blame]
Brett Russ20f733e2005-09-01 18:26:17 -04001/*
2 * sata_mv.c - Marvell SATA support
3 *
Jeff Garzik8b260242005-11-12 12:32:50 -05004 * Copyright 2005: EMC Corporation, all rights reserved.
Jeff Garzike2b1be52005-11-18 14:04:23 -05005 * Copyright 2005 Red Hat, Inc. All rights reserved.
Brett Russ20f733e2005-09-01 18:26:17 -04006 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
Jeff Garzik4a05e202007-05-24 23:40:15 -040024/*
25 sata_mv TODO list:
26
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember a couple workarounds (one related to PCI-X)
30 are still needed.
31
Jeff Garzik4a05e202007-05-24 23:40:15 -040032 4) Add NCQ support (easy to intermediate, once new-EH support appears)
33
34 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
35
36 6) Add port multiplier support (intermediate)
37
Jeff Garzik4a05e202007-05-24 23:40:15 -040038 8) Develop a low-power-consumption strategy, and implement it.
39
40 9) [Experiment, low priority] See if ATAPI can be supported using
41 "unknown FIS" or "vendor-specific FIS" support, or something creative
42 like that.
43
44 10) [Experiment, low priority] Investigate interrupt coalescing.
45 Quite often, especially with PCI Message Signalled Interrupts (MSI),
46 the overhead saved by interrupt mitigation is often not
47 worth the latency cost.
48
49 11) [Experiment, Marvell value added] Is it possible to use target
50 mode to cross-connect two Linux boxes with Marvell cards? If so,
51 creating LibATA target mode support would be very interesting.
52
53 Target mode, for those without docs, is the ability to directly
54 connect two SATA controllers.
55
56 13) Verify that 7042 is fully supported. I only have a 6042.
57
58*/
59
60
Brett Russ20f733e2005-09-01 18:26:17 -040061#include <linux/kernel.h>
62#include <linux/module.h>
63#include <linux/pci.h>
64#include <linux/init.h>
65#include <linux/blkdev.h>
66#include <linux/delay.h>
67#include <linux/interrupt.h>
Brett Russ20f733e2005-09-01 18:26:17 -040068#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050069#include <linux/device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040070#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050071#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040072#include <scsi/scsi_device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040073#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040074
75#define DRV_NAME "sata_mv"
Jeff Garzik6c087722007-10-12 00:16:23 -040076#define DRV_VERSION "1.01"
Brett Russ20f733e2005-09-01 18:26:17 -040077
78enum {
79 /* BAR's are enumerated in terms of pci_resource_start() terms */
80 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
81 MV_IO_BAR = 2, /* offset 0x18: IO space */
82 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
83
84 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
85 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
86
87 MV_PCI_REG_BASE = 0,
88 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
Mark Lord615ab952006-05-19 16:24:56 -040089 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
90 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
91 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
92 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
93 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
94
Brett Russ20f733e2005-09-01 18:26:17 -040095 MV_SATAHC0_REG_BASE = 0x20000,
Jeff Garzik522479f2005-11-12 22:14:02 -050096 MV_FLASH_CTL = 0x1046c,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -050097 MV_GPIO_PORT_CTL = 0x104f0,
98 MV_RESET_CFG = 0x180d8,
Brett Russ20f733e2005-09-01 18:26:17 -040099
100 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
101 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
102 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
103 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
104
Brett Russ31961942005-09-30 01:36:00 -0400105 MV_MAX_Q_DEPTH = 32,
106 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
107
108 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
109 * CRPB needs alignment on a 256B boundary. Size == 256B
110 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
111 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
112 */
113 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
114 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
115 MV_MAX_SG_CT = 176,
116 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
117 MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
118
Brett Russ20f733e2005-09-01 18:26:17 -0400119 MV_PORTS_PER_HC = 4,
120 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
121 MV_PORT_HC_SHIFT = 2,
Brett Russ31961942005-09-30 01:36:00 -0400122 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
Brett Russ20f733e2005-09-01 18:26:17 -0400123 MV_PORT_MASK = 3,
124
125 /* Host Flags */
126 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
127 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400128 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400129 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
130 ATA_FLAG_PIO_POLLING,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500131 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
Brett Russ20f733e2005-09-01 18:26:17 -0400132
Brett Russ31961942005-09-30 01:36:00 -0400133 CRQB_FLAG_READ = (1 << 0),
134 CRQB_TAG_SHIFT = 1,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400135 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
136 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
Brett Russ31961942005-09-30 01:36:00 -0400137 CRQB_CMD_ADDR_SHIFT = 8,
138 CRQB_CMD_CS = (0x2 << 11),
139 CRQB_CMD_LAST = (1 << 15),
140
141 CRPB_FLAG_STATUS_SHIFT = 8,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400142 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
143 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
Brett Russ31961942005-09-30 01:36:00 -0400144
145 EPRD_FLAG_END_OF_TBL = (1 << 31),
146
Brett Russ20f733e2005-09-01 18:26:17 -0400147 /* PCI interface registers */
148
Brett Russ31961942005-09-30 01:36:00 -0400149 PCI_COMMAND_OFS = 0xc00,
150
Brett Russ20f733e2005-09-01 18:26:17 -0400151 PCI_MAIN_CMD_STS_OFS = 0xd30,
152 STOP_PCI_MASTER = (1 << 2),
153 PCI_MASTER_EMPTY = (1 << 3),
154 GLOB_SFT_RST = (1 << 4),
155
Jeff Garzik522479f2005-11-12 22:14:02 -0500156 MV_PCI_MODE = 0xd00,
157 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
158 MV_PCI_DISC_TIMER = 0xd04,
159 MV_PCI_MSI_TRIGGER = 0xc38,
160 MV_PCI_SERR_MASK = 0xc28,
161 MV_PCI_XBAR_TMOUT = 0x1d04,
162 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
163 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
164 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
165 MV_PCI_ERR_COMMAND = 0x1d50,
166
167 PCI_IRQ_CAUSE_OFS = 0x1d58,
168 PCI_IRQ_MASK_OFS = 0x1d5c,
Brett Russ20f733e2005-09-01 18:26:17 -0400169 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
170
171 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
172 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
173 PORT0_ERR = (1 << 0), /* shift by port # */
174 PORT0_DONE = (1 << 1), /* shift by port # */
175 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
176 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
177 PCI_ERR = (1 << 18),
178 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
179 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500180 PORTS_0_3_COAL_DONE = (1 << 8),
181 PORTS_4_7_COAL_DONE = (1 << 17),
Brett Russ20f733e2005-09-01 18:26:17 -0400182 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
183 GPIO_INT = (1 << 22),
184 SELF_INT = (1 << 23),
185 TWSI_INT = (1 << 24),
186 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500187 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
Jeff Garzik8b260242005-11-12 12:32:50 -0500188 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
Brett Russ20f733e2005-09-01 18:26:17 -0400189 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
190 HC_MAIN_RSVD),
Jeff Garzikfb621e22007-02-25 04:19:45 -0500191 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
192 HC_MAIN_RSVD_5),
Brett Russ20f733e2005-09-01 18:26:17 -0400193
194 /* SATAHC registers */
195 HC_CFG_OFS = 0,
196
197 HC_IRQ_CAUSE_OFS = 0x14,
Brett Russ31961942005-09-30 01:36:00 -0400198 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
Brett Russ20f733e2005-09-01 18:26:17 -0400199 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
200 DEV_IRQ = (1 << 8), /* shift by port # */
201
202 /* Shadow block registers */
Brett Russ31961942005-09-30 01:36:00 -0400203 SHD_BLK_OFS = 0x100,
204 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
Brett Russ20f733e2005-09-01 18:26:17 -0400205
206 /* SATA registers */
207 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
208 SATA_ACTIVE_OFS = 0x350,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500209 PHY_MODE3 = 0x310,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500210 PHY_MODE4 = 0x314,
211 PHY_MODE2 = 0x330,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500212 MV5_PHY_MODE = 0x74,
213 MV5_LT_MODE = 0x30,
214 MV5_PHY_CTL = 0x0C,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500215 SATA_INTERFACE_CTL = 0x050,
216
217 MV_M2_PREAMP_MASK = 0x7e0,
Brett Russ20f733e2005-09-01 18:26:17 -0400218
219 /* Port registers */
220 EDMA_CFG_OFS = 0,
Brett Russ31961942005-09-30 01:36:00 -0400221 EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */
222 EDMA_CFG_NCQ = (1 << 5),
223 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
224 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
225 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
Brett Russ20f733e2005-09-01 18:26:17 -0400226
227 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
228 EDMA_ERR_IRQ_MASK_OFS = 0xc,
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400229 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
230 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
231 EDMA_ERR_DEV = (1 << 2), /* device error */
232 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
233 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
234 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400235 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
236 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400237 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400238 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400239 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
240 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
241 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
242 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
243 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
Brett Russ20f733e2005-09-01 18:26:17 -0400244 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400245 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
246 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
247 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
248 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400249 EDMA_ERR_OVERRUN_5 = (1 << 5),
250 EDMA_ERR_UNDERRUN_5 = (1 << 6),
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400251 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
252 EDMA_ERR_PRD_PAR |
253 EDMA_ERR_DEV_DCON |
254 EDMA_ERR_DEV_CON |
255 EDMA_ERR_SERR |
256 EDMA_ERR_SELF_DIS |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400257 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400258 EDMA_ERR_CRPB_PAR |
259 EDMA_ERR_INTRL_PAR |
260 EDMA_ERR_IORDY |
261 EDMA_ERR_LNK_CTRL_RX_2 |
262 EDMA_ERR_LNK_DATA_RX |
263 EDMA_ERR_LNK_DATA_TX |
264 EDMA_ERR_TRANS_PROTO,
265 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
266 EDMA_ERR_PRD_PAR |
267 EDMA_ERR_DEV_DCON |
268 EDMA_ERR_DEV_CON |
269 EDMA_ERR_OVERRUN_5 |
270 EDMA_ERR_UNDERRUN_5 |
271 EDMA_ERR_SELF_DIS_5 |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400272 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400273 EDMA_ERR_CRPB_PAR |
274 EDMA_ERR_INTRL_PAR |
275 EDMA_ERR_IORDY,
Brett Russ20f733e2005-09-01 18:26:17 -0400276
Brett Russ31961942005-09-30 01:36:00 -0400277 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
278 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400279
280 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
281 EDMA_REQ_Q_PTR_SHIFT = 5,
282
283 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
284 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
285 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400286 EDMA_RSP_Q_PTR_SHIFT = 3,
287
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400288 EDMA_CMD_OFS = 0x28, /* EDMA command register */
289 EDMA_EN = (1 << 0), /* enable EDMA */
290 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
291 ATA_RST = (1 << 2), /* reset trans/link/phy */
Brett Russ20f733e2005-09-01 18:26:17 -0400292
Jeff Garzikc9d39132005-11-13 17:47:51 -0500293 EDMA_IORDY_TMOUT = 0x34,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500294 EDMA_ARB_CFG = 0x38,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500295
Brett Russ31961942005-09-30 01:36:00 -0400296 /* Host private flags (hp_flags) */
297 MV_HP_FLAG_MSI = (1 << 0),
Jeff Garzik47c2b672005-11-12 21:13:17 -0500298 MV_HP_ERRATA_50XXB0 = (1 << 1),
299 MV_HP_ERRATA_50XXB2 = (1 << 2),
300 MV_HP_ERRATA_60X1B2 = (1 << 3),
301 MV_HP_ERRATA_60X1C0 = (1 << 4),
Jeff Garzike4e7b892006-01-31 12:18:41 -0500302 MV_HP_ERRATA_XX42A0 = (1 << 5),
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400303 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
304 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
305 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
Brett Russ20f733e2005-09-01 18:26:17 -0400306
Brett Russ31961942005-09-30 01:36:00 -0400307 /* Port private flags (pp_flags) */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400308 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
309 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
Brett Russ31961942005-09-30 01:36:00 -0400310};
311
Jeff Garzikee9ccdf2007-07-12 15:51:22 -0400312#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
313#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
Jeff Garzike4e7b892006-01-31 12:18:41 -0500314#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500315
/* DMA boundary and EDMA queue base-address masks (kept out of the
 * main register enum because they are unsigned constants, not bits).
 */
enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill-sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
330
/* Supported chip families; used as driver_data in mv_pci_tbl[] and as
 * the selector for the matching mv_port_info[] entry.
 */
enum chip_type {
	chip_504x,	/* Gen I  (50xx) */
	chip_508x,	/* Gen I,  dual host controller */
	chip_5080,	/* Gen I,  dual host controller */
	chip_604x,	/* Gen II (60xx) */
	chip_608x,	/* Gen II, dual host controller */
	chip_6042,	/* Gen IIE */
	chip_7042,	/* Gen IIE */
};
340
/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;	/* SG (ePRD) table address, low 32 bits */
	__le32			sg_addr_hi;	/* SG table address, high 32 bits */
	__le16			ctrl_flags;	/* CRQB_FLAG_READ, tag/IO-id shifts */
	__le16			ata_cmd[11];	/* ATA register writes (CRQB_CMD_* encoding) */
};
348
/* Gen-IIE variant of the command request block */
struct mv_crqb_iie {
	__le32			addr;		/* SG table address, low 32 bits */
	__le32			addr_hi;	/* SG table address, high 32 bits */
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];	/* ATA command words */
};
356
/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;		/* tag / IO id (see CRPB_IOID_SHIFT_*) */
	__le16			flags;		/* status above CRPB_FLAG_STATUS_SHIFT */
	__le32			tmstmp;		/* timestamp */
};
363
/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;		/* buffer address, low 32 bits */
	__le32			flags_size;	/* byte count; EPRD_FLAG_END_OF_TBL in last entry */
	__le32			addr_hi;	/* buffer address, high 32 bits */
	__le32			reserved;
};
371
/* Per-port driver state: EDMA rings, their bus addresses, bookkeeping. */
struct mv_port_priv {
	struct mv_crqb		*crqb;		/* command request ring (CPU view) */
	dma_addr_t		crqb_dma;	/* ... and its DMA address */
	struct mv_crpb		*crpb;		/* command response ring (CPU view) */
	dma_addr_t		crpb_dma;	/* ... and its DMA address */
	struct mv_sg		*sg_tbl;	/* ePRD scatter/gather table */
	dma_addr_t		sg_tbl_dma;

	unsigned int		req_idx;	/* request ring index */
	unsigned int		resp_idx;	/* response ring index */

	u32			pp_flags;	/* MV_PP_FLAG_* */
};
385
/* Per-port PHY signal parameters.
 * NOTE(review): appears to be filled by hpriv->ops->read_preamp() — confirm.
 */
struct mv_port_signal {
	u32			amps;	/* amplitude, presumably — verify against chip docs */
	u32			pre;	/* pre-emphasis, presumably — verify against chip docs */
};
390
struct mv_host_priv;

/* Per-chip-family hardware hooks; implemented separately for Gen I
 * (mv5xxx_ops) and Gen II/IIE (mv6xxx_ops) chips.
 */
struct mv_hw_ops {
	/* apply PHY-related errata workarounds to one port */
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	/* read signal settings for port @idx */
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	/* reset @n_hc host controllers */
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};
403
/* Controller-wide private data. */
struct mv_host_priv {
	u32			hp_flags;	/* MV_HP_* generation and errata flags */
	struct mv_port_signal	signal[8];	/* per-port PHY settings, up to 8 ports */
	const struct mv_hw_ops	*ops;		/* chip-family hooks (mv5xxx_ops/mv6xxx_ops) */
};
409
410static void mv_irq_clear(struct ata_port *ap);
Tejun Heoda3dbb12007-07-16 14:29:40 +0900411static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
412static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
413static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
414static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Brett Russ31961942005-09-30 01:36:00 -0400415static int mv_port_start(struct ata_port *ap);
416static void mv_port_stop(struct ata_port *ap);
417static void mv_qc_prep(struct ata_queued_cmd *qc);
Jeff Garzike4e7b892006-01-31 12:18:41 -0500418static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
Tejun Heo9a3d9eb2006-01-23 13:09:36 +0900419static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400420static void mv_error_handler(struct ata_port *ap);
421static void mv_post_int_cmd(struct ata_queued_cmd *qc);
422static void mv_eh_freeze(struct ata_port *ap);
423static void mv_eh_thaw(struct ata_port *ap);
Jeff Garzik6c087722007-10-12 00:16:23 -0400424static int mv_slave_config(struct scsi_device *sdev);
Brett Russ20f733e2005-09-01 18:26:17 -0400425static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
426
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500427static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
428 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500429static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
430static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
431 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500432static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
433 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500434static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
435static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500436
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500437static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
438 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500439static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
440static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
441 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500442static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
443 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500444static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
445static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500446static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
447 unsigned int port_no);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500448
/* SCSI host template for Gen-I (50xx) chips.
 * Currently field-for-field identical to mv6_sht; kept separate so the
 * two generations can diverge without churn.
 */
static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,	/* halved: see s/g splitting on length */
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= mv_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
466
/* SCSI host template for Gen-II/IIE (60xx/6042/7042) chips. */
static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,	/* halved: see s/g splitting on length */
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= mv_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
484
/* Port operations for Gen-I chips; differs from mv6_ops only in using
 * the mv5_* SCR (SATA control/status register) accessors.
 */
static const struct ata_port_operations mv5_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
512
/* Port operations for Gen-II chips (standard SCR accessors). */
static const struct ata_port_operations mv6_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
540
/* Port operations for Gen-IIE chips; same as mv6_ops except the command
 * request blocks use the IIE layout (mv_qc_prep_iie).
 */
static const struct ata_port_operations mv_iie_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
568
/* Per-chip-family libata port info, indexed by enum chip_type. */
static const struct ata_port_info mv_port_info[] = {
	{ /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{ /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{ /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{ /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{ /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{ /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{ /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
614
/* PCI IDs handled by this driver; driver_data is the chip_type index. */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	/* add Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	{ }			/* terminate list */
};
642
/* PCI driver glue: probe via mv_init_one, generic libata teardown. */
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
649
/* Hardware hooks for Gen-I (50xx) chips. */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
658
/* Low-level hardware hooks used for the 60xx/70xx-series chips.
 * Note the bus reset is the shared PCI-bus variant, not a mv6_* hook.
 */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
667
/*
 * module options
 */
static int msi;		/* Use PCI msi; either zero (off, default) or non-zero */
672
673
Jeff Garzikd88184f2007-02-26 01:26:06 -0500674/* move to PCI layer or libata core? */
675static int pci_go_64(struct pci_dev *pdev)
676{
677 int rc;
678
679 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
680 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
681 if (rc) {
682 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
683 if (rc) {
684 dev_printk(KERN_ERR, &pdev->dev,
685 "64-bit DMA enable failed\n");
686 return rc;
687 }
688 }
689 } else {
690 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
691 if (rc) {
692 dev_printk(KERN_ERR, &pdev->dev,
693 "32-bit DMA enable failed\n");
694 return rc;
695 }
696 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
697 if (rc) {
698 dev_printk(KERN_ERR, &pdev->dev,
699 "32-bit consistent DMA enable failed\n");
700 return rc;
701 }
702 }
703
704 return rc;
705}
706
Jeff Garzikddef9bb2006-02-02 16:17:06 -0500707/*
Brett Russ20f733e2005-09-01 18:26:17 -0400708 * Functions
709 */
710
/* "write long, flushed": write a register and immediately read it
 * back so the posted PCI write reaches the device before we proceed.
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
716
Brett Russ20f733e2005-09-01 18:26:17 -0400717static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
718{
719 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
720}
721
/* Map a global port number to the host controller that owns it. */
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}
726
/* Map a global port number to its index within its host controller. */
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
731
/* Register base of the host controller that owns @port. */
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}
737
/* Per-port register base: the port blocks sit after the HC's
 * arbiter registers, one MV_PORT_REG_SZ block per hard port.
 */
static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
744
/* Per-port register base for an ata_port, via the host's iomap. */
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}
749
Jeff Garzikcca39742006-08-24 03:19:22 -0400750static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400751{
Jeff Garzikcca39742006-08-24 03:19:22 -0400752 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400753}
754
/* Intentionally empty .irq_clear hook: this driver has no per-port
 * interrupt-acknowledge work to do here.
 */
static void mv_irq_clear(struct ata_port *ap)
{
}
758
/* SCSI slave_configure hook: run the generic libata configuration,
 * then cap the number of physical segments per request.  The cap is
 * half the ePRD table size — presumably because mv_fill_sg() can
 * split a segment crossing a 64KB boundary into two ePRDs (TODO:
 * confirm that rationale).
 */
static int mv_slave_config(struct scsi_device *sdev)
{
	int rc = ata_scsi_slave_config(sdev);
	if (rc)
		return rc;

	blk_queue_max_phys_segments(sdev->request_queue, MV_MAX_SG_CT / 2);

	return 0;	/* scsi layer doesn't check return value, sigh */
}
769
/* Program the EDMA request/response queue base addresses and the
 * in/out pointer registers from the software indices in @pp.  On
 * chips with the XX42A0 errata the pointer registers also carry the
 * low address bits, so the full low DMA address is OR'd in there.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* CRQB ring must be 1KB-aligned (low 10 address bits clear) */
	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	/* CRPB ring must be 256-byte-aligned (low 8 address bits clear) */
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
809
/**
 *      mv_start_dma - Enable eDMA engine
 *      @base: port base address
 *      @hpriv: host private data (for errata-dependent pointer setup)
 *      @pp: port private data
 *
 *      If eDMA is not already running: clear any pending EDMA error
 *      interrupt causes, reprogram the queue pointers, then set the
 *      enable bit and record that in pp_flags.  Verify the local
 *      cache of the eDMA state is accurate with a WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
			 struct mv_port_priv *pp)
{
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		/* clear EDMA event indicators, if any */
		writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(base, hpriv, pp);

		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}
835
/**
 *      __mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Write the self-clearing disable bit if our cached state says
 *      eDMA is on, then busy-wait (up to 1000 x 100us = 100ms) for
 *      the hardware to report the engine stopped.  Verify the local
 *      cache of the eDMA state is accurate with a WARN_ON.
 *
 *      Returns 0 on success, -EIO if the engine refuses to stop.
 *
 *      LOCKING:
 *      Inherited from caller (mv_stop_dma() takes ap->host->lock
 *      around this).
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
878
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400879static int mv_stop_dma(struct ata_port *ap)
880{
881 unsigned long flags;
882 int rc;
883
884 spin_lock_irqsave(&ap->host->lock, flags);
885 rc = __mv_stop_dma(ap);
886 spin_unlock_irqrestore(&ap->host->lock, flags);
887
888 return rc;
889}
890
#ifdef ATA_DEBUG
/* Debug helper: hex-dump an MMIO region as 32-bit words, four per
 * line, each line prefixed with the address of its first word.
 */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ",readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
905
/* Debug helper: hex-dump the first @bytes of PCI config space as
 * 32-bit words, four per line, prefixed with the byte offset.
 * Compiles to an empty function unless ATA_DEBUG is defined.
 */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev,b,&dw);
			printk("%08x ",dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
922static void mv_dump_all_regs(void __iomem *mmio_base, int port,
923 struct pci_dev *pdev)
924{
925#ifdef ATA_DEBUG
Jeff Garzik8b260242005-11-12 12:32:50 -0500926 void __iomem *hc_base = mv_hc_base(mmio_base,
Brett Russ31961942005-09-30 01:36:00 -0400927 port >> MV_PORT_HC_SHIFT);
928 void __iomem *port_base;
929 int start_port, num_ports, p, start_hc, num_hcs, hc;
930
931 if (0 > port) {
932 start_hc = start_port = 0;
933 num_ports = 8; /* shld be benign for 4 port devs */
934 num_hcs = 2;
935 } else {
936 start_hc = port >> MV_PORT_HC_SHIFT;
937 start_port = port;
938 num_ports = num_hcs = 1;
939 }
Jeff Garzik8b260242005-11-12 12:32:50 -0500940 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
Brett Russ31961942005-09-30 01:36:00 -0400941 num_ports > 1 ? num_ports - 1 : start_port);
942
943 if (NULL != pdev) {
944 DPRINTK("PCI config space regs:\n");
945 mv_dump_pci_cfg(pdev, 0x68);
946 }
947 DPRINTK("PCI regs:\n");
948 mv_dump_mem(mmio_base+0xc00, 0x3c);
949 mv_dump_mem(mmio_base+0xd00, 0x34);
950 mv_dump_mem(mmio_base+0xf00, 0x4);
951 mv_dump_mem(mmio_base+0x1d00, 0x6c);
952 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
Dan Alonid220c372006-04-10 23:20:22 -0700953 hc_base = mv_hc_base(mmio_base, hc);
Brett Russ31961942005-09-30 01:36:00 -0400954 DPRINTK("HC regs (HC %i):\n", hc);
955 mv_dump_mem(hc_base, 0x1c);
956 }
957 for (p = start_port; p < start_port + num_ports; p++) {
958 port_base = mv_port_base(mmio_base, p);
959 DPRINTK("EDMA regs (port %i):\n",p);
960 mv_dump_mem(port_base, 0x54);
961 DPRINTK("SATA regs (port %i):\n",p);
962 mv_dump_mem(port_base+0x300, 0x60);
963 }
964#endif
965}
966
Brett Russ20f733e2005-09-01 18:26:17 -0400967static unsigned int mv_scr_offset(unsigned int sc_reg_in)
968{
969 unsigned int ofs;
970
971 switch (sc_reg_in) {
972 case SCR_STATUS:
973 case SCR_CONTROL:
974 case SCR_ERROR:
975 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
976 break;
977 case SCR_ACTIVE:
978 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
979 break;
980 default:
981 ofs = 0xffffffffU;
982 break;
983 }
984 return ofs;
985}
986
Tejun Heoda3dbb12007-07-16 14:29:40 +0900987static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -0400988{
989 unsigned int ofs = mv_scr_offset(sc_reg_in);
990
Tejun Heoda3dbb12007-07-16 14:29:40 +0900991 if (ofs != 0xffffffffU) {
992 *val = readl(mv_ap_base(ap) + ofs);
993 return 0;
994 } else
995 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -0400996}
997
Tejun Heoda3dbb12007-07-16 14:29:40 +0900998static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -0400999{
1000 unsigned int ofs = mv_scr_offset(sc_reg_in);
1001
Tejun Heoda3dbb12007-07-16 14:29:40 +09001002 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -04001003 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001004 return 0;
1005 } else
1006 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001007}
1008
/* Program the EDMA configuration register for non-NCQ operation,
 * with per-generation (Gen I / II / IIE) bit settings.  @ap is
 * currently unused.  Read-modify-write: bits not touched here keep
 * their reset/previous values.
 */
static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable eQue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1040
/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.  One coherent DMA chunk holds, in order: the
 *      CRQB ring, the CRPB ring, and the ePRD scatter/gather table.
 *      All allocations are devm-managed, so error paths need no
 *      explicit cleanup.
 *
 *      Returns 0 on success, -ENOMEM on allocation failure, or the
 *      error from ata_pad_alloc().
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	unsigned long flags;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	/* configure EDMA and queue pointers under the host lock */
	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, hpriv, port_mmio);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}
1113
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA; the DMA memory itself is devm-managed (allocated in
 *      mv_port_start()) and needs no explicit free here.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop
 *      (via the mv_stop_dma() wrapper).
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
}
1127
/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.  The ePRD
 *      length field is 16 bits, so any DMA segment that would cross
 *      a 64KB address window is split into multiple ePRDs.  A full
 *      64KB piece stores len & 0xffff == 0 — presumably the hardware
 *      encoding for 64KB (TODO: confirm against the Marvell
 *      datasheet).  EPRD_FLAG_END_OF_TBL is set on the final piece
 *      of the final segment.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg;

	mv_sg = pp->sg_tbl;
	ata_for_each_sg(sg, qc) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			/* clamp to the end of the current 64KB window */
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			if (!sg_len && ata_sg_is_last(sg, qc))
				mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);

			mv_sg++;
		}

	}
}
1170
Mark Lorde1469872006-05-22 19:02:03 -04001171static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001172{
Mark Lord559eeda2006-05-19 16:40:15 -04001173 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001174 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001175 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001176}
1177
/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* non-DMA protocols are issued through the normal libata path */
	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this*/

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accomodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1269
/**
 *      mv_qc_prep_iie - Host specific command preparation (Gen IIE).
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the Gen IIE
 *      CRQB variant — which carries the packed taskfile in four
 *      32-bit words rather than mv_crqb_pack_cmd() entries — does
 *      some sanity checking, and calls the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* non-DMA protocols are issued through the normal libata path */
	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
						   what we use as our tag */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	/* pack the taskfile into the four CRQB command words */
	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1338
/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(port_mmio, hpriv, pp);

	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	/* advance the software producer index (wraps via the mask) */
	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1386
Brett Russ05b308e2005-10-05 17:08:53 -04001387/**
Brett Russ05b308e2005-10-05 17:08:53 -04001388 * mv_err_intr - Handle error interrupts on the port
1389 * @ap: ATA channel to manipulate
Mark Lord9b358e32006-05-19 16:21:03 -04001390 * @reset_allowed: bool: 0 == don't trigger from reset here
Brett Russ05b308e2005-10-05 17:08:53 -04001391 *
1392 * In most cases, just clear the interrupt and move on. However,
1393 * some cases require an eDMA reset, which is done right before
1394 * the COMRESET in mv_phy_reset(). The SERR case requires a
1395 * clear of pending errors in the SATA SERROR register. Finally,
1396 * if the port disabled DMA, update our cached copy to match.
1397 *
1398 * LOCKING:
1399 * Inherited from caller.
1400 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001401static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
Brett Russ20f733e2005-09-01 18:26:17 -04001402{
Brett Russ31961942005-09-30 01:36:00 -04001403 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001404 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1405 struct mv_port_priv *pp = ap->private_data;
1406 struct mv_host_priv *hpriv = ap->host->private_data;
1407 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1408 unsigned int action = 0, err_mask = 0;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001409 struct ata_eh_info *ehi = &ap->link.eh_info;
Brett Russ20f733e2005-09-01 18:26:17 -04001410
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001411 ata_ehi_clear_desc(ehi);
Brett Russ20f733e2005-09-01 18:26:17 -04001412
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001413 if (!edma_enabled) {
1414 /* just a guess: do we need to do this? should we
1415 * expand this, and do it in all cases?
1416 */
Tejun Heo936fd732007-08-06 18:36:23 +09001417 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1418 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Brett Russ20f733e2005-09-01 18:26:17 -04001419 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001420
1421 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1422
1423 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1424
1425 /*
1426 * all generations share these EDMA error cause bits
1427 */
1428
1429 if (edma_err_cause & EDMA_ERR_DEV)
1430 err_mask |= AC_ERR_DEV;
1431 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
Jeff Garzik6c1153e2007-07-13 15:20:15 -04001432 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001433 EDMA_ERR_INTRL_PAR)) {
1434 err_mask |= AC_ERR_ATA_BUS;
1435 action |= ATA_EH_HARDRESET;
Tejun Heob64bbc32007-07-16 14:29:39 +09001436 ata_ehi_push_desc(ehi, "parity error");
Brett Russafb0edd2005-10-05 17:08:42 -04001437 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001438 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1439 ata_ehi_hotplugged(ehi);
1440 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
Tejun Heob64bbc32007-07-16 14:29:39 +09001441 "dev disconnect" : "dev connect");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001442 }
1443
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04001444 if (IS_GEN_I(hpriv)) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001445 eh_freeze_mask = EDMA_EH_FREEZE_5;
1446
1447 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1448 struct mv_port_priv *pp = ap->private_data;
1449 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001450 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001451 }
1452 } else {
1453 eh_freeze_mask = EDMA_EH_FREEZE;
1454
1455 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1456 struct mv_port_priv *pp = ap->private_data;
1457 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001458 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001459 }
1460
1461 if (edma_err_cause & EDMA_ERR_SERR) {
Tejun Heo936fd732007-08-06 18:36:23 +09001462 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1463 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001464 err_mask = AC_ERR_ATA_BUS;
1465 action |= ATA_EH_HARDRESET;
1466 }
1467 }
Brett Russ20f733e2005-09-01 18:26:17 -04001468
1469 /* Clear EDMA now that SERR cleanup done */
1470 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1471
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001472 if (!err_mask) {
1473 err_mask = AC_ERR_OTHER;
1474 action |= ATA_EH_HARDRESET;
1475 }
1476
1477 ehi->serror |= serr;
1478 ehi->action |= action;
1479
1480 if (qc)
1481 qc->err_mask |= err_mask;
1482 else
1483 ehi->err_mask |= err_mask;
1484
1485 if (edma_err_cause & eh_freeze_mask)
1486 ata_port_freeze(ap);
1487 else
1488 ata_port_abort(ap);
1489}
1490
1491static void mv_intr_pio(struct ata_port *ap)
1492{
1493 struct ata_queued_cmd *qc;
1494 u8 ata_status;
1495
1496 /* ignore spurious intr if drive still BUSY */
1497 ata_status = readb(ap->ioaddr.status_addr);
1498 if (unlikely(ata_status & ATA_BUSY))
1499 return;
1500
1501 /* get active ATA command */
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001502 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001503 if (unlikely(!qc)) /* no active tag */
1504 return;
1505 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1506 return;
1507
1508 /* and finally, complete the ATA command */
1509 qc->err_mask |= ac_err_mask(ata_status);
1510 ata_qc_complete(qc);
1511}
1512
/**
 * mv_intr_edma - Drain the EDMA response queue and complete commands
 * @ap: ATA channel whose response queue should be processed
 *
 * Walk the hardware response queue from our cached software read
 * pointer (pp->resp_idx) up to the hardware write pointer, completing
 * the qc associated with each consumed entry.  If an entry carries
 * error bits, hand off to mv_err_intr() and return without updating
 * the hardware out-pointer.  Otherwise, after the loop, publish the
 * new out-pointer so the hardware may reuse the consumed entries.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else if (IS_GEN_II(hpriv))
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_6) & 0x3f;

		else /* IS_GEN_IIE */
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_7) & 0x3f;

		qc = ata_qc_from_tag(ap, tag);

		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
		 * bits (WARNING: might not necessarily be associated
		 * with this command), which -should- be clear
		 * if all is well
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if (unlikely(status & 0xff)) {
			/* error path: defer out-pointer update to EH */
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	/* tell hardware how far we got; out_index == in_index here */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1584
Brett Russ05b308e2005-10-05 17:08:53 -04001585/**
1586 * mv_host_intr - Handle all interrupts on the given host controller
Jeff Garzikcca39742006-08-24 03:19:22 -04001587 * @host: host specific structure
Brett Russ05b308e2005-10-05 17:08:53 -04001588 * @relevant: port error bits relevant to this host controller
1589 * @hc: which host controller we're to look at
1590 *
1591 * Read then write clear the HC interrupt status then walk each
1592 * port connected to the HC and see if it needs servicing. Port
1593 * success ints are reported in the HC interrupt status reg, the
1594 * port error ints are reported in the higher level main
1595 * interrupt status register and thus are passed in via the
1596 * 'relevant' argument.
1597 *
1598 * LOCKING:
1599 * Inherited from caller.
1600 */
Jeff Garzikcca39742006-08-24 03:19:22 -04001601static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
Brett Russ20f733e2005-09-01 18:26:17 -04001602{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001603 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
Brett Russ20f733e2005-09-01 18:26:17 -04001604 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
Brett Russ20f733e2005-09-01 18:26:17 -04001605 u32 hc_irq_cause;
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001606 int port, port0;
Brett Russ20f733e2005-09-01 18:26:17 -04001607
Jeff Garzik35177262007-02-24 21:26:42 -05001608 if (hc == 0)
Brett Russ20f733e2005-09-01 18:26:17 -04001609 port0 = 0;
Jeff Garzik35177262007-02-24 21:26:42 -05001610 else
Brett Russ20f733e2005-09-01 18:26:17 -04001611 port0 = MV_PORTS_PER_HC;
Brett Russ20f733e2005-09-01 18:26:17 -04001612
1613 /* we'll need the HC success int register in most cases */
1614 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001615 if (!hc_irq_cause)
1616 return;
1617
1618 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001619
1620 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1621 hc,relevant,hc_irq_cause);
1622
1623 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
Jeff Garzikcca39742006-08-24 03:19:22 -04001624 struct ata_port *ap = host->ports[port];
Mark Lord63af2a52006-03-29 09:50:31 -05001625 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001626 int have_err_bits, hard_port, shift;
Jeff Garzik55d8ca42006-03-29 19:43:31 -05001627
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001628 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
Jeff Garzika2c91a82005-11-17 05:44:44 -05001629 continue;
1630
Brett Russ31961942005-09-30 01:36:00 -04001631 shift = port << 1; /* (port * 2) */
Brett Russ20f733e2005-09-01 18:26:17 -04001632 if (port >= MV_PORTS_PER_HC) {
1633 shift++; /* skip bit 8 in the HC Main IRQ reg */
1634 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001635 have_err_bits = ((PORT0_ERR << shift) & relevant);
1636
1637 if (unlikely(have_err_bits)) {
1638 struct ata_queued_cmd *qc;
1639
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001640 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001641 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1642 continue;
1643
1644 mv_err_intr(ap, qc);
1645 continue;
Brett Russ20f733e2005-09-01 18:26:17 -04001646 }
Jeff Garzik8b260242005-11-12 12:32:50 -05001647
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001648 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1649
1650 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1651 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1652 mv_intr_edma(ap);
1653 } else {
1654 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1655 mv_intr_pio(ap);
Brett Russ20f733e2005-09-01 18:26:17 -04001656 }
1657 }
1658 VPRINTK("EXIT\n");
1659}
1660
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001661static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1662{
1663 struct ata_port *ap;
1664 struct ata_queued_cmd *qc;
1665 struct ata_eh_info *ehi;
1666 unsigned int i, err_mask, printed = 0;
1667 u32 err_cause;
1668
1669 err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS);
1670
1671 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1672 err_cause);
1673
1674 DPRINTK("All regs @ PCI error\n");
1675 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1676
1677 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1678
1679 for (i = 0; i < host->n_ports; i++) {
1680 ap = host->ports[i];
Tejun Heo936fd732007-08-06 18:36:23 +09001681 if (!ata_link_offline(&ap->link)) {
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001682 ehi = &ap->link.eh_info;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001683 ata_ehi_clear_desc(ehi);
1684 if (!printed++)
1685 ata_ehi_push_desc(ehi,
1686 "PCI err cause 0x%08x", err_cause);
1687 err_mask = AC_ERR_HOST_BUS;
1688 ehi->action = ATA_EH_HARDRESET;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001689 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001690 if (qc)
1691 qc->err_mask |= err_mask;
1692 else
1693 ehi->err_mask |= err_mask;
1694
1695 ata_port_freeze(ap);
1696 }
1697 }
1698}
1699
/**
 * mv_interrupt - Main interrupt event handler
 * @irq: unused
 * @dev_instance: private data; in this case the host structure
 *
 * Read the read only register to determine if any host
 * controllers have pending interrupts.  If so, call lower level
 * routine to handle.  Also check for PCI errors which are only
 * reported here.
 *
 * LOCKING:
 * This routine holds the host lock while processing pending
 * interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	u32 irq_stat;

	/* NOTE(review): main IRQ cause is read before taking host->lock;
	 * presumably safe since we only dispatch on a snapshot - confirm.
	 */
	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat))
		return IRQ_NONE;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);
	spin_lock(&host->lock);

	/* PCI errors preempt all per-HC handling */
	if (unlikely(irq_stat & PCI_ERR)) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1751
Jeff Garzikc9d39132005-11-13 17:47:51 -05001752static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1753{
1754 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1755 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1756
1757 return hc_mmio + ofs;
1758}
1759
1760static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1761{
1762 unsigned int ofs;
1763
1764 switch (sc_reg_in) {
1765 case SCR_STATUS:
1766 case SCR_ERROR:
1767 case SCR_CONTROL:
1768 ofs = sc_reg_in * sizeof(u32);
1769 break;
1770 default:
1771 ofs = 0xffffffffU;
1772 break;
1773 }
1774 return ofs;
1775}
1776
Tejun Heoda3dbb12007-07-16 14:29:40 +09001777static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001778{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001779 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1780 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001781 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1782
Tejun Heoda3dbb12007-07-16 14:29:40 +09001783 if (ofs != 0xffffffffU) {
1784 *val = readl(addr + ofs);
1785 return 0;
1786 } else
1787 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001788}
1789
Tejun Heoda3dbb12007-07-16 14:29:40 +09001790static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001791{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001792 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1793 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001794 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1795
Tejun Heoda3dbb12007-07-16 14:29:40 +09001796 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001797 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001798 return 0;
1799 } else
1800 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001801}
1802
Jeff Garzik522479f2005-11-12 22:14:02 -05001803static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1804{
Jeff Garzik522479f2005-11-12 22:14:02 -05001805 int early_5080;
1806
Auke Kok44c10132007-06-08 15:46:36 -07001807 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
Jeff Garzik522479f2005-11-12 22:14:02 -05001808
1809 if (!early_5080) {
1810 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1811 tmp |= (1 << 0);
1812 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1813 }
1814
1815 mv_reset_pci_bus(pdev, mmio);
1816}
1817
/* Program the 50xx flash-control register with the value used by the
 * Marvell vendor driver; exact bit semantics are not documented here.
 */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1822
Jeff Garzik47c2b672005-11-12 21:13:17 -05001823static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001824 void __iomem *mmio)
1825{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001826 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1827 u32 tmp;
1828
1829 tmp = readl(phy_mmio + MV5_PHY_MODE);
1830
1831 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1832 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001833}
1834
/* Configure GPIO / expansion-ROM BAR control for LED operation on 50xx. */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): this sets every bit EXCEPT bit 0, while
	 * mv5_reset_bus() does tmp |= (1 << 0).  Suspect the intent was
	 * tmp &= ~(1 << 0) (clear bit 0) - confirm against the datasheet
	 * before changing, since hardware behavior is unverifiable here.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1847
/* Apply 50xx PHY errata workarounds for one port and restore the
 * pre-emphasis / amplitude values cached by mv5_read_preamp().
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		/* 50XXB0 errata: adjust LT mode and PHY control bits */
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* clear the pre/amp field, then merge in the cached values */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1873
Jeff Garzikc9d39132005-11-13 17:47:51 -05001874
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Disable EDMA, reset the channel, then zero out all per-port EDMA
 * registers for one 50xx port.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1901
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Zero per-HC registers 0x00c-0x018 and rewrite the register at 0x20
 * for one 50xx host controller.
 * NOTE(review): these offsets and the 0x1c1c1c1c/0x03030303 masks are
 * undocumented here - presumably from the vendor driver; confirm.
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
1920
1921static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1922 unsigned int n_hc)
1923{
1924 unsigned int hc, port;
1925
1926 for (hc = 0; hc < n_hc; hc++) {
1927 for (port = 0; port < MV_PORTS_PER_HC; port++)
1928 mv5_reset_hc_port(hpriv, mmio,
1929 (hc * MV_PORTS_PER_HC) + port);
1930
1931 mv5_reset_one_hc(hpriv, mmio, hc);
1932 }
1933
1934 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05001935}
1936
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
/* Reset PCI-side state shared by all ports: clear the PCI mode field,
 * zero timers/masks/error latches, and set the crossbar timeout.
 */
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;	/* clear bits 23:16 */
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
1960
1961static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1962{
1963 u32 tmp;
1964
1965 mv5_reset_flash(hpriv, mmio);
1966
1967 tmp = readl(mmio + MV_GPIO_PORT_CTL);
1968 tmp &= 0x3;
1969 tmp |= (1 << 5) | (1 << 6);
1970 writel(tmp, mmio + MV_GPIO_PORT_CTL);
1971}
1972
/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @hpriv: host private data (unused here)
 * @mmio: base address of the HBA
 * @n_hc: number of host controllers (unused here)
 *
 * This routine only applies to 6xxx parts.
 *
 * Returns 0 on success, 1 on failure (PCI master would not flush,
 * or the global soft reset bit could not be set/cleared).
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* poll up to ~1ms for the PCI master to go idle */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t) {
			break;
		}
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
2037
Jeff Garzik47c2b672005-11-12 21:13:17 -05002038static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002039 void __iomem *mmio)
2040{
2041 void __iomem *port_mmio;
2042 u32 tmp;
2043
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002044 tmp = readl(mmio + MV_RESET_CFG);
2045 if ((tmp & (1 << 0)) == 0) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002046 hpriv->signal[idx].amps = 0x7 << 8;
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002047 hpriv->signal[idx].pre = 0x1 << 5;
2048 return;
2049 }
2050
2051 port_mmio = mv_port_base(mmio, idx);
2052 tmp = readl(port_mmio + PHY_MODE2);
2053
2054 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2055 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2056}
2057
/* Write the LED configuration (bits 5 and 6) to the 60xx GPIO port
 * control register; bit semantics are not documented here.
 */
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
2062
/* Apply 60xx/IIE PHY errata workarounds (60X1B2/60X1C0) for one port
 * and restore the pre-emphasis / amplitude values cached by
 * mv6_read_preamp().
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		/* pulse bit 31 with bit 16 cleared, then restore both */
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* 60X1B2: register 0x310 must be saved around the
		 * PHY_MODE4 update and written back afterwards
		 */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
2128
/* Assert and release ATA_RST on one channel's EDMA command register,
 * applying the generation-specific interface-control setup (gen II) and
 * PHY errata fixups, with the delays the hardware requires.
 */
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}
2155
Jeff Garzikc9d39132005-11-13 17:47:51 -05002156/**
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002157 * mv_phy_reset - Perform eDMA reset followed by COMRESET
Jeff Garzikc9d39132005-11-13 17:47:51 -05002158 * @ap: ATA channel to manipulate
2159 *
2160 * Part of this is taken from __sata_phy_reset and modified to
2161 * not sleep since this routine gets called from interrupt level.
2162 *
2163 * LOCKING:
2164 * Inherited from caller. This is coded to safe to call at
2165 * interrupt level, i.e. it does not sleep.
2166 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002167static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2168 unsigned long deadline)
Jeff Garzikc9d39132005-11-13 17:47:51 -05002169{
2170 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikcca39742006-08-24 03:19:22 -04002171 struct mv_host_priv *hpriv = ap->host->private_data;
Jeff Garzikc9d39132005-11-13 17:47:51 -05002172 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzik22374672005-11-17 10:59:48 -05002173 int retry = 5;
2174 u32 sstatus;
Jeff Garzikc9d39132005-11-13 17:47:51 -05002175
2176 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002177
Tejun Heoda3dbb12007-07-16 14:29:40 +09002178#ifdef DEBUG
2179 {
2180 u32 sstatus, serror, scontrol;
2181
2182 mv_scr_read(ap, SCR_STATUS, &sstatus);
2183 mv_scr_read(ap, SCR_ERROR, &serror);
2184 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2185 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2186 "SCtrl 0x%08x\n", status, serror, scontrol);
2187 }
2188#endif
Brett Russ20f733e2005-09-01 18:26:17 -04002189
Jeff Garzik22374672005-11-17 10:59:48 -05002190 /* Issue COMRESET via SControl */
2191comreset_retry:
Tejun Heo936fd732007-08-06 18:36:23 +09002192 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002193 msleep(1);
Jeff Garzik22374672005-11-17 10:59:48 -05002194
Tejun Heo936fd732007-08-06 18:36:23 +09002195 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002196 msleep(20);
Jeff Garzik22374672005-11-17 10:59:48 -05002197
Brett Russ31961942005-09-30 01:36:00 -04002198 do {
Tejun Heo936fd732007-08-06 18:36:23 +09002199 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
Andres Salomon62f1d0e2006-09-11 08:51:05 -04002200 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
Brett Russ31961942005-09-30 01:36:00 -04002201 break;
Jeff Garzik22374672005-11-17 10:59:48 -05002202
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002203 msleep(1);
Jeff Garzikc5d3e452007-07-11 18:30:50 -04002204 } while (time_before(jiffies, deadline));
Brett Russ20f733e2005-09-01 18:26:17 -04002205
Jeff Garzik22374672005-11-17 10:59:48 -05002206 /* work around errata */
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002207 if (IS_GEN_II(hpriv) &&
Jeff Garzik22374672005-11-17 10:59:48 -05002208 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2209 (retry-- > 0))
2210 goto comreset_retry;
Jeff Garzik095fec82005-11-12 09:50:49 -05002211
Tejun Heoda3dbb12007-07-16 14:29:40 +09002212#ifdef DEBUG
2213 {
2214 u32 sstatus, serror, scontrol;
2215
2216 mv_scr_read(ap, SCR_STATUS, &sstatus);
2217 mv_scr_read(ap, SCR_ERROR, &serror);
2218 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2219 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2220 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2221 }
2222#endif
Brett Russ31961942005-09-30 01:36:00 -04002223
Tejun Heo936fd732007-08-06 18:36:23 +09002224 if (ata_link_offline(&ap->link)) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002225 *class = ATA_DEV_NONE;
Brett Russ20f733e2005-09-01 18:26:17 -04002226 return;
2227 }
2228
Jeff Garzik22374672005-11-17 10:59:48 -05002229 /* even after SStatus reflects that device is ready,
2230 * it seems to take a while for link to be fully
2231 * established (and thus Status no longer 0x80/0x7F),
2232 * so we poll a bit for that, here.
2233 */
2234 retry = 20;
2235 while (1) {
2236 u8 drv_stat = ata_check_status(ap);
2237 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2238 break;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002239 msleep(500);
Jeff Garzik22374672005-11-17 10:59:48 -05002240 if (retry-- <= 0)
2241 break;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002242 if (time_after(jiffies, deadline))
2243 break;
Jeff Garzik22374672005-11-17 10:59:48 -05002244 }
2245
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002246 /* FIXME: if we passed the deadline, the following
2247 * code probably produces an invalid result
2248 */
Brett Russ20f733e2005-09-01 18:26:17 -04002249
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002250 /* finally, read device signature from TF registers */
2251 *class = ata_dev_try_classify(ap, 0, NULL);
Jeff Garzik095fec82005-11-12 09:50:49 -05002252
2253 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2254
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002255 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
Jeff Garzik095fec82005-11-12 09:50:49 -05002256
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002257 VPRINTK("EXIT\n");
Brett Russ20f733e2005-09-01 18:26:17 -04002258}
2259
Tejun Heocc0680a2007-08-06 18:36:23 +09002260static int mv_prereset(struct ata_link *link, unsigned long deadline)
Jeff Garzik22374672005-11-17 10:59:48 -05002261{
Tejun Heocc0680a2007-08-06 18:36:23 +09002262 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002263 struct mv_port_priv *pp = ap->private_data;
Tejun Heocc0680a2007-08-06 18:36:23 +09002264 struct ata_eh_context *ehc = &link->eh_context;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002265 int rc;
Jeff Garzik0ea9e172007-07-13 17:06:45 -04002266
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002267 rc = mv_stop_dma(ap);
2268 if (rc)
2269 ehc->i.action |= ATA_EH_HARDRESET;
2270
2271 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2272 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2273 ehc->i.action |= ATA_EH_HARDRESET;
2274 }
2275
2276 /* if we're about to do hardreset, nothing more to do */
2277 if (ehc->i.action & ATA_EH_HARDRESET)
2278 return 0;
2279
Tejun Heocc0680a2007-08-06 18:36:23 +09002280 if (ata_link_online(link))
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002281 rc = ata_wait_ready(ap, deadline);
2282 else
2283 rc = -ENODEV;
2284
2285 return rc;
Jeff Garzik22374672005-11-17 10:59:48 -05002286}
2287
/*
 * mv_hardreset - EH hardreset hook
 * @link: link to hard-reset
 * @class: out parameter; receives the detected device class
 * @deadline: jiffies deadline passed through to the PHY reset
 *
 * Sequence matters: stop EDMA first, then reset the channel logic,
 * then bring the PHY back up and classify the device.  Always
 * returns 0; classification failure is reported via *class.
 */
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	return 0;
}
2303
Tejun Heocc0680a2007-08-06 18:36:23 +09002304static void mv_postreset(struct ata_link *link, unsigned int *classes)
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002305{
Tejun Heocc0680a2007-08-06 18:36:23 +09002306 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002307 u32 serr;
2308
2309 /* print link status */
Tejun Heocc0680a2007-08-06 18:36:23 +09002310 sata_print_link_status(link);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002311
2312 /* clear SError */
Tejun Heocc0680a2007-08-06 18:36:23 +09002313 sata_scr_read(link, SCR_ERROR, &serr);
2314 sata_scr_write_flush(link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002315
2316 /* bail out if no device is present */
2317 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2318 DPRINTK("EXIT, no device\n");
2319 return;
2320 }
2321
2322 /* set up device control */
2323 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2324}
2325
/*
 * mv_error_handler - libata error-handler entry point for this driver.
 * @ap: port to recover
 *
 * Runs the standard libata EH state machine with Marvell-specific
 * prereset/hardreset/postreset hooks; softreset is the generic one.
 */
static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}
2331
/*
 * mv_post_int_cmd - hook run after an EH-issued internal command.
 * @qc: the completed internal command
 *
 * Ensure EDMA is stopped so the port is left in a state where
 * register-based (PIO) access works for subsequent EH steps.
 */
static void mv_post_int_cmd(struct ata_queued_cmd *qc)
{
	mv_stop_dma(qc->ap);
}
2336
/*
 * mv_eh_freeze - libata freeze hook: mask this port's interrupts.
 * @ap: port to freeze
 *
 * Clears the port's err/done bits in the chip's main IRQ mask so the
 * frozen port cannot raise interrupts while EH owns it.  Each port
 * owns a 2-bit field in the mask; ports behind the second host
 * controller (port_no > 3) are shifted by one extra bit — presumably
 * to skip a per-HC summary bit in the register layout (TODO confirm
 * against the register definitions earlier in this file).
 */
static void mv_eh_freeze(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	u32 tmp, mask;
	unsigned int shift;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;

	mask = 0x3 << shift;

	/* disable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
2356
/*
 * mv_eh_thaw - libata thaw hook: clear stale events, unmask interrupts.
 * @ap: port to thaw
 *
 * Reverse of mv_eh_freeze(): first discard any EDMA error and HC
 * interrupt-cause bits that latched while the port was frozen, then
 * re-enable the port's err/done bits in the main IRQ mask.  The
 * clear-before-unmask order prevents an immediate spurious interrupt.
 */
static void mv_eh_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	/* ports 0-3 sit on HC 0, ports 4-7 on HC 1 */
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;	/* port index relative to its HC */
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
2389
/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup: the taskfile shadow registers live in a
	 * contiguous block of u32 slots indexed by the ATA_REG_* values.
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions.
	 * SError bits clear on a write of 1, so write back what we read.
	 */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
2438
/**
 * mv_chip_id - select chip generation ops and errata flags
 * @host: ATA host being initialized
 * @board_idx: index into the driver's board table (chip_XXXX constants)
 *
 * Picks the Gen-I (50xx) or Gen-II/IIE (60xx/6042/7042) hardware-ops
 * vector and records which errata workarounds apply for this PCI
 * revision in hpriv->hp_flags.  Unknown revisions get the newest
 * known workaround set, with a warning.
 *
 * RETURNS:
 * 0 on success, 1 on an invalid board index.
 */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch(board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		/* errata selection is keyed off the PCI revision ID */
		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
	case chip_6042:
		/* Gen-IIE shares the Gen-II ops vector */
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;

	return 0;
}
2534
/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 * @board_idx: controller index
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * The order below is deliberate: interrupts are globally masked
 * first, the chip is identified and reset, per-port setup runs, and
 * only then are interrupt causes cleared and the mask reopened.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	/* pick generation ops + errata flags; bail on unknown board */
	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* capture pre-reset PHY state for each port */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	/* set up shadow registers and clear per-port interrupt state */
	for (port = 0; port < host->n_ports; port++) {
		void __iomem *port_mmio = mv_port_base(mmio, port);
		mv_port_init(&host->ports[port]->ioaddr, port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);

	/* Gen-I chips use a different main-mask layout than Gen-II/IIE */
	if (IS_GEN_I(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));

done:
	return rc;
}
2624
Brett Russ05b308e2005-10-05 17:08:53 -04002625/**
2626 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002627 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002628 *
2629 * FIXME: complete this.
2630 *
2631 * LOCKING:
2632 * Inherited from caller.
2633 */
Tejun Heo4447d352007-04-17 23:44:08 +09002634static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002635{
Tejun Heo4447d352007-04-17 23:44:08 +09002636 struct pci_dev *pdev = to_pci_dev(host->dev);
2637 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07002638 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002639 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002640
2641 /* Use this to determine the HW stepping of the chip so we know
2642 * what errata to workaround
2643 */
Brett Russ31961942005-09-30 01:36:00 -04002644 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2645 if (scc == 0)
2646 scc_s = "SCSI";
2647 else if (scc == 0x01)
2648 scc_s = "RAID";
2649 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002650 scc_s = "?";
2651
2652 if (IS_GEN_I(hpriv))
2653 gen = "I";
2654 else if (IS_GEN_II(hpriv))
2655 gen = "II";
2656 else if (IS_GEN_IIE(hpriv))
2657 gen = "IIE";
2658 else
2659 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002660
Jeff Garzika9524a72005-10-30 14:39:11 -05002661 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002662 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2663 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002664 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2665}
2666
/**
 * mv_init_one - handle a positive probe of a Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * Allocates the ata_host and driver-private data, maps BARs via the
 * managed (devm/pcim) API so cleanup is automatic on failure,
 * initializes the adapter, enables MSI or legacy INTx, and activates
 * the host.  Returns 0 on success or a negative errno.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	/* print the driver banner only on the first probed device */
	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);	/* keep device held if BAR is busy */
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	/* enable 64-bit DMA if the platform supports it */
	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts: fall back to INTx if MSI is off or fails */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
2729
/* Module entry point: register this PCI driver with the PCI core. */
static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}
2734
/* Module exit point: unregister the PCI driver. */
static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}
2739
2740MODULE_AUTHOR("Brett Russ");
2741MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2742MODULE_LICENSE("GPL");
2743MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2744MODULE_VERSION(DRV_VERSION);
2745
Jeff Garzikddef9bb2006-02-02 16:17:06 -05002746module_param(msi, int, 0444);
2747MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
2748
Brett Russ20f733e2005-09-01 18:26:17 -04002749module_init(mv_init);
2750module_exit(mv_exit);