blob: 7f1b13e89cf74c5cb75cd8f67b2a19abf816d88c [file] [log] [blame]
/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
23
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.

*/
59
60
Brett Russ20f733e2005-09-01 18:26:17 -040061#include <linux/kernel.h>
62#include <linux/module.h>
63#include <linux/pci.h>
64#include <linux/init.h>
65#include <linux/blkdev.h>
66#include <linux/delay.h>
67#include <linux/interrupt.h>
Brett Russ20f733e2005-09-01 18:26:17 -040068#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050069#include <linux/device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040070#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050071#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040072#include <scsi/scsi_device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040073#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040074
#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.01"
Brett Russ20f733e2005-09-01 18:26:17 -040077
78enum {
79 /* BAR's are enumerated in terms of pci_resource_start() terms */
80 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
81 MV_IO_BAR = 2, /* offset 0x18: IO space */
82 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
83
84 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
85 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
86
87 MV_PCI_REG_BASE = 0,
88 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
Mark Lord615ab952006-05-19 16:24:56 -040089 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
90 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
91 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
92 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
93 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
94
Brett Russ20f733e2005-09-01 18:26:17 -040095 MV_SATAHC0_REG_BASE = 0x20000,
Jeff Garzik522479f2005-11-12 22:14:02 -050096 MV_FLASH_CTL = 0x1046c,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -050097 MV_GPIO_PORT_CTL = 0x104f0,
98 MV_RESET_CFG = 0x180d8,
Brett Russ20f733e2005-09-01 18:26:17 -040099
100 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
101 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
102 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
103 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
104
Brett Russ31961942005-09-30 01:36:00 -0400105 MV_MAX_Q_DEPTH = 32,
106 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
107
108 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
109 * CRPB needs alignment on a 256B boundary. Size == 256B
110 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
111 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
112 */
113 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
114 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
115 MV_MAX_SG_CT = 176,
116 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
117 MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
118
Brett Russ20f733e2005-09-01 18:26:17 -0400119 MV_PORTS_PER_HC = 4,
120 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
121 MV_PORT_HC_SHIFT = 2,
Brett Russ31961942005-09-30 01:36:00 -0400122 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
Brett Russ20f733e2005-09-01 18:26:17 -0400123 MV_PORT_MASK = 3,
124
125 /* Host Flags */
126 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
127 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400128 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400129 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
130 ATA_FLAG_PIO_POLLING,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500131 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
Brett Russ20f733e2005-09-01 18:26:17 -0400132
Brett Russ31961942005-09-30 01:36:00 -0400133 CRQB_FLAG_READ = (1 << 0),
134 CRQB_TAG_SHIFT = 1,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400135 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
136 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
Brett Russ31961942005-09-30 01:36:00 -0400137 CRQB_CMD_ADDR_SHIFT = 8,
138 CRQB_CMD_CS = (0x2 << 11),
139 CRQB_CMD_LAST = (1 << 15),
140
141 CRPB_FLAG_STATUS_SHIFT = 8,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400142 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
143 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
Brett Russ31961942005-09-30 01:36:00 -0400144
145 EPRD_FLAG_END_OF_TBL = (1 << 31),
146
Brett Russ20f733e2005-09-01 18:26:17 -0400147 /* PCI interface registers */
148
Brett Russ31961942005-09-30 01:36:00 -0400149 PCI_COMMAND_OFS = 0xc00,
150
Brett Russ20f733e2005-09-01 18:26:17 -0400151 PCI_MAIN_CMD_STS_OFS = 0xd30,
152 STOP_PCI_MASTER = (1 << 2),
153 PCI_MASTER_EMPTY = (1 << 3),
154 GLOB_SFT_RST = (1 << 4),
155
Jeff Garzik522479f2005-11-12 22:14:02 -0500156 MV_PCI_MODE = 0xd00,
157 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
158 MV_PCI_DISC_TIMER = 0xd04,
159 MV_PCI_MSI_TRIGGER = 0xc38,
160 MV_PCI_SERR_MASK = 0xc28,
161 MV_PCI_XBAR_TMOUT = 0x1d04,
162 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
163 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
164 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
165 MV_PCI_ERR_COMMAND = 0x1d50,
166
167 PCI_IRQ_CAUSE_OFS = 0x1d58,
168 PCI_IRQ_MASK_OFS = 0x1d5c,
Brett Russ20f733e2005-09-01 18:26:17 -0400169 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
170
171 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
172 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
173 PORT0_ERR = (1 << 0), /* shift by port # */
174 PORT0_DONE = (1 << 1), /* shift by port # */
175 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
176 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
177 PCI_ERR = (1 << 18),
178 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
179 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500180 PORTS_0_3_COAL_DONE = (1 << 8),
181 PORTS_4_7_COAL_DONE = (1 << 17),
Brett Russ20f733e2005-09-01 18:26:17 -0400182 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
183 GPIO_INT = (1 << 22),
184 SELF_INT = (1 << 23),
185 TWSI_INT = (1 << 24),
186 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500187 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
Jeff Garzik8b260242005-11-12 12:32:50 -0500188 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
Brett Russ20f733e2005-09-01 18:26:17 -0400189 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
190 HC_MAIN_RSVD),
Jeff Garzikfb621e22007-02-25 04:19:45 -0500191 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
192 HC_MAIN_RSVD_5),
Brett Russ20f733e2005-09-01 18:26:17 -0400193
194 /* SATAHC registers */
195 HC_CFG_OFS = 0,
196
197 HC_IRQ_CAUSE_OFS = 0x14,
Brett Russ31961942005-09-30 01:36:00 -0400198 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
Brett Russ20f733e2005-09-01 18:26:17 -0400199 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
200 DEV_IRQ = (1 << 8), /* shift by port # */
201
202 /* Shadow block registers */
Brett Russ31961942005-09-30 01:36:00 -0400203 SHD_BLK_OFS = 0x100,
204 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
Brett Russ20f733e2005-09-01 18:26:17 -0400205
206 /* SATA registers */
207 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
208 SATA_ACTIVE_OFS = 0x350,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500209 PHY_MODE3 = 0x310,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500210 PHY_MODE4 = 0x314,
211 PHY_MODE2 = 0x330,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500212 MV5_PHY_MODE = 0x74,
213 MV5_LT_MODE = 0x30,
214 MV5_PHY_CTL = 0x0C,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500215 SATA_INTERFACE_CTL = 0x050,
216
217 MV_M2_PREAMP_MASK = 0x7e0,
Brett Russ20f733e2005-09-01 18:26:17 -0400218
219 /* Port registers */
220 EDMA_CFG_OFS = 0,
Brett Russ31961942005-09-30 01:36:00 -0400221 EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */
222 EDMA_CFG_NCQ = (1 << 5),
223 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
224 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
225 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
Brett Russ20f733e2005-09-01 18:26:17 -0400226
227 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
228 EDMA_ERR_IRQ_MASK_OFS = 0xc,
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400229 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
230 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
231 EDMA_ERR_DEV = (1 << 2), /* device error */
232 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
233 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
234 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400235 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
236 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400237 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400238 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400239 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
240 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
241 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
242 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
243 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
Brett Russ20f733e2005-09-01 18:26:17 -0400244 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400245 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
246 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
247 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
248 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400249 EDMA_ERR_OVERRUN_5 = (1 << 5),
250 EDMA_ERR_UNDERRUN_5 = (1 << 6),
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400251 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
252 EDMA_ERR_PRD_PAR |
253 EDMA_ERR_DEV_DCON |
254 EDMA_ERR_DEV_CON |
255 EDMA_ERR_SERR |
256 EDMA_ERR_SELF_DIS |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400257 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400258 EDMA_ERR_CRPB_PAR |
259 EDMA_ERR_INTRL_PAR |
260 EDMA_ERR_IORDY |
261 EDMA_ERR_LNK_CTRL_RX_2 |
262 EDMA_ERR_LNK_DATA_RX |
263 EDMA_ERR_LNK_DATA_TX |
264 EDMA_ERR_TRANS_PROTO,
265 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
266 EDMA_ERR_PRD_PAR |
267 EDMA_ERR_DEV_DCON |
268 EDMA_ERR_DEV_CON |
269 EDMA_ERR_OVERRUN_5 |
270 EDMA_ERR_UNDERRUN_5 |
271 EDMA_ERR_SELF_DIS_5 |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400272 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400273 EDMA_ERR_CRPB_PAR |
274 EDMA_ERR_INTRL_PAR |
275 EDMA_ERR_IORDY,
Brett Russ20f733e2005-09-01 18:26:17 -0400276
Brett Russ31961942005-09-30 01:36:00 -0400277 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
278 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400279
280 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
281 EDMA_REQ_Q_PTR_SHIFT = 5,
282
283 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
284 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
285 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400286 EDMA_RSP_Q_PTR_SHIFT = 3,
287
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400288 EDMA_CMD_OFS = 0x28, /* EDMA command register */
289 EDMA_EN = (1 << 0), /* enable EDMA */
290 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
291 ATA_RST = (1 << 2), /* reset trans/link/phy */
Brett Russ20f733e2005-09-01 18:26:17 -0400292
Jeff Garzikc9d39132005-11-13 17:47:51 -0500293 EDMA_IORDY_TMOUT = 0x34,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500294 EDMA_ARB_CFG = 0x38,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500295
Brett Russ31961942005-09-30 01:36:00 -0400296 /* Host private flags (hp_flags) */
297 MV_HP_FLAG_MSI = (1 << 0),
Jeff Garzik47c2b672005-11-12 21:13:17 -0500298 MV_HP_ERRATA_50XXB0 = (1 << 1),
299 MV_HP_ERRATA_50XXB2 = (1 << 2),
300 MV_HP_ERRATA_60X1B2 = (1 << 3),
301 MV_HP_ERRATA_60X1C0 = (1 << 4),
Jeff Garzike4e7b892006-01-31 12:18:41 -0500302 MV_HP_ERRATA_XX42A0 = (1 << 5),
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400303 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
304 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
305 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
Brett Russ20f733e2005-09-01 18:26:17 -0400306
Brett Russ31961942005-09-30 01:36:00 -0400307 /* Port private flags (pp_flags) */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400308 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
309 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
Brett Russ31961942005-09-30 01:36:00 -0400310};
311
/* Chip-generation tests on host private flags (see MV_HP_GEN_*). */
#define IS_GEN_I(hpriv)		((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill-sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
330
/* Chip variant; used as index into mv_port_info[] (see mv_pci_tbl). */
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};
Brett Russ31961942005-09-30 01:36:00 -0400341/* Command ReQuest Block: 32B */
342struct mv_crqb {
Mark Lorde1469872006-05-22 19:02:03 -0400343 __le32 sg_addr;
344 __le32 sg_addr_hi;
345 __le16 ctrl_flags;
346 __le16 ata_cmd[11];
Brett Russ31961942005-09-30 01:36:00 -0400347};
348
Jeff Garzike4e7b892006-01-31 12:18:41 -0500349struct mv_crqb_iie {
Mark Lorde1469872006-05-22 19:02:03 -0400350 __le32 addr;
351 __le32 addr_hi;
352 __le32 flags;
353 __le32 len;
354 __le32 ata_cmd[4];
Jeff Garzike4e7b892006-01-31 12:18:41 -0500355};
356
Brett Russ31961942005-09-30 01:36:00 -0400357/* Command ResPonse Block: 8B */
358struct mv_crpb {
Mark Lorde1469872006-05-22 19:02:03 -0400359 __le16 id;
360 __le16 flags;
361 __le32 tmstmp;
Brett Russ31961942005-09-30 01:36:00 -0400362};
363
364/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
365struct mv_sg {
Mark Lorde1469872006-05-22 19:02:03 -0400366 __le32 addr;
367 __le32 flags_size;
368 __le32 addr_hi;
369 __le32 reserved;
Brett Russ20f733e2005-09-01 18:26:17 -0400370};
371
372struct mv_port_priv {
Brett Russ31961942005-09-30 01:36:00 -0400373 struct mv_crqb *crqb;
374 dma_addr_t crqb_dma;
375 struct mv_crpb *crpb;
376 dma_addr_t crpb_dma;
377 struct mv_sg *sg_tbl;
378 dma_addr_t sg_tbl_dma;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400379
380 unsigned int req_idx;
381 unsigned int resp_idx;
382
Brett Russ31961942005-09-30 01:36:00 -0400383 u32 pp_flags;
Brett Russ20f733e2005-09-01 18:26:17 -0400384};
385
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500386struct mv_port_signal {
387 u32 amps;
388 u32 pre;
389};
390
Jeff Garzik47c2b672005-11-12 21:13:17 -0500391struct mv_host_priv;
392struct mv_hw_ops {
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500393 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
394 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500395 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
396 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
397 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500398 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
399 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500400 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
401 void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500402};
403
Brett Russ20f733e2005-09-01 18:26:17 -0400404struct mv_host_priv {
Brett Russ31961942005-09-30 01:36:00 -0400405 u32 hp_flags;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500406 struct mv_port_signal signal[8];
Jeff Garzik47c2b672005-11-12 21:13:17 -0500407 const struct mv_hw_ops *ops;
Brett Russ20f733e2005-09-01 18:26:17 -0400408};
409
410static void mv_irq_clear(struct ata_port *ap);
Tejun Heoda3dbb12007-07-16 14:29:40 +0900411static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
412static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
413static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
414static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Brett Russ31961942005-09-30 01:36:00 -0400415static int mv_port_start(struct ata_port *ap);
416static void mv_port_stop(struct ata_port *ap);
417static void mv_qc_prep(struct ata_queued_cmd *qc);
Jeff Garzike4e7b892006-01-31 12:18:41 -0500418static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
Tejun Heo9a3d9eb2006-01-23 13:09:36 +0900419static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400420static void mv_error_handler(struct ata_port *ap);
421static void mv_post_int_cmd(struct ata_queued_cmd *qc);
422static void mv_eh_freeze(struct ata_port *ap);
423static void mv_eh_thaw(struct ata_port *ap);
Brett Russ20f733e2005-09-01 18:26:17 -0400424static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
425
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500426static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
427 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500428static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
429static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
430 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500431static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
432 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500433static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
434static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500435
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500436static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
437 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500438static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
439static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
440 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500441static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
442 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500443static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
444static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500445static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
446 unsigned int port_no);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500447
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400448static struct scsi_host_template mv5_sht = {
Brett Russ20f733e2005-09-01 18:26:17 -0400449 .module = THIS_MODULE,
450 .name = DRV_NAME,
451 .ioctl = ata_scsi_ioctl,
452 .queuecommand = ata_scsi_queuecmd,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400453 .can_queue = ATA_DEF_QUEUE,
454 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400455 .sg_tablesize = MV_MAX_SG_CT / 2,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400456 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
457 .emulated = ATA_SHT_EMULATED,
458 .use_clustering = 1,
459 .proc_name = DRV_NAME,
460 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzik3be6cbd2007-10-18 16:21:18 -0400461 .slave_configure = ata_scsi_slave_config,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400462 .slave_destroy = ata_scsi_slave_destroy,
463 .bios_param = ata_std_bios_param,
464};
465
466static struct scsi_host_template mv6_sht = {
467 .module = THIS_MODULE,
468 .name = DRV_NAME,
469 .ioctl = ata_scsi_ioctl,
470 .queuecommand = ata_scsi_queuecmd,
471 .can_queue = ATA_DEF_QUEUE,
Brett Russ20f733e2005-09-01 18:26:17 -0400472 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400473 .sg_tablesize = MV_MAX_SG_CT / 2,
Brett Russ20f733e2005-09-01 18:26:17 -0400474 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
475 .emulated = ATA_SHT_EMULATED,
Jeff Garzikd88184f2007-02-26 01:26:06 -0500476 .use_clustering = 1,
Brett Russ20f733e2005-09-01 18:26:17 -0400477 .proc_name = DRV_NAME,
478 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzik3be6cbd2007-10-18 16:21:18 -0400479 .slave_configure = ata_scsi_slave_config,
Tejun Heoccf68c32006-05-31 18:28:09 +0900480 .slave_destroy = ata_scsi_slave_destroy,
Brett Russ20f733e2005-09-01 18:26:17 -0400481 .bios_param = ata_std_bios_param,
Brett Russ20f733e2005-09-01 18:26:17 -0400482};
483
Jeff Garzikc9d39132005-11-13 17:47:51 -0500484static const struct ata_port_operations mv5_ops = {
Jeff Garzikc9d39132005-11-13 17:47:51 -0500485 .tf_load = ata_tf_load,
486 .tf_read = ata_tf_read,
487 .check_status = ata_check_status,
488 .exec_command = ata_exec_command,
489 .dev_select = ata_std_dev_select,
490
Jeff Garzikcffacd82007-03-09 09:46:47 -0500491 .cable_detect = ata_cable_sata,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500492
493 .qc_prep = mv_qc_prep,
494 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900495 .data_xfer = ata_data_xfer,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500496
Jeff Garzikc9d39132005-11-13 17:47:51 -0500497 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900498 .irq_on = ata_irq_on,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500499
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400500 .error_handler = mv_error_handler,
501 .post_internal_cmd = mv_post_int_cmd,
502 .freeze = mv_eh_freeze,
503 .thaw = mv_eh_thaw,
504
Jeff Garzikc9d39132005-11-13 17:47:51 -0500505 .scr_read = mv5_scr_read,
506 .scr_write = mv5_scr_write,
507
508 .port_start = mv_port_start,
509 .port_stop = mv_port_stop,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500510};
511
512static const struct ata_port_operations mv6_ops = {
Brett Russ20f733e2005-09-01 18:26:17 -0400513 .tf_load = ata_tf_load,
514 .tf_read = ata_tf_read,
515 .check_status = ata_check_status,
516 .exec_command = ata_exec_command,
517 .dev_select = ata_std_dev_select,
518
Jeff Garzikcffacd82007-03-09 09:46:47 -0500519 .cable_detect = ata_cable_sata,
Brett Russ20f733e2005-09-01 18:26:17 -0400520
Brett Russ31961942005-09-30 01:36:00 -0400521 .qc_prep = mv_qc_prep,
522 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900523 .data_xfer = ata_data_xfer,
Brett Russ20f733e2005-09-01 18:26:17 -0400524
Brett Russ20f733e2005-09-01 18:26:17 -0400525 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900526 .irq_on = ata_irq_on,
Brett Russ20f733e2005-09-01 18:26:17 -0400527
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400528 .error_handler = mv_error_handler,
529 .post_internal_cmd = mv_post_int_cmd,
530 .freeze = mv_eh_freeze,
531 .thaw = mv_eh_thaw,
532
Brett Russ20f733e2005-09-01 18:26:17 -0400533 .scr_read = mv_scr_read,
534 .scr_write = mv_scr_write,
535
Brett Russ31961942005-09-30 01:36:00 -0400536 .port_start = mv_port_start,
537 .port_stop = mv_port_stop,
Brett Russ20f733e2005-09-01 18:26:17 -0400538};
539
Jeff Garzike4e7b892006-01-31 12:18:41 -0500540static const struct ata_port_operations mv_iie_ops = {
Jeff Garzike4e7b892006-01-31 12:18:41 -0500541 .tf_load = ata_tf_load,
542 .tf_read = ata_tf_read,
543 .check_status = ata_check_status,
544 .exec_command = ata_exec_command,
545 .dev_select = ata_std_dev_select,
546
Jeff Garzikcffacd82007-03-09 09:46:47 -0500547 .cable_detect = ata_cable_sata,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500548
549 .qc_prep = mv_qc_prep_iie,
550 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900551 .data_xfer = ata_data_xfer,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500552
Jeff Garzike4e7b892006-01-31 12:18:41 -0500553 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900554 .irq_on = ata_irq_on,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500555
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400556 .error_handler = mv_error_handler,
557 .post_internal_cmd = mv_post_int_cmd,
558 .freeze = mv_eh_freeze,
559 .thaw = mv_eh_thaw,
560
Jeff Garzike4e7b892006-01-31 12:18:41 -0500561 .scr_read = mv_scr_read,
562 .scr_write = mv_scr_write,
563
564 .port_start = mv_port_start,
565 .port_stop = mv_port_stop,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500566};
567
Arjan van de Ven98ac62d2005-11-28 10:06:23 +0100568static const struct ata_port_info mv_port_info[] = {
Brett Russ20f733e2005-09-01 18:26:17 -0400569 { /* chip_504x */
Jeff Garzikcca39742006-08-24 03:19:22 -0400570 .flags = MV_COMMON_FLAGS,
Brett Russ31961942005-09-30 01:36:00 -0400571 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400572 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500573 .port_ops = &mv5_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400574 },
575 { /* chip_508x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400576 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
Brett Russ31961942005-09-30 01:36:00 -0400577 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400578 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500579 .port_ops = &mv5_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400580 },
Jeff Garzik47c2b672005-11-12 21:13:17 -0500581 { /* chip_5080 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400582 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500583 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400584 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500585 .port_ops = &mv5_ops,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500586 },
Brett Russ20f733e2005-09-01 18:26:17 -0400587 { /* chip_604x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400588 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Brett Russ31961942005-09-30 01:36:00 -0400589 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400590 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500591 .port_ops = &mv6_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400592 },
593 { /* chip_608x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400594 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
595 MV_FLAG_DUAL_HC,
Brett Russ31961942005-09-30 01:36:00 -0400596 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400597 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500598 .port_ops = &mv6_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400599 },
Jeff Garzike4e7b892006-01-31 12:18:41 -0500600 { /* chip_6042 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400601 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500602 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400603 .udma_mask = ATA_UDMA6,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500604 .port_ops = &mv_iie_ops,
605 },
606 { /* chip_7042 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400607 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500608 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400609 .udma_mask = ATA_UDMA6,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500610 .port_ops = &mv_iie_ops,
611 },
Brett Russ20f733e2005-09-01 18:26:17 -0400612};
613
Jeff Garzik3b7d6972005-11-10 11:04:11 -0500614static const struct pci_device_id mv_pci_tbl[] = {
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400615 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
616 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
617 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
618 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
Alan Coxcfbf7232007-07-09 14:38:41 +0100619 /* RocketRAID 1740/174x have different identifiers */
620 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
621 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
Brett Russ20f733e2005-09-01 18:26:17 -0400622
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400623 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
624 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
625 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
626 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
627 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
Jeff Garzik29179532005-11-11 08:08:03 -0500628
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400629 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
630
Florian Attenbergerd9f9c6b2007-07-02 17:09:29 +0200631 /* Adaptec 1430SA */
632 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
633
Olof Johanssone93f09d2007-01-18 18:39:59 -0600634 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
635
Morrison, Tom6a3d5862007-03-06 02:38:10 -0800636 /* add Marvell 7042 support */
637 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
638
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400639 { } /* terminate list */
Brett Russ20f733e2005-09-01 18:26:17 -0400640};
641
642static struct pci_driver mv_pci_driver = {
643 .name = DRV_NAME,
644 .id_table = mv_pci_tbl,
645 .probe = mv_init_one,
646 .remove = ata_pci_remove_one,
647};
648
/* Low-level hardware hooks for the 50xx-family (mv5_*) chips */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
657
/* Low-level hardware hooks for the 60xx/70xx-family (mv6_*) chips;
 * bus reset goes through the generic PCI-bus reset helper.
 */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
666
/*
 * module options
 */

/* Use PCI msi; either zero (off, default) or non-zero */
static int msi;
671
672
Jeff Garzikd88184f2007-02-26 01:26:06 -0500673/* move to PCI layer or libata core? */
674static int pci_go_64(struct pci_dev *pdev)
675{
676 int rc;
677
678 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
679 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
680 if (rc) {
681 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
682 if (rc) {
683 dev_printk(KERN_ERR, &pdev->dev,
684 "64-bit DMA enable failed\n");
685 return rc;
686 }
687 }
688 } else {
689 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
690 if (rc) {
691 dev_printk(KERN_ERR, &pdev->dev,
692 "32-bit DMA enable failed\n");
693 return rc;
694 }
695 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
696 if (rc) {
697 dev_printk(KERN_ERR, &pdev->dev,
698 "32-bit consistent DMA enable failed\n");
699 return rc;
700 }
701 }
702
703 return rc;
704}
705
Jeff Garzikddef9bb2006-02-02 16:17:06 -0500706/*
Brett Russ20f733e2005-09-01 18:26:17 -0400707 * Functions
708 */
709
/*
 * Write a register, then read it back so the PCI posted write is
 * flushed before the caller proceeds ("writel-flush").
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
715
Brett Russ20f733e2005-09-01 18:26:17 -0400716static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
717{
718 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
719}
720
/* Map a host-wide port number to its host controller (HC) index. */
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}
725
/* Map a host-wide port number to its index within its host controller. */
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
730
731static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
732 unsigned int port)
733{
734 return mv_hc_base(base, mv_hc_from_port(port));
735}
736
Brett Russ20f733e2005-09-01 18:26:17 -0400737static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
738{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500739 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500740 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500741 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400742}
743
/* Per-port MMIO register base for libata port @ap (via the primary BAR). */
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}
748
Jeff Garzikcca39742006-08-24 03:19:22 -0400749static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400750{
Jeff Garzikcca39742006-08-24 03:19:22 -0400751 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400752}
753
/* libata ->irq_clear hook: intentionally left empty for this driver. */
static void mv_irq_clear(struct ata_port *ap)
{
}
757
/**
 * mv_set_edma_ptrs - Program EDMA queue base and in/out pointer registers
 * @port_mmio: per-port MMIO register base
 * @hpriv: host private data (consulted for errata flags)
 * @pp: port private data holding the queue DMA addresses and soft indices
 *
 * Loads the hardware request (CRQB) and response (CRPB) queue base
 * registers from the coherent-DMA addresses in @pp, and seeds the
 * hardware in/out pointer registers from the driver's cached indices.
 * On chips with the XX42A0 errata, the "other side" pointer register
 * (request-out / response-in) must also carry the low 32 bits of the
 * queue base, not just the index field.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);	/* CRQB ring must be 1KB aligned */
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);	/* CRPB ring must be 256B aligned */
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
797
/**
 * mv_start_dma - Enable eDMA engine
 * @base: port base address
 * @hpriv: host private data
 * @pp: port private data
 *
 * If the engine is not already running, clear any stale EDMA error
 * interrupt causes, reload the hardware queue pointers from the
 * driver's cached state, then set EDMA_EN and mark the soft flag.
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
			 struct mv_port_priv *pp)
{
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		/* clear EDMA event indicators, if any */
		writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(base, hpriv, pp);

		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}
823
/**
 * __mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Request eDMA shutdown (the disable bit auto-clears) and poll for up
 * to ~100ms (1000 x 100us) for the engine to report itself stopped.
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.  Returns 0 on success, -EIO if the engine refuses to stop.
 *
 * LOCKING:
 * Inherited from caller.  NOTE(review): callers appear to hold
 * ap->host->lock (see mv_stop_dma()) -- confirm before relying on it.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active. The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		/* soft state says "already stopped"; hardware must agree */
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
866
/*
 * Locked wrapper around __mv_stop_dma(): takes ap->host->lock (IRQ-safe)
 * for the duration of the eDMA shutdown.  Returns __mv_stop_dma()'s result.
 */
static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}
878
#ifdef ATA_DEBUG
/**
 * mv_dump_mem - Hex-dump a range of MMIO registers (debug builds only)
 * @start: first register address to dump
 * @bytes: number of bytes to dump
 *
 * Prints the registers four 32-bit words per line, each line prefixed
 * with the address being read.
 */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	/* unsigned counters match @bytes, avoiding a signed/unsigned
	 * comparison in the loop conditions (-Wsign-compare)
	 */
	unsigned b, w;

	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
893
/**
 * mv_dump_pci_cfg - Hex-dump the start of PCI config space (debug only)
 * @pdev: PCI device whose config space to dump
 * @bytes: number of config-space bytes to dump
 *
 * Prints four 32-bit config dwords per line, each line prefixed with
 * the byte offset.  Compiles to an empty function unless ATA_DEBUG.
 */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	/* unsigned counters match @bytes, avoiding a signed/unsigned
	 * comparison in the loop conditions (-Wsign-compare)
	 */
	unsigned b, w;
	u32 dw;

	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			/* read failures are ignored; this is best-effort
			 * debug output only
			 */
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
/*
 * Debug helper: dump PCI config space, chip-global PCI registers, per-HC
 * registers, and per-port EDMA/SATA registers.  A negative @port means
 * "dump everything" (up to 8 ports / 2 HCs).  Entire body is compiled
 * away unless ATA_DEBUG is defined.
 */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		/* dump all ports/HCs */
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		/* dump just the requested port and its HC */
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n",p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n",p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
954
Brett Russ20f733e2005-09-01 18:26:17 -0400955static unsigned int mv_scr_offset(unsigned int sc_reg_in)
956{
957 unsigned int ofs;
958
959 switch (sc_reg_in) {
960 case SCR_STATUS:
961 case SCR_CONTROL:
962 case SCR_ERROR:
963 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
964 break;
965 case SCR_ACTIVE:
966 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
967 break;
968 default:
969 ofs = 0xffffffffU;
970 break;
971 }
972 return ofs;
973}
974
Tejun Heoda3dbb12007-07-16 14:29:40 +0900975static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -0400976{
977 unsigned int ofs = mv_scr_offset(sc_reg_in);
978
Tejun Heoda3dbb12007-07-16 14:29:40 +0900979 if (ofs != 0xffffffffU) {
980 *val = readl(mv_ap_base(ap) + ofs);
981 return 0;
982 } else
983 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -0400984}
985
Tejun Heoda3dbb12007-07-16 14:29:40 +0900986static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -0400987{
988 unsigned int ofs = mv_scr_offset(sc_reg_in);
989
Tejun Heoda3dbb12007-07-16 14:29:40 +0900990 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -0400991 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +0900992 return 0;
993 } else
994 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -0400995}
996
/*
 * Program the port's EDMA configuration register with per-generation
 * (Gen I / II / IIE) settings.  NCQ and queued modes are disabled in
 * all cases for now; only the non-NCQ EDMA configuration is set up.
 */
static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
{
	/* read-modify-write: preserve unrelated bits */
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable eQue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1028
/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.  One coherent-DMA chunk is carved into the CRQB
 * request ring, the CRPB response ring, and the ePRD scatter/gather
 * table.  EDMA itself is deliberately NOT enabled here.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	unsigned long flags;
	int rc;

	/* devm_*/dmam_* allocations are released automatically on
	 * device detach or probe failure -- no explicit free paths here
	 */
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	/* program the EDMA config and queue pointers under the host lock */
	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, hpriv, port_mmio);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}
1101
/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.  The DMA memory itself was obtained
 * via devm_*/dmam_* in mv_port_start(), so only the eDMA engine needs
 * explicit shutdown here.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
}
1115
/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.  Each DMA segment is
 * split so that no single ePRD entry crosses a 64KB boundary: the
 * entry's length field is masked to 16 bits, so larger or straddling
 * segments are emitted as multiple consecutive entries.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;

	mv_sg = pp->sg_tbl;
	ata_for_each_sg(sg, qc) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			/* clamp so this entry ends at the next 64KB boundary */
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	/* mark the final entry so the hardware knows where the table ends */
	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
1158
Mark Lorde1469872006-05-22 19:02:03 -04001159static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001160{
Mark Lord559eeda2006-05-19 16:40:15 -04001161 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001162 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001163 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001164}
1165
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* non-DMA protocols are issued via the shadow registers;
	 * no CRQB is built for them
	 */
	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this*/

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* CRQB carries the 64-bit DMA address of the ePRD (SG) table */
	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accomodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1257
/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * Gen IIE variant: unlike mv_qc_prep(), the whole taskfile is packed
 * directly into four 32-bit CRQB words rather than a sequence of
 * register-write command words.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* non-DMA protocols are issued via the shadow registers;
	 * no CRQB is built for them
	 */
	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
						   what we use as our tag */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1326
/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(port_mmio, hpriv, pp);

	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	pp->req_idx++;

	/* convert the soft producer index into the hardware pointer field */
	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1374
/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *      @qc: active command in flight when the error fired, or NULL;
 *           the computed err_mask is attached to @qc if present,
 *           otherwise to the port's eh_info.
 *
 *      Decode EDMA_ERR_IRQ_CAUSE, translate the bits into libata
 *      err_mask/EH-action values, clear the cause register, and then
 *      either freeze or abort the port depending on whether any of the
 *      generation-specific "freeze" bits were set.  If the hardware
 *      self-disabled EDMA, the cached MV_PP_FLAG_EDMA_EN copy is
 *      updated to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */

	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
	}

	/* generation-specific bits: 50xx (Gen I) uses a different
	 * self-disable bit and freeze mask than Gen II/IIE, and has
	 * no SERR cause bit to service here.
	 */
	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unrecognized cause bits: report something rather than nothing */
	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
1478
/* Service a PIO-mode (non-EDMA) device interrupt: read the taskfile
 * Status register (which also acks the IRQ) and complete the active
 * command unless the drive is still busy, there is no active command,
 * or the command is being polled (in which case libata owns it).
 */
static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}
1500
/* Drain the EDMA response (CRPB) ring: complete every command the
 * hardware has responded to since our last visit, then write the
 * software out-pointer back so the hardware knows the entries were
 * consumed.  On any response with error bits set, hand off to
 * mv_err_intr() and bail out without advancing the hardware pointer.
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else if (IS_GEN_II(hpriv))
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_6) & 0x3f;

		else /* IS_GEN_IIE */
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_7) & 0x3f;

		qc = ata_qc_from_tag(ap, tag);

		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
		 * bits (WARNING: might not necessarily be associated
		 * with this command), which -should- be clear
		 * if all is well
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if (unlikely(status & 0xff)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	/* tell the hardware how far we got (out_index == in_index here) */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1572
/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @relevant: port error bits relevant to this host controller
 *      @hc: which host controller we're to look at
 *
 *      Read then write clear the HC interrupt status then walk each
 *      port connected to the HC and see if it needs servicing.  Port
 *      success ints are reported in the HC interrupt status reg, the
 *      port error ints are reported in the higher level main
 *      interrupt status register and thus are passed in via the
 *      'relevant' argument.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0;

	/* HC 0 serves ports 0..MV_PORTS_PER_HC-1, HC 1 the next group */
	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	/* ack everything we just read */
	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc,relevant,hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		/* each port owns two bits in 'relevant' */
		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			/* polled commands are handled by their issuer */
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port);	/* range 0..3 */

		/* dispatch to the EDMA or PIO completion path, depending
		 * on which mode the port is currently operating in
		 */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}
1648
/* Handle a host-level PCI error: dump diagnostics, clear the PCI IRQ
 * cause register, then mark every online port with AC_ERR_HOST_BUS,
 * request a hardreset, and freeze it so EH takes over.  The cause
 * value is pushed onto the first affected port's EH description only,
 * to avoid repeating it per port.
 */
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
1687
/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat))
		return IRQ_NONE;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);
	spin_lock(&host->lock);

	/* PCI errors take priority and preempt per-HC servicing */
	if (unlikely(irq_stat & PCI_ERR)) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1739
Jeff Garzikc9d39132005-11-13 17:47:51 -05001740static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1741{
1742 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1743 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1744
1745 return hc_mmio + ofs;
1746}
1747
1748static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1749{
1750 unsigned int ofs;
1751
1752 switch (sc_reg_in) {
1753 case SCR_STATUS:
1754 case SCR_ERROR:
1755 case SCR_CONTROL:
1756 ofs = sc_reg_in * sizeof(u32);
1757 break;
1758 default:
1759 ofs = 0xffffffffU;
1760 break;
1761 }
1762 return ofs;
1763}
1764
Tejun Heoda3dbb12007-07-16 14:29:40 +09001765static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001766{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001767 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1768 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001769 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1770
Tejun Heoda3dbb12007-07-16 14:29:40 +09001771 if (ofs != 0xffffffffU) {
1772 *val = readl(addr + ofs);
1773 return 0;
1774 } else
1775 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001776}
1777
Tejun Heoda3dbb12007-07-16 14:29:40 +09001778static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001779{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001780 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1781 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001782 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1783
Tejun Heoda3dbb12007-07-16 14:29:40 +09001784 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001785 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001786 return 0;
1787 } else
1788 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001789}
1790
/* 50xx PCI bus reset.  On everything except the earliest 5080 stepping
 * (revision 0), set bit 0 of the expansion-ROM BAR control register
 * before performing the common PCI bus reset.
 */
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}
1805
/* Reset the 50xx flash controller by writing a fixed magic value
 * (presumably from the chip documentation) to the flash control reg.
 */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1810
/* Capture port @idx's current PHY pre-emphasis and amplitude settings
 * from MV5_PHY_MODE into hpriv->signal[], so mv5_phy_errata() can
 * restore them after its register rewrites.
 */
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}
1822
/* Configure GPIO/LED behavior on 50xx parts. */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): "|= ~(1 << 0)" sets every bit except bit 0 and
	 * looks like it may have been intended as "&= ~(1 << 0)" (the
	 * inverse of mv5_reset_bus()'s "|= (1 << 0)") — confirm against
	 * the chip documentation before changing; kept as-is here.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1835
/* Apply 50xx PHY errata workarounds for @port, then restore the
 * pre-emphasis/amplitude values previously saved by mv5_read_preamp().
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	/* B0-stepping APM/squelch workaround */
	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* re-apply saved pre-emphasis (bits 12:11) and amps (bits 7:5) */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1861
Jeff Garzikc9d39132005-11-13 17:47:51 -05001862
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Bring one 50xx port to a known state: disable EDMA, reset the
 * channel, then zero/initialize the EDMA register file (offsets are
 * relative to the port base; see the per-line comments).
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1889
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Reset one 50xx host controller's shared registers: clear offsets
 * 0x0c-0x18 and rewrite the register at 0x20 with fixed field values
 * (mask 0x1c1c1c1c kept, 0x03030303 forced on).
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
1908
1909static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1910 unsigned int n_hc)
1911{
1912 unsigned int hc, port;
1913
1914 for (hc = 0; hc < n_hc; hc++) {
1915 for (port = 0; port < MV_PORTS_PER_HC; port++)
1916 mv5_reset_hc_port(hpriv, mmio,
1917 (hc * MV_PORTS_PER_HC) + port);
1918
1919 mv5_reset_one_hc(hpriv, mmio, hc);
1920 }
1921
1922 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05001923}
1924
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
/* Common PCI-side reset: restore PCI mode bits 23:16 to zero, clear
 * the timers, masks and error-latch registers, and set the crossbar
 * timeout to its documented default.
 */
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
1948
/* 60xx flash reset: do the common 50xx flash reset, then additionally
 * set GPIO port control bits 5 and 6 while preserving only bits 1:0.
 */
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
1960
/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @mmio: base address of the HBA
 *      @hpriv: host private data (unused here; matches mv5_reset_hc)
 *      @n_hc: number of host controllers (unused here)
 *
 *      This routine only applies to 6xxx parts.  Follows the sequence
 *      from the PCI "main command and status register" table: stop the
 *      PCI master and wait for it to drain, assert the global soft
 *      reset, then deassert it while re-enabling the PCI master.
 *
 *      Returns 0 on success, 1 on any step timing out.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* wait up to ~1ms for the PCI master to go idle */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t) {
			break;
		}
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
2025
/* Capture port @idx's pre-emphasis and amplitude settings on 60xx
 * parts.  If MV_RESET_CFG bit 0 is clear the PHY values cannot be
 * trusted, so fall back to fixed defaults; otherwise read them from
 * the port's PHY_MODE2 register.
 */
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		/* defaults when the strap bit is not set */
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
2045
/* Configure GPIO port control for LEDs on 60xx parts (bits 5 and 6). */
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
2050
/* Apply 60xx/IIE PHY errata workarounds for @port, then restore the
 * saved pre-emphasis/amplitude values into PHY_MODE2.
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	/* NOTE: both fixups currently trigger on the same errata set */
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	/* pulse PHY_MODE2 bit 31 with bit 16 cleared, then clear both */
	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* on B2 parts, reg 0x310 is saved around the PHY_MODE4
		 * write and restored afterwards — presumably the write
		 * clobbers it on that stepping (TODO: confirm vs. errata)
		 */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
2116
/* Reset the EDMA channel for @port_no: assert ATA_RST in the EDMA
 * command register (on Gen II, first program the SATA interface
 * control per the chip spec), wait for propagation, deassert, then
 * run the chip-specific PHY errata hook.  Gen I needs an extra 1ms
 * settle delay afterwards.
 */
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}
2143
Jeff Garzikc9d39132005-11-13 17:47:51 -05002144/**
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002145 * mv_phy_reset - Perform eDMA reset followed by COMRESET
Jeff Garzikc9d39132005-11-13 17:47:51 -05002146 * @ap: ATA channel to manipulate
2147 *
2148 * Part of this is taken from __sata_phy_reset and modified to
2149 * not sleep since this routine gets called from interrupt level.
2150 *
2151 * LOCKING:
2152 * Inherited from caller. This is coded to safe to call at
2153 * interrupt level, i.e. it does not sleep.
2154 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002155static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2156 unsigned long deadline)
Jeff Garzikc9d39132005-11-13 17:47:51 -05002157{
2158 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikcca39742006-08-24 03:19:22 -04002159 struct mv_host_priv *hpriv = ap->host->private_data;
Jeff Garzikc9d39132005-11-13 17:47:51 -05002160 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzik22374672005-11-17 10:59:48 -05002161 int retry = 5;
2162 u32 sstatus;
Jeff Garzikc9d39132005-11-13 17:47:51 -05002163
2164 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002165
Tejun Heoda3dbb12007-07-16 14:29:40 +09002166#ifdef DEBUG
2167 {
2168 u32 sstatus, serror, scontrol;
2169
2170 mv_scr_read(ap, SCR_STATUS, &sstatus);
2171 mv_scr_read(ap, SCR_ERROR, &serror);
2172 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2173 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2174 "SCtrl 0x%08x\n", status, serror, scontrol);
2175 }
2176#endif
Brett Russ20f733e2005-09-01 18:26:17 -04002177
Jeff Garzik22374672005-11-17 10:59:48 -05002178 /* Issue COMRESET via SControl */
2179comreset_retry:
Tejun Heo936fd732007-08-06 18:36:23 +09002180 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002181 msleep(1);
Jeff Garzik22374672005-11-17 10:59:48 -05002182
Tejun Heo936fd732007-08-06 18:36:23 +09002183 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002184 msleep(20);
Jeff Garzik22374672005-11-17 10:59:48 -05002185
Brett Russ31961942005-09-30 01:36:00 -04002186 do {
Tejun Heo936fd732007-08-06 18:36:23 +09002187 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
Andres Salomon62f1d0e2006-09-11 08:51:05 -04002188 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
Brett Russ31961942005-09-30 01:36:00 -04002189 break;
Jeff Garzik22374672005-11-17 10:59:48 -05002190
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002191 msleep(1);
Jeff Garzikc5d3e452007-07-11 18:30:50 -04002192 } while (time_before(jiffies, deadline));
Brett Russ20f733e2005-09-01 18:26:17 -04002193
Jeff Garzik22374672005-11-17 10:59:48 -05002194 /* work around errata */
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002195 if (IS_GEN_II(hpriv) &&
Jeff Garzik22374672005-11-17 10:59:48 -05002196 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2197 (retry-- > 0))
2198 goto comreset_retry;
Jeff Garzik095fec82005-11-12 09:50:49 -05002199
Tejun Heoda3dbb12007-07-16 14:29:40 +09002200#ifdef DEBUG
2201 {
2202 u32 sstatus, serror, scontrol;
2203
2204 mv_scr_read(ap, SCR_STATUS, &sstatus);
2205 mv_scr_read(ap, SCR_ERROR, &serror);
2206 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2207 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2208 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2209 }
2210#endif
Brett Russ31961942005-09-30 01:36:00 -04002211
Tejun Heo936fd732007-08-06 18:36:23 +09002212 if (ata_link_offline(&ap->link)) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002213 *class = ATA_DEV_NONE;
Brett Russ20f733e2005-09-01 18:26:17 -04002214 return;
2215 }
2216
Jeff Garzik22374672005-11-17 10:59:48 -05002217 /* even after SStatus reflects that device is ready,
2218 * it seems to take a while for link to be fully
2219 * established (and thus Status no longer 0x80/0x7F),
2220 * so we poll a bit for that, here.
2221 */
2222 retry = 20;
2223 while (1) {
2224 u8 drv_stat = ata_check_status(ap);
2225 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2226 break;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002227 msleep(500);
Jeff Garzik22374672005-11-17 10:59:48 -05002228 if (retry-- <= 0)
2229 break;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002230 if (time_after(jiffies, deadline))
2231 break;
Jeff Garzik22374672005-11-17 10:59:48 -05002232 }
2233
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002234 /* FIXME: if we passed the deadline, the following
2235 * code probably produces an invalid result
2236 */
Brett Russ20f733e2005-09-01 18:26:17 -04002237
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002238 /* finally, read device signature from TF registers */
Tejun Heo3f198592007-09-02 23:23:57 +09002239 *class = ata_dev_try_classify(ap->link.device, 1, NULL);
Jeff Garzik095fec82005-11-12 09:50:49 -05002240
2241 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2242
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002243 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
Jeff Garzik095fec82005-11-12 09:50:49 -05002244
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002245 VPRINTK("EXIT\n");
Brett Russ20f733e2005-09-01 18:26:17 -04002246}
2247
Tejun Heocc0680a2007-08-06 18:36:23 +09002248static int mv_prereset(struct ata_link *link, unsigned long deadline)
Jeff Garzik22374672005-11-17 10:59:48 -05002249{
Tejun Heocc0680a2007-08-06 18:36:23 +09002250 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002251 struct mv_port_priv *pp = ap->private_data;
Tejun Heocc0680a2007-08-06 18:36:23 +09002252 struct ata_eh_context *ehc = &link->eh_context;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002253 int rc;
Jeff Garzik0ea9e172007-07-13 17:06:45 -04002254
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002255 rc = mv_stop_dma(ap);
2256 if (rc)
2257 ehc->i.action |= ATA_EH_HARDRESET;
2258
2259 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2260 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2261 ehc->i.action |= ATA_EH_HARDRESET;
2262 }
2263
2264 /* if we're about to do hardreset, nothing more to do */
2265 if (ehc->i.action & ATA_EH_HARDRESET)
2266 return 0;
2267
Tejun Heocc0680a2007-08-06 18:36:23 +09002268 if (ata_link_online(link))
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002269 rc = ata_wait_ready(ap, deadline);
2270 else
2271 rc = -ENODEV;
2272
2273 return rc;
Jeff Garzik22374672005-11-17 10:59:48 -05002274}
2275
Tejun Heocc0680a2007-08-06 18:36:23 +09002276static int mv_hardreset(struct ata_link *link, unsigned int *class,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002277 unsigned long deadline)
2278{
Tejun Heocc0680a2007-08-06 18:36:23 +09002279 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002280 struct mv_host_priv *hpriv = ap->host->private_data;
2281 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2282
2283 mv_stop_dma(ap);
2284
2285 mv_channel_reset(hpriv, mmio, ap->port_no);
2286
2287 mv_phy_reset(ap, class, deadline);
2288
2289 return 0;
2290}
2291
Tejun Heocc0680a2007-08-06 18:36:23 +09002292static void mv_postreset(struct ata_link *link, unsigned int *classes)
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002293{
Tejun Heocc0680a2007-08-06 18:36:23 +09002294 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002295 u32 serr;
2296
2297 /* print link status */
Tejun Heocc0680a2007-08-06 18:36:23 +09002298 sata_print_link_status(link);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002299
2300 /* clear SError */
Tejun Heocc0680a2007-08-06 18:36:23 +09002301 sata_scr_read(link, SCR_ERROR, &serr);
2302 sata_scr_write_flush(link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002303
2304 /* bail out if no device is present */
2305 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2306 DPRINTK("EXIT, no device\n");
2307 return;
2308 }
2309
2310 /* set up device control */
2311 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2312}
2313
/*
 * mv_error_handler - libata ->error_handler hook.
 *
 * Runs the standard libata EH state machine using this driver's
 * reset methods (mv_prereset/mv_hardreset/mv_postreset) and the
 * generic softreset.
 */
static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}
2319
/*
 * mv_post_int_cmd - libata ->post_internal_cmd hook.
 *
 * Ensure EDMA is stopped on the port once an internal (EH-issued)
 * command has finished, mirroring what ->prereset does.
 */
static void mv_post_int_cmd(struct ata_queued_cmd *qc)
{
	mv_stop_dma(qc->ap);
}
2324
2325static void mv_eh_freeze(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04002326{
Tejun Heo0d5ff562007-02-01 15:06:36 +09002327 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002328 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2329 u32 tmp, mask;
2330 unsigned int shift;
Brett Russ31961942005-09-30 01:36:00 -04002331
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002332 /* FIXME: handle coalescing completion events properly */
Brett Russ31961942005-09-30 01:36:00 -04002333
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002334 shift = ap->port_no * 2;
2335 if (hc > 0)
2336 shift++;
Brett Russ31961942005-09-30 01:36:00 -04002337
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002338 mask = 0x3 << shift;
Brett Russ31961942005-09-30 01:36:00 -04002339
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002340 /* disable assertion of portN err, done events */
2341 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2342 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2343}
2344
2345static void mv_eh_thaw(struct ata_port *ap)
2346{
2347 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2348 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2349 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2350 void __iomem *port_mmio = mv_ap_base(ap);
2351 u32 tmp, mask, hc_irq_cause;
2352 unsigned int shift, hc_port_no = ap->port_no;
2353
2354 /* FIXME: handle coalescing completion events properly */
2355
2356 shift = ap->port_no * 2;
2357 if (hc > 0) {
2358 shift++;
2359 hc_port_no -= 4;
Mark Lord9b358e32006-05-19 16:21:03 -04002360 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002361
2362 mask = 0x3 << shift;
2363
2364 /* clear EDMA errors on this port */
2365 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2366
2367 /* clear pending irq events */
2368 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2369 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2370 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2371 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2372
2373 /* enable assertion of portN err, done events */
2374 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2375 writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
Brett Russ31961942005-09-30 01:36:00 -04002376}
2377
Brett Russ05b308e2005-10-05 17:08:53 -04002378/**
2379 * mv_port_init - Perform some early initialization on a single port.
2380 * @port: libata data structure storing shadow register addresses
2381 * @port_mmio: base address of the port
2382 *
2383 * Initialize shadow register mmio addresses, clear outstanding
2384 * interrupts on the port, and unmask interrupts for the future
2385 * start of the port.
2386 *
2387 * LOCKING:
2388 * Inherited from caller.
2389 */
Brett Russ31961942005-09-30 01:36:00 -04002390static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2391{
Tejun Heo0d5ff562007-02-01 15:06:36 +09002392 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
Brett Russ31961942005-09-30 01:36:00 -04002393 unsigned serr_ofs;
2394
Jeff Garzik8b260242005-11-12 12:32:50 -05002395 /* PIO related setup
Brett Russ31961942005-09-30 01:36:00 -04002396 */
2397 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
Jeff Garzik8b260242005-11-12 12:32:50 -05002398 port->error_addr =
Brett Russ31961942005-09-30 01:36:00 -04002399 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2400 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2401 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2402 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2403 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2404 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
Jeff Garzik8b260242005-11-12 12:32:50 -05002405 port->status_addr =
Brett Russ31961942005-09-30 01:36:00 -04002406 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2407 /* special case: control/altstatus doesn't have ATA_REG_ address */
2408 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2409
2410 /* unused: */
Randy Dunlap8d9db2d2007-02-16 01:40:06 -08002411 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
Brett Russ20f733e2005-09-01 18:26:17 -04002412
Brett Russ31961942005-09-30 01:36:00 -04002413 /* Clear any currently outstanding port interrupt conditions */
2414 serr_ofs = mv_scr_offset(SCR_ERROR);
2415 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2416 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2417
Brett Russ20f733e2005-09-01 18:26:17 -04002418 /* unmask all EDMA error interrupts */
Brett Russ31961942005-09-30 01:36:00 -04002419 writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04002420
Jeff Garzik8b260242005-11-12 12:32:50 -05002421 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
Brett Russ31961942005-09-30 01:36:00 -04002422 readl(port_mmio + EDMA_CFG_OFS),
2423 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2424 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
Brett Russ20f733e2005-09-01 18:26:17 -04002425}
2426
Tejun Heo4447d352007-04-17 23:44:08 +09002427static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002428{
Tejun Heo4447d352007-04-17 23:44:08 +09002429 struct pci_dev *pdev = to_pci_dev(host->dev);
2430 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002431 u32 hp_flags = hpriv->hp_flags;
2432
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002433 switch(board_idx) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002434 case chip_5080:
2435 hpriv->ops = &mv5xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002436 hp_flags |= MV_HP_GEN_I;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002437
Auke Kok44c10132007-06-08 15:46:36 -07002438 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002439 case 0x1:
2440 hp_flags |= MV_HP_ERRATA_50XXB0;
2441 break;
2442 case 0x3:
2443 hp_flags |= MV_HP_ERRATA_50XXB2;
2444 break;
2445 default:
2446 dev_printk(KERN_WARNING, &pdev->dev,
2447 "Applying 50XXB2 workarounds to unknown rev\n");
2448 hp_flags |= MV_HP_ERRATA_50XXB2;
2449 break;
2450 }
2451 break;
2452
2453 case chip_504x:
2454 case chip_508x:
2455 hpriv->ops = &mv5xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002456 hp_flags |= MV_HP_GEN_I;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002457
Auke Kok44c10132007-06-08 15:46:36 -07002458 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002459 case 0x0:
2460 hp_flags |= MV_HP_ERRATA_50XXB0;
2461 break;
2462 case 0x3:
2463 hp_flags |= MV_HP_ERRATA_50XXB2;
2464 break;
2465 default:
2466 dev_printk(KERN_WARNING, &pdev->dev,
2467 "Applying B2 workarounds to unknown rev\n");
2468 hp_flags |= MV_HP_ERRATA_50XXB2;
2469 break;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002470 }
2471 break;
2472
2473 case chip_604x:
2474 case chip_608x:
Jeff Garzik47c2b672005-11-12 21:13:17 -05002475 hpriv->ops = &mv6xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002476 hp_flags |= MV_HP_GEN_II;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002477
Auke Kok44c10132007-06-08 15:46:36 -07002478 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002479 case 0x7:
2480 hp_flags |= MV_HP_ERRATA_60X1B2;
2481 break;
2482 case 0x9:
2483 hp_flags |= MV_HP_ERRATA_60X1C0;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002484 break;
2485 default:
2486 dev_printk(KERN_WARNING, &pdev->dev,
Jeff Garzik47c2b672005-11-12 21:13:17 -05002487 "Applying B2 workarounds to unknown rev\n");
2488 hp_flags |= MV_HP_ERRATA_60X1B2;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002489 break;
2490 }
2491 break;
2492
Jeff Garzike4e7b892006-01-31 12:18:41 -05002493 case chip_7042:
2494 case chip_6042:
2495 hpriv->ops = &mv6xxx_ops;
Jeff Garzike4e7b892006-01-31 12:18:41 -05002496 hp_flags |= MV_HP_GEN_IIE;
2497
Auke Kok44c10132007-06-08 15:46:36 -07002498 switch (pdev->revision) {
Jeff Garzike4e7b892006-01-31 12:18:41 -05002499 case 0x0:
2500 hp_flags |= MV_HP_ERRATA_XX42A0;
2501 break;
2502 case 0x1:
2503 hp_flags |= MV_HP_ERRATA_60X1C0;
2504 break;
2505 default:
2506 dev_printk(KERN_WARNING, &pdev->dev,
2507 "Applying 60X1C0 workarounds to unknown rev\n");
2508 hp_flags |= MV_HP_ERRATA_60X1C0;
2509 break;
2510 }
2511 break;
2512
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002513 default:
2514 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2515 return 1;
2516 }
2517
2518 hpriv->hp_flags = hp_flags;
2519
2520 return 0;
2521}
2522
/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 * @board_idx: controller index
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * The sequence below is order-sensitive: interrupts are masked
 * first, the chip is identified, each host controller (HC) is
 * reset, per-port PHY errata are applied, and only then are
 * interrupt causes cleared and the relevant IRQs unmasked.
 *
 * LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask: silence everything during bring-up */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	/* identify chip generation and select hpriv->ops + errata flags */
	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* sample per-port PHY preamp settings before resetting the HCs */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	/* per-port PHY/interface setup */
	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	/* set up shadow registers and describe each port for sysfs/dmesg */
	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);
		unsigned int offset = port_mmio - mmio;

		mv_port_init(&ap->ioaddr, port_mmio);

		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);

	/* Gen-I chips use a different mask of "always masked" IRQs */
	if (IS_GEN_I(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));

done:
	return rc;
}
2618
Brett Russ05b308e2005-10-05 17:08:53 -04002619/**
2620 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002621 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002622 *
2623 * FIXME: complete this.
2624 *
2625 * LOCKING:
2626 * Inherited from caller.
2627 */
Tejun Heo4447d352007-04-17 23:44:08 +09002628static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002629{
Tejun Heo4447d352007-04-17 23:44:08 +09002630 struct pci_dev *pdev = to_pci_dev(host->dev);
2631 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07002632 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002633 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002634
2635 /* Use this to determine the HW stepping of the chip so we know
2636 * what errata to workaround
2637 */
Brett Russ31961942005-09-30 01:36:00 -04002638 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2639 if (scc == 0)
2640 scc_s = "SCSI";
2641 else if (scc == 0x01)
2642 scc_s = "RAID";
2643 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002644 scc_s = "?";
2645
2646 if (IS_GEN_I(hpriv))
2647 gen = "I";
2648 else if (IS_GEN_II(hpriv))
2649 gen = "II";
2650 else if (IS_GEN_IIE(hpriv))
2651 gen = "IIE";
2652 else
2653 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002654
Jeff Garzika9524a72005-10-30 14:39:11 -05002655 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002656 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2657 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002658 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2659}
2660
/**
 * mv_init_one - handle a positive probe of a Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * Allocates the ata_host and driver-private data, maps the primary
 * BAR, initializes the adapter, then activates the host with either
 * MSI or legacy INTx interrupts.  All resources are devm/pcim
 * managed, so early returns on error do not leak.
 *
 * LOCKING:
 *      Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	/* announce the driver version once, on first probe */
	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* map only the primary BAR; pin the device if it is busy */
	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	/* enable 64-bit DMA if the platform supports it */
	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts: fall back to INTx when MSI is off or fails */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	/* Gen-I chips get the mv5 SCSI host template, others the mv6 one */
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
2723
/* mv_init - module load entry point: register the PCI driver. */
static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}
2728
/* mv_exit - module unload entry point: unregister the PCI driver. */
static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}
2733
/* Module metadata, exposed via modinfo */
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);	/* enables autoload on PCI ID match */
MODULE_VERSION(DRV_VERSION);

/* 0444: parameter is world-readable in sysfs, not writable at runtime */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

/* Driver entry and exit points */
module_init(mv_init);
module_exit(mv_exit);