blob: b39648f0914b023f9bedd8d3b7c206d8a5fc3fbd [file] [log] [blame]
Brett Russ20f733e2005-09-01 18:26:17 -04001/*
2 * sata_mv.c - Marvell SATA support
3 *
Jeff Garzik8b260242005-11-12 12:32:50 -05004 * Copyright 2005: EMC Corporation, all rights reserved.
Jeff Garzike2b1be52005-11-18 14:04:23 -05005 * Copyright 2005 Red Hat, Inc. All rights reserved.
Brett Russ20f733e2005-09-01 18:26:17 -04006 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
Jeff Garzik4a05e202007-05-24 23:40:15 -040024/*
25 sata_mv TODO list:
26
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember a couple workarounds (one related to PCI-X)
30 are still needed.
31
Jeff Garzik4a05e202007-05-24 23:40:15 -040032 4) Add NCQ support (easy to intermediate, once new-EH support appears)
33
34 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
35
36 6) Add port multiplier support (intermediate)
37
Jeff Garzik4a05e202007-05-24 23:40:15 -040038 8) Develop a low-power-consumption strategy, and implement it.
39
40 9) [Experiment, low priority] See if ATAPI can be supported using
41 "unknown FIS" or "vendor-specific FIS" support, or something creative
42 like that.
43
44 10) [Experiment, low priority] Investigate interrupt coalescing.
45 Quite often, especially with PCI Message Signalled Interrupts (MSI),
46 the overhead reduced by interrupt mitigation is quite often not
47 worth the latency cost.
48
49 11) [Experiment, Marvell value added] Is it possible to use target
50 mode to cross-connect two Linux boxes with Marvell cards? If so,
51 creating LibATA target mode support would be very interesting.
52
53 Target mode, for those without docs, is the ability to directly
54 connect two SATA controllers.
55
56 13) Verify that 7042 is fully supported. I only have a 6042.
57
58*/
59
60
Brett Russ20f733e2005-09-01 18:26:17 -040061#include <linux/kernel.h>
62#include <linux/module.h>
63#include <linux/pci.h>
64#include <linux/init.h>
65#include <linux/blkdev.h>
66#include <linux/delay.h>
67#include <linux/interrupt.h>
Brett Russ20f733e2005-09-01 18:26:17 -040068#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050069#include <linux/device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040070#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050071#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040072#include <scsi/scsi_device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040073#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040074
75#define DRV_NAME "sata_mv"
Jeff Garzik6c087722007-10-12 00:16:23 -040076#define DRV_VERSION "1.01"
Brett Russ20f733e2005-09-01 18:26:17 -040077
78enum {
79 /* BAR's are enumerated in terms of pci_resource_start() terms */
80 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
81 MV_IO_BAR = 2, /* offset 0x18: IO space */
82 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
83
84 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
85 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
86
87 MV_PCI_REG_BASE = 0,
88 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
Mark Lord615ab952006-05-19 16:24:56 -040089 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
90 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
91 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
92 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
93 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
94
Brett Russ20f733e2005-09-01 18:26:17 -040095 MV_SATAHC0_REG_BASE = 0x20000,
Jeff Garzik522479f2005-11-12 22:14:02 -050096 MV_FLASH_CTL = 0x1046c,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -050097 MV_GPIO_PORT_CTL = 0x104f0,
98 MV_RESET_CFG = 0x180d8,
Brett Russ20f733e2005-09-01 18:26:17 -040099
100 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
101 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
102 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
103 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
104
Brett Russ31961942005-09-30 01:36:00 -0400105 MV_MAX_Q_DEPTH = 32,
106 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
107
108 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
109 * CRPB needs alignment on a 256B boundary. Size == 256B
110 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
111 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
112 */
113 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
114 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
115 MV_MAX_SG_CT = 176,
116 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
117 MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
118
Brett Russ20f733e2005-09-01 18:26:17 -0400119 MV_PORTS_PER_HC = 4,
120 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
121 MV_PORT_HC_SHIFT = 2,
Brett Russ31961942005-09-30 01:36:00 -0400122 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
Brett Russ20f733e2005-09-01 18:26:17 -0400123 MV_PORT_MASK = 3,
124
125 /* Host Flags */
126 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
127 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400128 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400129 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
130 ATA_FLAG_PIO_POLLING,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500131 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
Brett Russ20f733e2005-09-01 18:26:17 -0400132
Brett Russ31961942005-09-30 01:36:00 -0400133 CRQB_FLAG_READ = (1 << 0),
134 CRQB_TAG_SHIFT = 1,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400135 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
136 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
Brett Russ31961942005-09-30 01:36:00 -0400137 CRQB_CMD_ADDR_SHIFT = 8,
138 CRQB_CMD_CS = (0x2 << 11),
139 CRQB_CMD_LAST = (1 << 15),
140
141 CRPB_FLAG_STATUS_SHIFT = 8,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400142 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
143 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
Brett Russ31961942005-09-30 01:36:00 -0400144
145 EPRD_FLAG_END_OF_TBL = (1 << 31),
146
Brett Russ20f733e2005-09-01 18:26:17 -0400147 /* PCI interface registers */
148
Brett Russ31961942005-09-30 01:36:00 -0400149 PCI_COMMAND_OFS = 0xc00,
150
Brett Russ20f733e2005-09-01 18:26:17 -0400151 PCI_MAIN_CMD_STS_OFS = 0xd30,
152 STOP_PCI_MASTER = (1 << 2),
153 PCI_MASTER_EMPTY = (1 << 3),
154 GLOB_SFT_RST = (1 << 4),
155
Jeff Garzik522479f2005-11-12 22:14:02 -0500156 MV_PCI_MODE = 0xd00,
157 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
158 MV_PCI_DISC_TIMER = 0xd04,
159 MV_PCI_MSI_TRIGGER = 0xc38,
160 MV_PCI_SERR_MASK = 0xc28,
161 MV_PCI_XBAR_TMOUT = 0x1d04,
162 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
163 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
164 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
165 MV_PCI_ERR_COMMAND = 0x1d50,
166
167 PCI_IRQ_CAUSE_OFS = 0x1d58,
168 PCI_IRQ_MASK_OFS = 0x1d5c,
Brett Russ20f733e2005-09-01 18:26:17 -0400169 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
170
171 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
172 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
173 PORT0_ERR = (1 << 0), /* shift by port # */
174 PORT0_DONE = (1 << 1), /* shift by port # */
175 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
176 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
177 PCI_ERR = (1 << 18),
178 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
179 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500180 PORTS_0_3_COAL_DONE = (1 << 8),
181 PORTS_4_7_COAL_DONE = (1 << 17),
Brett Russ20f733e2005-09-01 18:26:17 -0400182 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
183 GPIO_INT = (1 << 22),
184 SELF_INT = (1 << 23),
185 TWSI_INT = (1 << 24),
186 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500187 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
Jeff Garzik8b260242005-11-12 12:32:50 -0500188 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
Brett Russ20f733e2005-09-01 18:26:17 -0400189 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
190 HC_MAIN_RSVD),
Jeff Garzikfb621e22007-02-25 04:19:45 -0500191 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
192 HC_MAIN_RSVD_5),
Brett Russ20f733e2005-09-01 18:26:17 -0400193
194 /* SATAHC registers */
195 HC_CFG_OFS = 0,
196
197 HC_IRQ_CAUSE_OFS = 0x14,
Brett Russ31961942005-09-30 01:36:00 -0400198 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
Brett Russ20f733e2005-09-01 18:26:17 -0400199 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
200 DEV_IRQ = (1 << 8), /* shift by port # */
201
202 /* Shadow block registers */
Brett Russ31961942005-09-30 01:36:00 -0400203 SHD_BLK_OFS = 0x100,
204 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
Brett Russ20f733e2005-09-01 18:26:17 -0400205
206 /* SATA registers */
207 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
208 SATA_ACTIVE_OFS = 0x350,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500209 PHY_MODE3 = 0x310,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500210 PHY_MODE4 = 0x314,
211 PHY_MODE2 = 0x330,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500212 MV5_PHY_MODE = 0x74,
213 MV5_LT_MODE = 0x30,
214 MV5_PHY_CTL = 0x0C,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500215 SATA_INTERFACE_CTL = 0x050,
216
217 MV_M2_PREAMP_MASK = 0x7e0,
Brett Russ20f733e2005-09-01 18:26:17 -0400218
219 /* Port registers */
220 EDMA_CFG_OFS = 0,
Brett Russ31961942005-09-30 01:36:00 -0400221 EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */
222 EDMA_CFG_NCQ = (1 << 5),
223 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
224 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
225 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
Brett Russ20f733e2005-09-01 18:26:17 -0400226
227 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
228 EDMA_ERR_IRQ_MASK_OFS = 0xc,
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400229 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
230 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
231 EDMA_ERR_DEV = (1 << 2), /* device error */
232 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
233 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
234 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400235 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
236 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400237 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400238 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400239 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
240 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
241 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
242 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
243 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
Brett Russ20f733e2005-09-01 18:26:17 -0400244 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400245 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
246 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
247 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
248 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400249 EDMA_ERR_OVERRUN_5 = (1 << 5),
250 EDMA_ERR_UNDERRUN_5 = (1 << 6),
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400251 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
252 EDMA_ERR_PRD_PAR |
253 EDMA_ERR_DEV_DCON |
254 EDMA_ERR_DEV_CON |
255 EDMA_ERR_SERR |
256 EDMA_ERR_SELF_DIS |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400257 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400258 EDMA_ERR_CRPB_PAR |
259 EDMA_ERR_INTRL_PAR |
260 EDMA_ERR_IORDY |
261 EDMA_ERR_LNK_CTRL_RX_2 |
262 EDMA_ERR_LNK_DATA_RX |
263 EDMA_ERR_LNK_DATA_TX |
264 EDMA_ERR_TRANS_PROTO,
265 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
266 EDMA_ERR_PRD_PAR |
267 EDMA_ERR_DEV_DCON |
268 EDMA_ERR_DEV_CON |
269 EDMA_ERR_OVERRUN_5 |
270 EDMA_ERR_UNDERRUN_5 |
271 EDMA_ERR_SELF_DIS_5 |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400272 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400273 EDMA_ERR_CRPB_PAR |
274 EDMA_ERR_INTRL_PAR |
275 EDMA_ERR_IORDY,
Brett Russ20f733e2005-09-01 18:26:17 -0400276
Brett Russ31961942005-09-30 01:36:00 -0400277 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
278 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400279
280 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
281 EDMA_REQ_Q_PTR_SHIFT = 5,
282
283 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
284 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
285 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400286 EDMA_RSP_Q_PTR_SHIFT = 3,
287
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400288 EDMA_CMD_OFS = 0x28, /* EDMA command register */
289 EDMA_EN = (1 << 0), /* enable EDMA */
290 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
291 ATA_RST = (1 << 2), /* reset trans/link/phy */
Brett Russ20f733e2005-09-01 18:26:17 -0400292
Jeff Garzikc9d39132005-11-13 17:47:51 -0500293 EDMA_IORDY_TMOUT = 0x34,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500294 EDMA_ARB_CFG = 0x38,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500295
Brett Russ31961942005-09-30 01:36:00 -0400296 /* Host private flags (hp_flags) */
297 MV_HP_FLAG_MSI = (1 << 0),
Jeff Garzik47c2b672005-11-12 21:13:17 -0500298 MV_HP_ERRATA_50XXB0 = (1 << 1),
299 MV_HP_ERRATA_50XXB2 = (1 << 2),
300 MV_HP_ERRATA_60X1B2 = (1 << 3),
301 MV_HP_ERRATA_60X1C0 = (1 << 4),
Jeff Garzike4e7b892006-01-31 12:18:41 -0500302 MV_HP_ERRATA_XX42A0 = (1 << 5),
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400303 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
304 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
305 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
Brett Russ20f733e2005-09-01 18:26:17 -0400306
Brett Russ31961942005-09-30 01:36:00 -0400307 /* Port private flags (pp_flags) */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400308 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
309 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
Brett Russ31961942005-09-30 01:36:00 -0400310};
311
Jeff Garzikee9ccdf2007-07-12 15:51:22 -0400312#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
313#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
Jeff Garzike4e7b892006-01-31 12:18:41 -0500314#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500315
Jeff Garzik095fec82005-11-12 09:50:49 -0500316enum {
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400317 /* DMA boundary 0xffff is required by the s/g splitting
318 * we need on /length/ in mv_fill-sg().
319 */
320 MV_DMA_BOUNDARY = 0xffffU,
Jeff Garzik095fec82005-11-12 09:50:49 -0500321
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400322 /* mask of register bits containing lower 32 bits
323 * of EDMA request queue DMA address
324 */
Jeff Garzik095fec82005-11-12 09:50:49 -0500325 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
326
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400327 /* ditto, for response queue */
Jeff Garzik095fec82005-11-12 09:50:49 -0500328 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
329};
330
Jeff Garzik522479f2005-11-12 22:14:02 -0500331enum chip_type {
332 chip_504x,
333 chip_508x,
334 chip_5080,
335 chip_604x,
336 chip_608x,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500337 chip_6042,
338 chip_7042,
Jeff Garzik522479f2005-11-12 22:14:02 -0500339};
340
Brett Russ31961942005-09-30 01:36:00 -0400341/* Command ReQuest Block: 32B */
342struct mv_crqb {
Mark Lorde1469872006-05-22 19:02:03 -0400343 __le32 sg_addr;
344 __le32 sg_addr_hi;
345 __le16 ctrl_flags;
346 __le16 ata_cmd[11];
Brett Russ31961942005-09-30 01:36:00 -0400347};
348
Jeff Garzike4e7b892006-01-31 12:18:41 -0500349struct mv_crqb_iie {
Mark Lorde1469872006-05-22 19:02:03 -0400350 __le32 addr;
351 __le32 addr_hi;
352 __le32 flags;
353 __le32 len;
354 __le32 ata_cmd[4];
Jeff Garzike4e7b892006-01-31 12:18:41 -0500355};
356
Brett Russ31961942005-09-30 01:36:00 -0400357/* Command ResPonse Block: 8B */
358struct mv_crpb {
Mark Lorde1469872006-05-22 19:02:03 -0400359 __le16 id;
360 __le16 flags;
361 __le32 tmstmp;
Brett Russ31961942005-09-30 01:36:00 -0400362};
363
364/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
365struct mv_sg {
Mark Lorde1469872006-05-22 19:02:03 -0400366 __le32 addr;
367 __le32 flags_size;
368 __le32 addr_hi;
369 __le32 reserved;
Brett Russ20f733e2005-09-01 18:26:17 -0400370};
371
372struct mv_port_priv {
Brett Russ31961942005-09-30 01:36:00 -0400373 struct mv_crqb *crqb;
374 dma_addr_t crqb_dma;
375 struct mv_crpb *crpb;
376 dma_addr_t crpb_dma;
377 struct mv_sg *sg_tbl;
378 dma_addr_t sg_tbl_dma;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400379
380 unsigned int req_idx;
381 unsigned int resp_idx;
382
Brett Russ31961942005-09-30 01:36:00 -0400383 u32 pp_flags;
Brett Russ20f733e2005-09-01 18:26:17 -0400384};
385
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500386struct mv_port_signal {
387 u32 amps;
388 u32 pre;
389};
390
Jeff Garzik47c2b672005-11-12 21:13:17 -0500391struct mv_host_priv;
392struct mv_hw_ops {
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500393 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
394 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500395 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
396 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
397 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500398 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
399 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500400 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
401 void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500402};
403
Brett Russ20f733e2005-09-01 18:26:17 -0400404struct mv_host_priv {
Brett Russ31961942005-09-30 01:36:00 -0400405 u32 hp_flags;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500406 struct mv_port_signal signal[8];
Jeff Garzik47c2b672005-11-12 21:13:17 -0500407 const struct mv_hw_ops *ops;
Brett Russ20f733e2005-09-01 18:26:17 -0400408};
409
410static void mv_irq_clear(struct ata_port *ap);
Tejun Heoda3dbb12007-07-16 14:29:40 +0900411static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
412static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
413static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
414static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Brett Russ31961942005-09-30 01:36:00 -0400415static int mv_port_start(struct ata_port *ap);
416static void mv_port_stop(struct ata_port *ap);
417static void mv_qc_prep(struct ata_queued_cmd *qc);
Jeff Garzike4e7b892006-01-31 12:18:41 -0500418static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
Tejun Heo9a3d9eb2006-01-23 13:09:36 +0900419static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400420static void mv_error_handler(struct ata_port *ap);
421static void mv_post_int_cmd(struct ata_queued_cmd *qc);
422static void mv_eh_freeze(struct ata_port *ap);
423static void mv_eh_thaw(struct ata_port *ap);
Brett Russ20f733e2005-09-01 18:26:17 -0400424static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
425
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500426static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
427 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500428static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
429static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
430 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500431static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
432 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500433static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
434static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500435
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500436static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
437 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500438static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
439static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
440 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500441static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
442 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500443static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
444static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500445static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
446 unsigned int port_no);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500447
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400448static struct scsi_host_template mv5_sht = {
Brett Russ20f733e2005-09-01 18:26:17 -0400449 .module = THIS_MODULE,
450 .name = DRV_NAME,
451 .ioctl = ata_scsi_ioctl,
452 .queuecommand = ata_scsi_queuecmd,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400453 .can_queue = ATA_DEF_QUEUE,
454 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400455 .sg_tablesize = MV_MAX_SG_CT / 2,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400456 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
457 .emulated = ATA_SHT_EMULATED,
458 .use_clustering = 1,
459 .proc_name = DRV_NAME,
460 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzik3be6cbd2007-10-18 16:21:18 -0400461 .slave_configure = ata_scsi_slave_config,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400462 .slave_destroy = ata_scsi_slave_destroy,
463 .bios_param = ata_std_bios_param,
464};
465
466static struct scsi_host_template mv6_sht = {
467 .module = THIS_MODULE,
468 .name = DRV_NAME,
469 .ioctl = ata_scsi_ioctl,
470 .queuecommand = ata_scsi_queuecmd,
471 .can_queue = ATA_DEF_QUEUE,
Brett Russ20f733e2005-09-01 18:26:17 -0400472 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400473 .sg_tablesize = MV_MAX_SG_CT / 2,
Brett Russ20f733e2005-09-01 18:26:17 -0400474 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
475 .emulated = ATA_SHT_EMULATED,
Jeff Garzikd88184f2007-02-26 01:26:06 -0500476 .use_clustering = 1,
Brett Russ20f733e2005-09-01 18:26:17 -0400477 .proc_name = DRV_NAME,
478 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzik3be6cbd2007-10-18 16:21:18 -0400479 .slave_configure = ata_scsi_slave_config,
Tejun Heoccf68c32006-05-31 18:28:09 +0900480 .slave_destroy = ata_scsi_slave_destroy,
Brett Russ20f733e2005-09-01 18:26:17 -0400481 .bios_param = ata_std_bios_param,
Brett Russ20f733e2005-09-01 18:26:17 -0400482};
483
Jeff Garzikc9d39132005-11-13 17:47:51 -0500484static const struct ata_port_operations mv5_ops = {
Jeff Garzikc9d39132005-11-13 17:47:51 -0500485 .tf_load = ata_tf_load,
486 .tf_read = ata_tf_read,
487 .check_status = ata_check_status,
488 .exec_command = ata_exec_command,
489 .dev_select = ata_std_dev_select,
490
Jeff Garzikcffacd82007-03-09 09:46:47 -0500491 .cable_detect = ata_cable_sata,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500492
493 .qc_prep = mv_qc_prep,
494 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900495 .data_xfer = ata_data_xfer,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500496
Jeff Garzikc9d39132005-11-13 17:47:51 -0500497 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900498 .irq_on = ata_irq_on,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500499
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400500 .error_handler = mv_error_handler,
501 .post_internal_cmd = mv_post_int_cmd,
502 .freeze = mv_eh_freeze,
503 .thaw = mv_eh_thaw,
504
Jeff Garzikc9d39132005-11-13 17:47:51 -0500505 .scr_read = mv5_scr_read,
506 .scr_write = mv5_scr_write,
507
508 .port_start = mv_port_start,
509 .port_stop = mv_port_stop,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500510};
511
512static const struct ata_port_operations mv6_ops = {
Brett Russ20f733e2005-09-01 18:26:17 -0400513 .tf_load = ata_tf_load,
514 .tf_read = ata_tf_read,
515 .check_status = ata_check_status,
516 .exec_command = ata_exec_command,
517 .dev_select = ata_std_dev_select,
518
Jeff Garzikcffacd82007-03-09 09:46:47 -0500519 .cable_detect = ata_cable_sata,
Brett Russ20f733e2005-09-01 18:26:17 -0400520
Brett Russ31961942005-09-30 01:36:00 -0400521 .qc_prep = mv_qc_prep,
522 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900523 .data_xfer = ata_data_xfer,
Brett Russ20f733e2005-09-01 18:26:17 -0400524
Brett Russ20f733e2005-09-01 18:26:17 -0400525 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900526 .irq_on = ata_irq_on,
Brett Russ20f733e2005-09-01 18:26:17 -0400527
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400528 .error_handler = mv_error_handler,
529 .post_internal_cmd = mv_post_int_cmd,
530 .freeze = mv_eh_freeze,
531 .thaw = mv_eh_thaw,
532
Brett Russ20f733e2005-09-01 18:26:17 -0400533 .scr_read = mv_scr_read,
534 .scr_write = mv_scr_write,
535
Brett Russ31961942005-09-30 01:36:00 -0400536 .port_start = mv_port_start,
537 .port_stop = mv_port_stop,
Brett Russ20f733e2005-09-01 18:26:17 -0400538};
539
Jeff Garzike4e7b892006-01-31 12:18:41 -0500540static const struct ata_port_operations mv_iie_ops = {
Jeff Garzike4e7b892006-01-31 12:18:41 -0500541 .tf_load = ata_tf_load,
542 .tf_read = ata_tf_read,
543 .check_status = ata_check_status,
544 .exec_command = ata_exec_command,
545 .dev_select = ata_std_dev_select,
546
Jeff Garzikcffacd82007-03-09 09:46:47 -0500547 .cable_detect = ata_cable_sata,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500548
549 .qc_prep = mv_qc_prep_iie,
550 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900551 .data_xfer = ata_data_xfer,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500552
Jeff Garzike4e7b892006-01-31 12:18:41 -0500553 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900554 .irq_on = ata_irq_on,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500555
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400556 .error_handler = mv_error_handler,
557 .post_internal_cmd = mv_post_int_cmd,
558 .freeze = mv_eh_freeze,
559 .thaw = mv_eh_thaw,
560
Jeff Garzike4e7b892006-01-31 12:18:41 -0500561 .scr_read = mv_scr_read,
562 .scr_write = mv_scr_write,
563
564 .port_start = mv_port_start,
565 .port_stop = mv_port_stop,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500566};
567
Arjan van de Ven98ac62d2005-11-28 10:06:23 +0100568static const struct ata_port_info mv_port_info[] = {
Brett Russ20f733e2005-09-01 18:26:17 -0400569 { /* chip_504x */
Jeff Garzikcca39742006-08-24 03:19:22 -0400570 .flags = MV_COMMON_FLAGS,
Brett Russ31961942005-09-30 01:36:00 -0400571 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400572 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500573 .port_ops = &mv5_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400574 },
575 { /* chip_508x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400576 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
Brett Russ31961942005-09-30 01:36:00 -0400577 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400578 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500579 .port_ops = &mv5_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400580 },
Jeff Garzik47c2b672005-11-12 21:13:17 -0500581 { /* chip_5080 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400582 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500583 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400584 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500585 .port_ops = &mv5_ops,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500586 },
Brett Russ20f733e2005-09-01 18:26:17 -0400587 { /* chip_604x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400588 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Brett Russ31961942005-09-30 01:36:00 -0400589 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400590 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500591 .port_ops = &mv6_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400592 },
593 { /* chip_608x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400594 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
595 MV_FLAG_DUAL_HC,
Brett Russ31961942005-09-30 01:36:00 -0400596 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400597 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500598 .port_ops = &mv6_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400599 },
Jeff Garzike4e7b892006-01-31 12:18:41 -0500600 { /* chip_6042 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400601 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500602 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400603 .udma_mask = ATA_UDMA6,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500604 .port_ops = &mv_iie_ops,
605 },
606 { /* chip_7042 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400607 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500608 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400609 .udma_mask = ATA_UDMA6,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500610 .port_ops = &mv_iie_ops,
611 },
Brett Russ20f733e2005-09-01 18:26:17 -0400612};
613
Jeff Garzik3b7d6972005-11-10 11:04:11 -0500614static const struct pci_device_id mv_pci_tbl[] = {
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400615 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
616 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
617 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
618 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
Alan Coxcfbf7232007-07-09 14:38:41 +0100619 /* RocketRAID 1740/174x have different identifiers */
620 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
621 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
Brett Russ20f733e2005-09-01 18:26:17 -0400622
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400623 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
624 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
625 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
626 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
627 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
Jeff Garzik29179532005-11-11 08:08:03 -0500628
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400629 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
630
Florian Attenbergerd9f9c6b2007-07-02 17:09:29 +0200631 /* Adaptec 1430SA */
632 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
633
Olof Johanssone93f09d2007-01-18 18:39:59 -0600634 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
635
Morrison, Tom6a3d5862007-03-06 02:38:10 -0800636 /* add Marvell 7042 support */
637 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
638
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400639 { } /* terminate list */
Brett Russ20f733e2005-09-01 18:26:17 -0400640};
641
642static struct pci_driver mv_pci_driver = {
643 .name = DRV_NAME,
644 .id_table = mv_pci_tbl,
645 .probe = mv_init_one,
646 .remove = ata_pci_remove_one,
647};
648
/* Chip-specific hardware hooks for the 50xx family (mv5_* helpers). */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
657
/* Chip-specific hardware hooks for the 60xx family.  Note: bus reset
 * uses the shared mv_reset_pci_bus() helper, not an mv6_-prefixed one. */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
666
/*
 * module options
 */
static int msi;	/* use PCI MSI; zero (off, default) or non-zero to enable */
671
672
Jeff Garzikd88184f2007-02-26 01:26:06 -0500673/* move to PCI layer or libata core? */
674static int pci_go_64(struct pci_dev *pdev)
675{
676 int rc;
677
678 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
679 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
680 if (rc) {
681 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
682 if (rc) {
683 dev_printk(KERN_ERR, &pdev->dev,
684 "64-bit DMA enable failed\n");
685 return rc;
686 }
687 }
688 } else {
689 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
690 if (rc) {
691 dev_printk(KERN_ERR, &pdev->dev,
692 "32-bit DMA enable failed\n");
693 return rc;
694 }
695 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
696 if (rc) {
697 dev_printk(KERN_ERR, &pdev->dev,
698 "32-bit consistent DMA enable failed\n");
699 return rc;
700 }
701 }
702
703 return rc;
704}
705
Jeff Garzikddef9bb2006-02-02 16:17:06 -0500706/*
Brett Russ20f733e2005-09-01 18:26:17 -0400707 * Functions
708 */
709
/*
 * writelfl - MMIO write followed by a read-back of the same register,
 * forcing the PCI posted write out to the device before returning.
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
715
/* MMIO base of host controller @hc within the chip's register window. */
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}
720
/* Host-controller number that owns global port number @port. */
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}
725
/* Port index within its own host controller (low bits of global port). */
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
730
/* MMIO base of the host controller that owns global port @port. */
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}
736
Brett Russ20f733e2005-09-01 18:26:17 -0400737static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
738{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500739 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500740 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500741 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400742}
743
/* Per-port register base for libata port @ap, via the host's iomap BAR. */
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}
748
Jeff Garzikcca39742006-08-24 03:19:22 -0400749static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400750{
Jeff Garzikcca39742006-08-24 03:19:22 -0400751 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400752}
753
/* libata ->irq_clear() hook: intentionally empty for this hardware. */
static void mv_irq_clear(struct ata_port *ap)
{
}
757
/**
 * mv_set_edma_ptrs - Program EDMA request/response queue registers
 * @port_mmio: per-port MMIO base
 * @hpriv: host private data (checked for the MV_HP_ERRATA_XX42A0 flag)
 * @pp: port private data holding CRQB/CRPB DMA addresses and soft indices
 *
 * Writes the hardware request (CRQB) and response (CRPB) queue base
 * registers from the coherent DMA addresses in @pp, and sets the in/out
 * pointer fields from the software indices pp->req_idx / pp->resp_idx.
 * On XX42A0-errata chips the OUT (request) and IN (response) pointer
 * registers also carry the low queue-base bits.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* request queue base must be 1 KiB aligned (low 10 bits clear) */
	WARN_ON(pp->crqb_dma & 0x3ff);
	/* high 32 bits; two 16-bit shifts stay safe on 32-bit dma_addr_t */
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	/* response queue base must be 256-byte aligned (low 8 bits clear) */
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
797
/**
 * mv_start_dma - Enable eDMA engine
 * @base: port base address
 * @hpriv: host private data (forwarded to mv_set_edma_ptrs())
 * @pp: port private data
 *
 * If the engine is not already running (per the cached
 * MV_PP_FLAG_EDMA_EN flag): clear any pending EDMA error-cause bits,
 * program the queue base/pointer registers, then set EDMA_EN.
 * Finally verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
			 struct mv_port_priv *pp)
{
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		/* clear EDMA event indicators, if any */
		writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(base, hpriv, pp);

		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}
823
/**
 * __mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Writes the EDMA disable bit (which self-clears) if our cached state
 * says the engine is running, then polls up to ~100 ms (1000 x 100 us)
 * for EDMA_EN to drop.  Verify the local cache of the eDMA state is
 * accurate with a WARN_ON.
 *
 * Returns 0 on success, -EIO if the engine would not stop.
 *
 * LOCKING:
 * Inherited from caller; mv_stop_dma() is the locked wrapper.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		/* cache says stopped: hardware had better agree */
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
866
/*
 * mv_stop_dma - stop the port's eDMA engine with the host lock held.
 * @ap: ATA channel to manipulate
 *
 * Locked wrapper around __mv_stop_dma(): takes ap->host->lock with
 * interrupts disabled for the duration of the stop.
 * Returns 0 on success, -EIO if the engine refused to stop.
 */
static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}
878
#ifdef ATA_DEBUG
/* Hex-dump @bytes of MMIO space at @start, four 32-bit words per line. */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int off = 0;

	while (off < bytes) {
		int col;

		DPRINTK("%p: ", start + off);
		for (col = 0; col < 4 && off < bytes; col++) {
			printk("%08x ", readl(start + off));
			off += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
893
/* Hex-dump @bytes of @pdev's PCI config space, four dwords per line.
 * Compiles to an empty function unless ATA_DEBUG is defined. */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int off = 0;

	while (off < bytes) {
		int col;
		u32 dw;

		DPRINTK("%02x: ", off);
		for (col = 0; col < 4 && off < bytes; col++) {
			(void) pci_read_config_dword(pdev, off, &dw);
			printk("%08x ", dw);
			off += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
910static void mv_dump_all_regs(void __iomem *mmio_base, int port,
911 struct pci_dev *pdev)
912{
913#ifdef ATA_DEBUG
Jeff Garzik8b260242005-11-12 12:32:50 -0500914 void __iomem *hc_base = mv_hc_base(mmio_base,
Brett Russ31961942005-09-30 01:36:00 -0400915 port >> MV_PORT_HC_SHIFT);
916 void __iomem *port_base;
917 int start_port, num_ports, p, start_hc, num_hcs, hc;
918
919 if (0 > port) {
920 start_hc = start_port = 0;
921 num_ports = 8; /* shld be benign for 4 port devs */
922 num_hcs = 2;
923 } else {
924 start_hc = port >> MV_PORT_HC_SHIFT;
925 start_port = port;
926 num_ports = num_hcs = 1;
927 }
Jeff Garzik8b260242005-11-12 12:32:50 -0500928 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
Brett Russ31961942005-09-30 01:36:00 -0400929 num_ports > 1 ? num_ports - 1 : start_port);
930
931 if (NULL != pdev) {
932 DPRINTK("PCI config space regs:\n");
933 mv_dump_pci_cfg(pdev, 0x68);
934 }
935 DPRINTK("PCI regs:\n");
936 mv_dump_mem(mmio_base+0xc00, 0x3c);
937 mv_dump_mem(mmio_base+0xd00, 0x34);
938 mv_dump_mem(mmio_base+0xf00, 0x4);
939 mv_dump_mem(mmio_base+0x1d00, 0x6c);
940 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
Dan Alonid220c372006-04-10 23:20:22 -0700941 hc_base = mv_hc_base(mmio_base, hc);
Brett Russ31961942005-09-30 01:36:00 -0400942 DPRINTK("HC regs (HC %i):\n", hc);
943 mv_dump_mem(hc_base, 0x1c);
944 }
945 for (p = start_port; p < start_port + num_ports; p++) {
946 port_base = mv_port_base(mmio_base, p);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400947 DPRINTK("EDMA regs (port %i):\n", p);
Brett Russ31961942005-09-30 01:36:00 -0400948 mv_dump_mem(port_base, 0x54);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400949 DPRINTK("SATA regs (port %i):\n", p);
Brett Russ31961942005-09-30 01:36:00 -0400950 mv_dump_mem(port_base+0x300, 0x60);
951 }
952#endif
953}
954
Brett Russ20f733e2005-09-01 18:26:17 -0400955static unsigned int mv_scr_offset(unsigned int sc_reg_in)
956{
957 unsigned int ofs;
958
959 switch (sc_reg_in) {
960 case SCR_STATUS:
961 case SCR_CONTROL:
962 case SCR_ERROR:
963 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
964 break;
965 case SCR_ACTIVE:
966 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
967 break;
968 default:
969 ofs = 0xffffffffU;
970 break;
971 }
972 return ofs;
973}
974
Tejun Heoda3dbb12007-07-16 14:29:40 +0900975static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -0400976{
977 unsigned int ofs = mv_scr_offset(sc_reg_in);
978
Tejun Heoda3dbb12007-07-16 14:29:40 +0900979 if (ofs != 0xffffffffU) {
980 *val = readl(mv_ap_base(ap) + ofs);
981 return 0;
982 } else
983 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -0400984}
985
Tejun Heoda3dbb12007-07-16 14:29:40 +0900986static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -0400987{
988 unsigned int ofs = mv_scr_offset(sc_reg_in);
989
Tejun Heoda3dbb12007-07-16 14:29:40 +0900990 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -0400991 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +0900992 return 0;
993 } else
994 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -0400995}
996
/**
 * mv_edma_cfg - Program the EDMA configuration register per chip family
 * @ap: ATA port (currently unused in this function)
 * @hpriv: host private data; IS_GEN_I/II/IIE() selects the chip family
 * @port_mmio: per-port MMIO base
 *
 * Read-modify-write of EDMA_CFG_OFS.  All families are configured for
 * non-NCQ, non-queued operation here; the per-family bits are commented
 * inline below.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable eQue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1028
/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.  One coherent chunk (MV_PORT_PRIV_DMA_SZ) is carved
 * into the CRQB request ring, the CRPB response ring, and the ePRD
 * scatter/gather table, in that order.  Allocations use devm_/dmam_
 * managed APIs, so error paths can simply return without cleanup.
 *
 * Returns 0 on success, -ENOMEM (or the ata_pad_alloc() error) on
 * failure.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	unsigned long flags;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	/* hold the host lock while touching the port's EDMA registers */
	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, hpriv, port_mmio);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}
1101
/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.  (Memory itself is managed via
 * devm_/dmam_ allocations made in mv_port_start(), so only the DMA
 * engine needs explicit stopping here.)
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
}
1115
/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.  Any DMA segment that
 * would cross a 64 KiB boundary (based on its low 16 address bits) is
 * split into multiple ePRD entries, since the length field holds at
 * most 16 bits.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;

	mv_sg = pp->sg_tbl;
	ata_for_each_sg(sg, qc) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			/* clamp so this entry stops at the 64 KiB boundary */
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	/* flag the final entry so the EDMA knows where the table ends */
	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
1158
Mark Lorde1469872006-05-22 19:02:03 -04001159static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001160{
Mark Lord559eeda2006-05-19 16:40:15 -04001161 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001162 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001163 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001164}
1165
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * Returns immediately (leaving preparation to the standard libata
 * path) if the command is not DMA protocol.  Otherwise it handles prep
 * of the CRQB (command request block) at the current software request
 * index, does some sanity checking, and calls the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this*/

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* point this CRQB slot at the shared ePRD table */
	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accomodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	/* remaining registers, in the order the hardware expects them;
	 * the final CMD write carries the "last" marker */
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1257
/**
 * mv_qc_prep_iie - Host specific command preparation (Gen IIE chips).
 * @qc: queued command to prepare
 *
 * Returns immediately (leaving preparation to the standard libata
 * path) if the command is not DMA protocol.  Otherwise it fills the
 * Gen IIE flavor of the CRQB (command request block), which packs the
 * taskfile into four 32-bit words rather than per-register command
 * words, does some sanity checking, and calls the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
						   what we use as our tag */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* IIE CRQB overlays the same ring slot as the classic CRQB */
	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1326
/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * Returns 0 when the EDMA has been kicked, or whatever
 * ata_qc_issue_prot() returns for non-DMA commands.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(port_mmio, hpriv, pp);

	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	/* advance the software producer index (mv_qc_prep filled the slot) */
	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1374
/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *      @qc: active command this error is charged to, or NULL to charge
 *           the port's eh_info instead
 *
 *      In most cases, just clear the interrupt and move on.  However,
 *      some cases require an eDMA reset, which is done right before
 *      the COMRESET in mv_phy_reset().  The SERR case requires a
 *      clear of pending errors in the SATA SERROR register.  Finally,
 *      if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */

	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
	}

	/* generation-specific handling: Gen I (50xx) has its own
	 * self-disable bit and freeze mask; Gen II/IIE also latch SERR.
	 */
	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			/* NOTE(review): shadows the outer 'pp' */
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			/* NOTE(review): shadows the outer 'pp' */
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			/* NOTE(review): plain '=' discards an AC_ERR_DEV
			 * bit possibly set above -- was '|=' intended?
			 * Confirm before changing.
			 */
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* no recognized cause bit: still report *something* to EH */
	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	/* freeze the port for the serious causes, otherwise just abort
	 * outstanding commands
	 */
	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
1478
1479static void mv_intr_pio(struct ata_port *ap)
1480{
1481 struct ata_queued_cmd *qc;
1482 u8 ata_status;
1483
1484 /* ignore spurious intr if drive still BUSY */
1485 ata_status = readb(ap->ioaddr.status_addr);
1486 if (unlikely(ata_status & ATA_BUSY))
1487 return;
1488
1489 /* get active ATA command */
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001490 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001491 if (unlikely(!qc)) /* no active tag */
1492 return;
1493 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1494 return;
1495
1496 /* and finally, complete the ATA command */
1497 qc->err_mask |= ac_err_mask(ata_status);
1498 ata_qc_complete(qc);
1499}
1500
/* Reap completed commands from the port's EDMA response (CRPB) queue.
 *
 * Walks the CRPB ring from the driver's cached read index up to the
 * hardware's in-pointer, completing the matching qc for each entry.
 * If any CRPB carries error flags in its low status byte, the whole
 * port is handed to mv_err_intr() and the hardware out-pointer is
 * deliberately NOT advanced.
 *
 * LOCKING: inherited from caller (mv_interrupt holds host->lock).
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else if (IS_GEN_II(hpriv))
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_6) & 0x3f;

		else /* IS_GEN_IIE */
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_7) & 0x3f;

		qc = ata_qc_from_tag(ap, tag);

		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
		 * bits (WARNING: might not necessarily be associated
		 * with this command), which -should- be clear
		 * if all is well
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if (unlikely(status & 0xff)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	/* tell the hardware how far we have consumed, in one write */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1572
Brett Russ05b308e2005-10-05 17:08:53 -04001573/**
1574 * mv_host_intr - Handle all interrupts on the given host controller
Jeff Garzikcca39742006-08-24 03:19:22 -04001575 * @host: host specific structure
Brett Russ05b308e2005-10-05 17:08:53 -04001576 * @relevant: port error bits relevant to this host controller
1577 * @hc: which host controller we're to look at
1578 *
1579 * Read then write clear the HC interrupt status then walk each
1580 * port connected to the HC and see if it needs servicing. Port
1581 * success ints are reported in the HC interrupt status reg, the
1582 * port error ints are reported in the higher level main
1583 * interrupt status register and thus are passed in via the
1584 * 'relevant' argument.
1585 *
1586 * LOCKING:
1587 * Inherited from caller.
1588 */
Jeff Garzikcca39742006-08-24 03:19:22 -04001589static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
Brett Russ20f733e2005-09-01 18:26:17 -04001590{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001591 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
Brett Russ20f733e2005-09-01 18:26:17 -04001592 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
Brett Russ20f733e2005-09-01 18:26:17 -04001593 u32 hc_irq_cause;
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001594 int port, port0;
Brett Russ20f733e2005-09-01 18:26:17 -04001595
Jeff Garzik35177262007-02-24 21:26:42 -05001596 if (hc == 0)
Brett Russ20f733e2005-09-01 18:26:17 -04001597 port0 = 0;
Jeff Garzik35177262007-02-24 21:26:42 -05001598 else
Brett Russ20f733e2005-09-01 18:26:17 -04001599 port0 = MV_PORTS_PER_HC;
Brett Russ20f733e2005-09-01 18:26:17 -04001600
1601 /* we'll need the HC success int register in most cases */
1602 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001603 if (!hc_irq_cause)
1604 return;
1605
1606 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001607
1608 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001609 hc, relevant, hc_irq_cause);
Brett Russ20f733e2005-09-01 18:26:17 -04001610
1611 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
Jeff Garzikcca39742006-08-24 03:19:22 -04001612 struct ata_port *ap = host->ports[port];
Mark Lord63af2a52006-03-29 09:50:31 -05001613 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001614 int have_err_bits, hard_port, shift;
Jeff Garzik55d8ca42006-03-29 19:43:31 -05001615
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001616 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
Jeff Garzika2c91a82005-11-17 05:44:44 -05001617 continue;
1618
Brett Russ31961942005-09-30 01:36:00 -04001619 shift = port << 1; /* (port * 2) */
Brett Russ20f733e2005-09-01 18:26:17 -04001620 if (port >= MV_PORTS_PER_HC) {
1621 shift++; /* skip bit 8 in the HC Main IRQ reg */
1622 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001623 have_err_bits = ((PORT0_ERR << shift) & relevant);
1624
1625 if (unlikely(have_err_bits)) {
1626 struct ata_queued_cmd *qc;
1627
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001628 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001629 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1630 continue;
1631
1632 mv_err_intr(ap, qc);
1633 continue;
Brett Russ20f733e2005-09-01 18:26:17 -04001634 }
Jeff Garzik8b260242005-11-12 12:32:50 -05001635
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001636 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1637
1638 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1639 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1640 mv_intr_edma(ap);
1641 } else {
1642 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1643 mv_intr_pio(ap);
Brett Russ20f733e2005-09-01 18:26:17 -04001644 }
1645 }
1646 VPRINTK("EXIT\n");
1647}
1648
/* Handle a PCI-level error interrupt.
 *
 * Logs and dumps all registers, write-clears the PCI IRQ cause, then
 * charges AC_ERR_HOST_BUS to every online port (active qc if there is
 * one, otherwise the port's eh_info) and freezes it so libata EH will
 * hard-reset it.  Only the first port gets the cause string pushed,
 * to avoid repeating it once per port.
 */
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			/* overwrite (not OR) any pending EH action */
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
1687
/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	u32 irq_stat;

	/* main cause is sampled before taking the host lock; the lock is
	 * only acquired once we know there is real work to dispatch
	 */
	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat))
		return IRQ_NONE;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);
	spin_lock(&host->lock);

	/* a PCI error trumps (and skips) all per-HC servicing */
	if (unlikely(irq_stat & PCI_ERR)) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1739
Jeff Garzikc9d39132005-11-13 17:47:51 -05001740static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1741{
1742 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1743 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1744
1745 return hc_mmio + ofs;
1746}
1747
1748static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1749{
1750 unsigned int ofs;
1751
1752 switch (sc_reg_in) {
1753 case SCR_STATUS:
1754 case SCR_ERROR:
1755 case SCR_CONTROL:
1756 ofs = sc_reg_in * sizeof(u32);
1757 break;
1758 default:
1759 ofs = 0xffffffffU;
1760 break;
1761 }
1762 return ofs;
1763}
1764
Tejun Heoda3dbb12007-07-16 14:29:40 +09001765static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001766{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001767 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1768 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001769 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1770
Tejun Heoda3dbb12007-07-16 14:29:40 +09001771 if (ofs != 0xffffffffU) {
1772 *val = readl(addr + ofs);
1773 return 0;
1774 } else
1775 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001776}
1777
Tejun Heoda3dbb12007-07-16 14:29:40 +09001778static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001779{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001780 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1781 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001782 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1783
Tejun Heoda3dbb12007-07-16 14:29:40 +09001784 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001785 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001786 return 0;
1787 } else
1788 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001789}
1790
Jeff Garzik522479f2005-11-12 22:14:02 -05001791static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1792{
Jeff Garzik522479f2005-11-12 22:14:02 -05001793 int early_5080;
1794
Auke Kok44c10132007-06-08 15:46:36 -07001795 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
Jeff Garzik522479f2005-11-12 22:14:02 -05001796
1797 if (!early_5080) {
1798 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1799 tmp |= (1 << 0);
1800 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1801 }
1802
1803 mv_reset_pci_bus(pdev, mmio);
1804}
1805
/* 50xx: program the flash control register with its init value
 * (magic constant, presumably from the vendor driver -- not
 * documented here).  @hpriv is unused.
 */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1810
Jeff Garzik47c2b672005-11-12 21:13:17 -05001811static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001812 void __iomem *mmio)
1813{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001814 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1815 u32 tmp;
1816
1817 tmp = readl(phy_mmio + MV5_PHY_MODE);
1818
1819 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1820 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001821}
1822
/* 50xx: zero the GPIO port control, then modify the expansion-ROM BAR
 * control register.
 */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): '|= ~(1 << 0)' sets every bit EXCEPT bit 0,
	 * while mv5_reset_bus() uses '|= (1 << 0)' on this register.
	 * Was '&= ~(1 << 0)' intended here?  Verify against the 50xx
	 * datasheet before changing.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1835
/* 50xx: apply PHY workarounds for one port.
 *
 * On parts with the 50XXB0 erratum, first tweak LT mode and force the
 * low PHY control bits to 0x1; then rewrite the pre-emphasis (bits
 * 12:11) and amplitude (bits 7:5) fields of MV5_PHY_MODE from the
 * values captured earlier by mv5_read_preamp().
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	/* mask covers the pre (12:11) and amps (7:5) fields */
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* restore the saved signal parameters into the PHY mode reg */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1861
Jeff Garzikc9d39132005-11-13 17:47:51 -05001862
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* 50xx: bring one port's EDMA to a clean post-reset state: disable
 * EDMA, pulse the channel reset, then zero/initialize the per-port
 * EDMA registers (purpose of each offset annotated inline).
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1889
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* 50xx: clear one host controller's registers at offsets 0x0c-0x18,
 * then rewrite the register at 0x20 keeping only the 0x1c1c1c1c bits
 * and forcing 0x03030303 on.
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
1908
1909static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1910 unsigned int n_hc)
1911{
1912 unsigned int hc, port;
1913
1914 for (hc = 0; hc < n_hc; hc++) {
1915 for (port = 0; port < MV_PORTS_PER_HC; port++)
1916 mv5_reset_hc_port(hpriv, mmio,
1917 (hc * MV_PORTS_PER_HC) + port);
1918
1919 mv5_reset_one_hc(hpriv, mmio, hc);
1920 }
1921
1922 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05001923}
1924
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
/* Quiesce PCI-side state: clear byte 2 of the PCI mode register, zero
 * the timers, IRQ masks/causes and error-latch registers, and program
 * the crossbar timeout.
 */
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
1948
/* 60xx: do the 50xx flash init, then also set bits 5 and 6 of the
 * GPIO port control register (preserving only its low two bits).
 */
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
1960
/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @mmio: base address of the HBA
 *      @hpriv: host private data (unused here)
 *      @n_hc: number of host controllers (unused here)
 *
 *      This routine only applies to 6xxx parts.  Returns 0 on success,
 *      1 if any step of the reset protocol times out.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* wait up to ~1ms (1000 x udelay(1)) for the master to idle */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
2024
Jeff Garzik47c2b672005-11-12 21:13:17 -05002025static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002026 void __iomem *mmio)
2027{
2028 void __iomem *port_mmio;
2029 u32 tmp;
2030
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002031 tmp = readl(mmio + MV_RESET_CFG);
2032 if ((tmp & (1 << 0)) == 0) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002033 hpriv->signal[idx].amps = 0x7 << 8;
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002034 hpriv->signal[idx].pre = 0x1 << 5;
2035 return;
2036 }
2037
2038 port_mmio = mv_port_base(mmio, idx);
2039 tmp = readl(port_mmio + PHY_MODE2);
2040
2041 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2042 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2043}
2044
/* 60xx: write 0x60 (bits 5 and 6 -- the same bits mv6_reset_flash()
 * sets) to the GPIO port control register.  @hpriv is unused.
 */
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
2049
/* 60xx: apply PHY workarounds for one port.
 *
 * For the 60X1B2/60X1C0 errata, PHY mode 2 gets a toggle sequence and
 * PHY mode 4 a bit fixup (with register 0x310 saved/restored around it
 * on 60X1B2 only).  PHY mode 3 gets an undocumented magic value, and
 * finally the amplitude/pre-emphasis fields saved by mv6_read_preamp()
 * are written back into PHY mode 2 (with IIE-specific fixed bits).
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	/* both fixups trigger on the same two errata flags */
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		/* pulse bit 31 with bit 16 cleared, then drop both */
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* 60X1B2 only: save reg 0x310, restored below */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
2115
/* Pulse ATA_RST on one channel's EDMA command register, then run the
 * generation-specific PHY errata fixups.
 *
 * While the reset is asserted, Gen II parts additionally get the
 * SATA interface control register forced to gen2i speed (values from
 * the chip spec).  Gen I parts need an extra 1ms settle afterwards.
 */
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}
2142
Jeff Garzikc9d39132005-11-13 17:47:51 -05002143/**
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002144 * mv_phy_reset - Perform eDMA reset followed by COMRESET
Jeff Garzikc9d39132005-11-13 17:47:51 -05002145 * @ap: ATA channel to manipulate
2146 *
2147 * Part of this is taken from __sata_phy_reset and modified to
2148 * not sleep since this routine gets called from interrupt level.
2149 *
2150 * LOCKING:
2151 * Inherited from caller. This is coded to safe to call at
2152 * interrupt level, i.e. it does not sleep.
2153 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002154static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2155 unsigned long deadline)
Jeff Garzikc9d39132005-11-13 17:47:51 -05002156{
2157 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikcca39742006-08-24 03:19:22 -04002158 struct mv_host_priv *hpriv = ap->host->private_data;
Jeff Garzikc9d39132005-11-13 17:47:51 -05002159 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzik22374672005-11-17 10:59:48 -05002160 int retry = 5;
2161 u32 sstatus;
Jeff Garzikc9d39132005-11-13 17:47:51 -05002162
2163 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002164
Tejun Heoda3dbb12007-07-16 14:29:40 +09002165#ifdef DEBUG
2166 {
2167 u32 sstatus, serror, scontrol;
2168
2169 mv_scr_read(ap, SCR_STATUS, &sstatus);
2170 mv_scr_read(ap, SCR_ERROR, &serror);
2171 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2172 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2173 "SCtrl 0x%08x\n", status, serror, scontrol);
2174 }
2175#endif
Brett Russ20f733e2005-09-01 18:26:17 -04002176
Jeff Garzik22374672005-11-17 10:59:48 -05002177 /* Issue COMRESET via SControl */
2178comreset_retry:
Tejun Heo936fd732007-08-06 18:36:23 +09002179 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002180 msleep(1);
Jeff Garzik22374672005-11-17 10:59:48 -05002181
Tejun Heo936fd732007-08-06 18:36:23 +09002182 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002183 msleep(20);
Jeff Garzik22374672005-11-17 10:59:48 -05002184
Brett Russ31961942005-09-30 01:36:00 -04002185 do {
Tejun Heo936fd732007-08-06 18:36:23 +09002186 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
Andres Salomon62f1d0e2006-09-11 08:51:05 -04002187 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
Brett Russ31961942005-09-30 01:36:00 -04002188 break;
Jeff Garzik22374672005-11-17 10:59:48 -05002189
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002190 msleep(1);
Jeff Garzikc5d3e452007-07-11 18:30:50 -04002191 } while (time_before(jiffies, deadline));
Brett Russ20f733e2005-09-01 18:26:17 -04002192
Jeff Garzik22374672005-11-17 10:59:48 -05002193 /* work around errata */
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002194 if (IS_GEN_II(hpriv) &&
Jeff Garzik22374672005-11-17 10:59:48 -05002195 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2196 (retry-- > 0))
2197 goto comreset_retry;
Jeff Garzik095fec82005-11-12 09:50:49 -05002198
Tejun Heoda3dbb12007-07-16 14:29:40 +09002199#ifdef DEBUG
2200 {
2201 u32 sstatus, serror, scontrol;
2202
2203 mv_scr_read(ap, SCR_STATUS, &sstatus);
2204 mv_scr_read(ap, SCR_ERROR, &serror);
2205 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2206 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2207 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2208 }
2209#endif
Brett Russ31961942005-09-30 01:36:00 -04002210
Tejun Heo936fd732007-08-06 18:36:23 +09002211 if (ata_link_offline(&ap->link)) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002212 *class = ATA_DEV_NONE;
Brett Russ20f733e2005-09-01 18:26:17 -04002213 return;
2214 }
2215
Jeff Garzik22374672005-11-17 10:59:48 -05002216 /* even after SStatus reflects that device is ready,
2217 * it seems to take a while for link to be fully
2218 * established (and thus Status no longer 0x80/0x7F),
2219 * so we poll a bit for that, here.
2220 */
2221 retry = 20;
2222 while (1) {
2223 u8 drv_stat = ata_check_status(ap);
2224 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2225 break;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002226 msleep(500);
Jeff Garzik22374672005-11-17 10:59:48 -05002227 if (retry-- <= 0)
2228 break;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002229 if (time_after(jiffies, deadline))
2230 break;
Jeff Garzik22374672005-11-17 10:59:48 -05002231 }
2232
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002233 /* FIXME: if we passed the deadline, the following
2234 * code probably produces an invalid result
2235 */
Brett Russ20f733e2005-09-01 18:26:17 -04002236
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002237 /* finally, read device signature from TF registers */
Tejun Heo3f198592007-09-02 23:23:57 +09002238 *class = ata_dev_try_classify(ap->link.device, 1, NULL);
Jeff Garzik095fec82005-11-12 09:50:49 -05002239
2240 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2241
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002242 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
Jeff Garzik095fec82005-11-12 09:50:49 -05002243
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002244 VPRINTK("EXIT\n");
Brett Russ20f733e2005-09-01 18:26:17 -04002245}
2246
Tejun Heocc0680a2007-08-06 18:36:23 +09002247static int mv_prereset(struct ata_link *link, unsigned long deadline)
Jeff Garzik22374672005-11-17 10:59:48 -05002248{
Tejun Heocc0680a2007-08-06 18:36:23 +09002249 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002250 struct mv_port_priv *pp = ap->private_data;
Tejun Heocc0680a2007-08-06 18:36:23 +09002251 struct ata_eh_context *ehc = &link->eh_context;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002252 int rc;
Jeff Garzik0ea9e172007-07-13 17:06:45 -04002253
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002254 rc = mv_stop_dma(ap);
2255 if (rc)
2256 ehc->i.action |= ATA_EH_HARDRESET;
2257
2258 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2259 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2260 ehc->i.action |= ATA_EH_HARDRESET;
2261 }
2262
2263 /* if we're about to do hardreset, nothing more to do */
2264 if (ehc->i.action & ATA_EH_HARDRESET)
2265 return 0;
2266
Tejun Heocc0680a2007-08-06 18:36:23 +09002267 if (ata_link_online(link))
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002268 rc = ata_wait_ready(ap, deadline);
2269 else
2270 rc = -ENODEV;
2271
2272 return rc;
Jeff Garzik22374672005-11-17 10:59:48 -05002273}
2274
Tejun Heocc0680a2007-08-06 18:36:23 +09002275static int mv_hardreset(struct ata_link *link, unsigned int *class,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002276 unsigned long deadline)
2277{
Tejun Heocc0680a2007-08-06 18:36:23 +09002278 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002279 struct mv_host_priv *hpriv = ap->host->private_data;
2280 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2281
2282 mv_stop_dma(ap);
2283
2284 mv_channel_reset(hpriv, mmio, ap->port_no);
2285
2286 mv_phy_reset(ap, class, deadline);
2287
2288 return 0;
2289}
2290
Tejun Heocc0680a2007-08-06 18:36:23 +09002291static void mv_postreset(struct ata_link *link, unsigned int *classes)
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002292{
Tejun Heocc0680a2007-08-06 18:36:23 +09002293 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002294 u32 serr;
2295
2296 /* print link status */
Tejun Heocc0680a2007-08-06 18:36:23 +09002297 sata_print_link_status(link);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002298
2299 /* clear SError */
Tejun Heocc0680a2007-08-06 18:36:23 +09002300 sata_scr_read(link, SCR_ERROR, &serr);
2301 sata_scr_write_flush(link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002302
2303 /* bail out if no device is present */
2304 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2305 DPRINTK("EXIT, no device\n");
2306 return;
2307 }
2308
2309 /* set up device control */
2310 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2311}
2312
/* Run libata EH with this driver's reset hooks; softreset is the
 * stock ata_std_softreset.
 */
static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}
2318
/* Post-internal-command hook: quiesce DMA on the port so subsequent
 * register-based accesses are safe.
 */
static void mv_post_int_cmd(struct ata_queued_cmd *qc)
{
	mv_stop_dma(qc->ap);
}
2323
2324static void mv_eh_freeze(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04002325{
Tejun Heo0d5ff562007-02-01 15:06:36 +09002326 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002327 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2328 u32 tmp, mask;
2329 unsigned int shift;
Brett Russ31961942005-09-30 01:36:00 -04002330
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002331 /* FIXME: handle coalescing completion events properly */
Brett Russ31961942005-09-30 01:36:00 -04002332
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002333 shift = ap->port_no * 2;
2334 if (hc > 0)
2335 shift++;
Brett Russ31961942005-09-30 01:36:00 -04002336
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002337 mask = 0x3 << shift;
Brett Russ31961942005-09-30 01:36:00 -04002338
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002339 /* disable assertion of portN err, done events */
2340 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2341 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2342}
2343
2344static void mv_eh_thaw(struct ata_port *ap)
2345{
2346 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2347 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2348 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2349 void __iomem *port_mmio = mv_ap_base(ap);
2350 u32 tmp, mask, hc_irq_cause;
2351 unsigned int shift, hc_port_no = ap->port_no;
2352
2353 /* FIXME: handle coalescing completion events properly */
2354
2355 shift = ap->port_no * 2;
2356 if (hc > 0) {
2357 shift++;
2358 hc_port_no -= 4;
Mark Lord9b358e32006-05-19 16:21:03 -04002359 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002360
2361 mask = 0x3 << shift;
2362
2363 /* clear EDMA errors on this port */
2364 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2365
2366 /* clear pending irq events */
2367 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2368 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2369 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2370 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2371
2372 /* enable assertion of portN err, done events */
2373 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2374 writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
Brett Russ31961942005-09-30 01:36:00 -04002375}
2376
Brett Russ05b308e2005-10-05 17:08:53 -04002377/**
2378 * mv_port_init - Perform some early initialization on a single port.
2379 * @port: libata data structure storing shadow register addresses
2380 * @port_mmio: base address of the port
2381 *
2382 * Initialize shadow register mmio addresses, clear outstanding
2383 * interrupts on the port, and unmask interrupts for the future
2384 * start of the port.
2385 *
2386 * LOCKING:
2387 * Inherited from caller.
2388 */
Brett Russ31961942005-09-30 01:36:00 -04002389static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2390{
Tejun Heo0d5ff562007-02-01 15:06:36 +09002391 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
Brett Russ31961942005-09-30 01:36:00 -04002392 unsigned serr_ofs;
2393
Jeff Garzik8b260242005-11-12 12:32:50 -05002394 /* PIO related setup
Brett Russ31961942005-09-30 01:36:00 -04002395 */
2396 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
Jeff Garzik8b260242005-11-12 12:32:50 -05002397 port->error_addr =
Brett Russ31961942005-09-30 01:36:00 -04002398 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2399 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2400 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2401 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2402 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2403 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
Jeff Garzik8b260242005-11-12 12:32:50 -05002404 port->status_addr =
Brett Russ31961942005-09-30 01:36:00 -04002405 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2406 /* special case: control/altstatus doesn't have ATA_REG_ address */
2407 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2408
2409 /* unused: */
Randy Dunlap8d9db2d2007-02-16 01:40:06 -08002410 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
Brett Russ20f733e2005-09-01 18:26:17 -04002411
Brett Russ31961942005-09-30 01:36:00 -04002412 /* Clear any currently outstanding port interrupt conditions */
2413 serr_ofs = mv_scr_offset(SCR_ERROR);
2414 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2415 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2416
Brett Russ20f733e2005-09-01 18:26:17 -04002417 /* unmask all EDMA error interrupts */
Brett Russ31961942005-09-30 01:36:00 -04002418 writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04002419
Jeff Garzik8b260242005-11-12 12:32:50 -05002420 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
Brett Russ31961942005-09-30 01:36:00 -04002421 readl(port_mmio + EDMA_CFG_OFS),
2422 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2423 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
Brett Russ20f733e2005-09-01 18:26:17 -04002424}
2425
/* Identify the chip generation from the board index and select the
 * matching ops vector plus the errata-workaround flags keyed on the
 * PCI revision ID.  Returns 0 on success, 1 on an invalid board index.
 * Code is intentionally a flat table — do not reorder cases.
 */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch(board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			/* unknown stepping: assume the newest known errata */
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;

	return 0;
}
2521
/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 * @board_idx: controller index
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * NOTE: register-write ordering here is part of the hardware bring-up
 * sequence; do not reorder the steps.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	/* determine chip generation and errata flags; bail on unknown */
	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* sample per-port PHY state before resetting the controllers */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	/* per-port SATA interface setup and PHY errata workarounds */
	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	/* set up shadow registers and describe each port's mmio window */
	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);
		unsigned int offset = port_mmio - mmio;

		mv_port_init(&ap->ioaddr, port_mmio);

		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);

	/* Gen-I chips use a reduced set of main IRQ mask bits */
	if (IS_GEN_I(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));

done:
	return rc;
}
2617
Brett Russ05b308e2005-10-05 17:08:53 -04002618/**
2619 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002620 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002621 *
2622 * FIXME: complete this.
2623 *
2624 * LOCKING:
2625 * Inherited from caller.
2626 */
Tejun Heo4447d352007-04-17 23:44:08 +09002627static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002628{
Tejun Heo4447d352007-04-17 23:44:08 +09002629 struct pci_dev *pdev = to_pci_dev(host->dev);
2630 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07002631 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002632 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002633
2634 /* Use this to determine the HW stepping of the chip so we know
2635 * what errata to workaround
2636 */
Brett Russ31961942005-09-30 01:36:00 -04002637 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2638 if (scc == 0)
2639 scc_s = "SCSI";
2640 else if (scc == 0x01)
2641 scc_s = "RAID";
2642 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002643 scc_s = "?";
2644
2645 if (IS_GEN_I(hpriv))
2646 gen = "I";
2647 else if (IS_GEN_II(hpriv))
2648 gen = "II";
2649 else if (IS_GEN_IIE(hpriv))
2650 gen = "IIE";
2651 else
2652 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002653
Jeff Garzika9524a72005-10-30 14:39:11 -05002654 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002655 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2656 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002657 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2658}
2659
/**
 * mv_init_one - handle a positive probe of a Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * Allocates the ATA host and driver-private data, maps BARs, runs the
 * chip initialization, then activates the host.  All resources are
 * devres-managed (pcim_*/devm_*), so error paths simply return and the
 * driver core releases everything.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* map only the primary BAR; pin the device if someone holds it */
	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts: fall back to legacy INTx if MSI is
	 * disabled by the module parameter or fails to enable
	 */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
2722
/* Module entry: register the PCI driver; probing happens in mv_init_one() */
static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}
2727
/* Module exit: unregister the PCI driver (detaches all bound devices) */
static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}
2732
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

/* msi: read-only (0444) module parameter consulted in mv_init_one() */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);