blob: d9832e234e44ccd5cdfea2e83b6d59925782276e [file] [log] [blame]
/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
23
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Especially with PCI Message Signalled Interrupts (MSI), the overhead
  reduced by interrupt mitigation is often not worth the latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.

*/
59
60
Brett Russ20f733e2005-09-01 18:26:17 -040061#include <linux/kernel.h>
62#include <linux/module.h>
63#include <linux/pci.h>
64#include <linux/init.h>
65#include <linux/blkdev.h>
66#include <linux/delay.h>
67#include <linux/interrupt.h>
Brett Russ20f733e2005-09-01 18:26:17 -040068#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050069#include <linux/device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040070#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050071#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040072#include <scsi/scsi_device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040073#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040074
75#define DRV_NAME "sata_mv"
Jeff Garzik6c087722007-10-12 00:16:23 -040076#define DRV_VERSION "1.01"
Brett Russ20f733e2005-09-01 18:26:17 -040077
78enum {
79 /* BAR's are enumerated in terms of pci_resource_start() terms */
80 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
81 MV_IO_BAR = 2, /* offset 0x18: IO space */
82 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
83
84 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
85 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
86
87 MV_PCI_REG_BASE = 0,
88 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
Mark Lord615ab952006-05-19 16:24:56 -040089 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
90 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
91 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
92 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
93 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
94
Brett Russ20f733e2005-09-01 18:26:17 -040095 MV_SATAHC0_REG_BASE = 0x20000,
Jeff Garzik522479f2005-11-12 22:14:02 -050096 MV_FLASH_CTL = 0x1046c,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -050097 MV_GPIO_PORT_CTL = 0x104f0,
98 MV_RESET_CFG = 0x180d8,
Brett Russ20f733e2005-09-01 18:26:17 -040099
100 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
101 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
102 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
103 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
104
Brett Russ31961942005-09-30 01:36:00 -0400105 MV_MAX_Q_DEPTH = 32,
106 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
107
108 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
109 * CRPB needs alignment on a 256B boundary. Size == 256B
110 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
111 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
112 */
113 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
114 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
115 MV_MAX_SG_CT = 176,
116 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
117 MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
118
Brett Russ20f733e2005-09-01 18:26:17 -0400119 MV_PORTS_PER_HC = 4,
120 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
121 MV_PORT_HC_SHIFT = 2,
Brett Russ31961942005-09-30 01:36:00 -0400122 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
Brett Russ20f733e2005-09-01 18:26:17 -0400123 MV_PORT_MASK = 3,
124
125 /* Host Flags */
126 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
127 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400128 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400129 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
130 ATA_FLAG_PIO_POLLING,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500131 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
Brett Russ20f733e2005-09-01 18:26:17 -0400132
Brett Russ31961942005-09-30 01:36:00 -0400133 CRQB_FLAG_READ = (1 << 0),
134 CRQB_TAG_SHIFT = 1,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400135 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
136 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
Brett Russ31961942005-09-30 01:36:00 -0400137 CRQB_CMD_ADDR_SHIFT = 8,
138 CRQB_CMD_CS = (0x2 << 11),
139 CRQB_CMD_LAST = (1 << 15),
140
141 CRPB_FLAG_STATUS_SHIFT = 8,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400142 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
143 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
Brett Russ31961942005-09-30 01:36:00 -0400144
145 EPRD_FLAG_END_OF_TBL = (1 << 31),
146
Brett Russ20f733e2005-09-01 18:26:17 -0400147 /* PCI interface registers */
148
Brett Russ31961942005-09-30 01:36:00 -0400149 PCI_COMMAND_OFS = 0xc00,
150
Brett Russ20f733e2005-09-01 18:26:17 -0400151 PCI_MAIN_CMD_STS_OFS = 0xd30,
152 STOP_PCI_MASTER = (1 << 2),
153 PCI_MASTER_EMPTY = (1 << 3),
154 GLOB_SFT_RST = (1 << 4),
155
Jeff Garzik522479f2005-11-12 22:14:02 -0500156 MV_PCI_MODE = 0xd00,
157 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
158 MV_PCI_DISC_TIMER = 0xd04,
159 MV_PCI_MSI_TRIGGER = 0xc38,
160 MV_PCI_SERR_MASK = 0xc28,
161 MV_PCI_XBAR_TMOUT = 0x1d04,
162 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
163 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
164 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
165 MV_PCI_ERR_COMMAND = 0x1d50,
166
167 PCI_IRQ_CAUSE_OFS = 0x1d58,
168 PCI_IRQ_MASK_OFS = 0x1d5c,
Brett Russ20f733e2005-09-01 18:26:17 -0400169 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
170
171 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
172 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
173 PORT0_ERR = (1 << 0), /* shift by port # */
174 PORT0_DONE = (1 << 1), /* shift by port # */
175 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
176 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
177 PCI_ERR = (1 << 18),
178 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
179 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500180 PORTS_0_3_COAL_DONE = (1 << 8),
181 PORTS_4_7_COAL_DONE = (1 << 17),
Brett Russ20f733e2005-09-01 18:26:17 -0400182 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
183 GPIO_INT = (1 << 22),
184 SELF_INT = (1 << 23),
185 TWSI_INT = (1 << 24),
186 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500187 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
Jeff Garzik8b260242005-11-12 12:32:50 -0500188 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
Brett Russ20f733e2005-09-01 18:26:17 -0400189 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
190 HC_MAIN_RSVD),
Jeff Garzikfb621e22007-02-25 04:19:45 -0500191 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
192 HC_MAIN_RSVD_5),
Brett Russ20f733e2005-09-01 18:26:17 -0400193
194 /* SATAHC registers */
195 HC_CFG_OFS = 0,
196
197 HC_IRQ_CAUSE_OFS = 0x14,
Brett Russ31961942005-09-30 01:36:00 -0400198 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
Brett Russ20f733e2005-09-01 18:26:17 -0400199 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
200 DEV_IRQ = (1 << 8), /* shift by port # */
201
202 /* Shadow block registers */
Brett Russ31961942005-09-30 01:36:00 -0400203 SHD_BLK_OFS = 0x100,
204 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
Brett Russ20f733e2005-09-01 18:26:17 -0400205
206 /* SATA registers */
207 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
208 SATA_ACTIVE_OFS = 0x350,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500209 PHY_MODE3 = 0x310,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500210 PHY_MODE4 = 0x314,
211 PHY_MODE2 = 0x330,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500212 MV5_PHY_MODE = 0x74,
213 MV5_LT_MODE = 0x30,
214 MV5_PHY_CTL = 0x0C,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500215 SATA_INTERFACE_CTL = 0x050,
216
217 MV_M2_PREAMP_MASK = 0x7e0,
Brett Russ20f733e2005-09-01 18:26:17 -0400218
219 /* Port registers */
220 EDMA_CFG_OFS = 0,
Brett Russ31961942005-09-30 01:36:00 -0400221 EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */
222 EDMA_CFG_NCQ = (1 << 5),
223 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
224 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
225 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
Brett Russ20f733e2005-09-01 18:26:17 -0400226
227 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
228 EDMA_ERR_IRQ_MASK_OFS = 0xc,
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400229 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
230 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
231 EDMA_ERR_DEV = (1 << 2), /* device error */
232 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
233 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
234 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400235 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
236 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400237 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400238 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400239 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
240 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
241 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
242 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
243 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
Brett Russ20f733e2005-09-01 18:26:17 -0400244 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400245 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
246 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
247 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
248 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400249 EDMA_ERR_OVERRUN_5 = (1 << 5),
250 EDMA_ERR_UNDERRUN_5 = (1 << 6),
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400251 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
252 EDMA_ERR_PRD_PAR |
253 EDMA_ERR_DEV_DCON |
254 EDMA_ERR_DEV_CON |
255 EDMA_ERR_SERR |
256 EDMA_ERR_SELF_DIS |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400257 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400258 EDMA_ERR_CRPB_PAR |
259 EDMA_ERR_INTRL_PAR |
260 EDMA_ERR_IORDY |
261 EDMA_ERR_LNK_CTRL_RX_2 |
262 EDMA_ERR_LNK_DATA_RX |
263 EDMA_ERR_LNK_DATA_TX |
264 EDMA_ERR_TRANS_PROTO,
265 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
266 EDMA_ERR_PRD_PAR |
267 EDMA_ERR_DEV_DCON |
268 EDMA_ERR_DEV_CON |
269 EDMA_ERR_OVERRUN_5 |
270 EDMA_ERR_UNDERRUN_5 |
271 EDMA_ERR_SELF_DIS_5 |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400272 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400273 EDMA_ERR_CRPB_PAR |
274 EDMA_ERR_INTRL_PAR |
275 EDMA_ERR_IORDY,
Brett Russ20f733e2005-09-01 18:26:17 -0400276
Brett Russ31961942005-09-30 01:36:00 -0400277 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
278 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400279
280 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
281 EDMA_REQ_Q_PTR_SHIFT = 5,
282
283 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
284 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
285 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400286 EDMA_RSP_Q_PTR_SHIFT = 3,
287
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400288 EDMA_CMD_OFS = 0x28, /* EDMA command register */
289 EDMA_EN = (1 << 0), /* enable EDMA */
290 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
291 ATA_RST = (1 << 2), /* reset trans/link/phy */
Brett Russ20f733e2005-09-01 18:26:17 -0400292
Jeff Garzikc9d39132005-11-13 17:47:51 -0500293 EDMA_IORDY_TMOUT = 0x34,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500294 EDMA_ARB_CFG = 0x38,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500295
Brett Russ31961942005-09-30 01:36:00 -0400296 /* Host private flags (hp_flags) */
297 MV_HP_FLAG_MSI = (1 << 0),
Jeff Garzik47c2b672005-11-12 21:13:17 -0500298 MV_HP_ERRATA_50XXB0 = (1 << 1),
299 MV_HP_ERRATA_50XXB2 = (1 << 2),
300 MV_HP_ERRATA_60X1B2 = (1 << 3),
301 MV_HP_ERRATA_60X1C0 = (1 << 4),
Jeff Garzike4e7b892006-01-31 12:18:41 -0500302 MV_HP_ERRATA_XX42A0 = (1 << 5),
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400303 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
304 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
305 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
Brett Russ20f733e2005-09-01 18:26:17 -0400306
Brett Russ31961942005-09-30 01:36:00 -0400307 /* Port private flags (pp_flags) */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400308 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
309 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
Brett Russ31961942005-09-30 01:36:00 -0400310};
311
Jeff Garzikee9ccdf2007-07-12 15:51:22 -0400312#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
313#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
Jeff Garzike4e7b892006-01-31 12:18:41 -0500314#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500315
/* DMA-related constants kept in a separate (unsigned) enum because the
 * values are used as u32 masks.
 */
enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill-sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
330
/* Index into mv_port_info[]; also used as driver_data in mv_pci_tbl[]. */
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};
340
Brett Russ31961942005-09-30 01:36:00 -0400341/* Command ReQuest Block: 32B */
342struct mv_crqb {
Mark Lorde1469872006-05-22 19:02:03 -0400343 __le32 sg_addr;
344 __le32 sg_addr_hi;
345 __le16 ctrl_flags;
346 __le16 ata_cmd[11];
Brett Russ31961942005-09-30 01:36:00 -0400347};
348
Jeff Garzike4e7b892006-01-31 12:18:41 -0500349struct mv_crqb_iie {
Mark Lorde1469872006-05-22 19:02:03 -0400350 __le32 addr;
351 __le32 addr_hi;
352 __le32 flags;
353 __le32 len;
354 __le32 ata_cmd[4];
Jeff Garzike4e7b892006-01-31 12:18:41 -0500355};
356
Brett Russ31961942005-09-30 01:36:00 -0400357/* Command ResPonse Block: 8B */
358struct mv_crpb {
Mark Lorde1469872006-05-22 19:02:03 -0400359 __le16 id;
360 __le16 flags;
361 __le32 tmstmp;
Brett Russ31961942005-09-30 01:36:00 -0400362};
363
364/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
365struct mv_sg {
Mark Lorde1469872006-05-22 19:02:03 -0400366 __le32 addr;
367 __le32 flags_size;
368 __le32 addr_hi;
369 __le32 reserved;
Brett Russ20f733e2005-09-01 18:26:17 -0400370};
371
372struct mv_port_priv {
Brett Russ31961942005-09-30 01:36:00 -0400373 struct mv_crqb *crqb;
374 dma_addr_t crqb_dma;
375 struct mv_crpb *crpb;
376 dma_addr_t crpb_dma;
377 struct mv_sg *sg_tbl;
378 dma_addr_t sg_tbl_dma;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400379
380 unsigned int req_idx;
381 unsigned int resp_idx;
382
Brett Russ31961942005-09-30 01:36:00 -0400383 u32 pp_flags;
Brett Russ20f733e2005-09-01 18:26:17 -0400384};
385
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500386struct mv_port_signal {
387 u32 amps;
388 u32 pre;
389};
390
Jeff Garzik47c2b672005-11-12 21:13:17 -0500391struct mv_host_priv;
392struct mv_hw_ops {
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500393 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
394 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500395 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
396 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
397 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500398 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
399 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500400 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
401 void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500402};
403
Brett Russ20f733e2005-09-01 18:26:17 -0400404struct mv_host_priv {
Brett Russ31961942005-09-30 01:36:00 -0400405 u32 hp_flags;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500406 struct mv_port_signal signal[8];
Jeff Garzik47c2b672005-11-12 21:13:17 -0500407 const struct mv_hw_ops *ops;
Brett Russ20f733e2005-09-01 18:26:17 -0400408};
409
410static void mv_irq_clear(struct ata_port *ap);
Tejun Heoda3dbb12007-07-16 14:29:40 +0900411static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
412static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
413static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
414static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Brett Russ31961942005-09-30 01:36:00 -0400415static int mv_port_start(struct ata_port *ap);
416static void mv_port_stop(struct ata_port *ap);
417static void mv_qc_prep(struct ata_queued_cmd *qc);
Jeff Garzike4e7b892006-01-31 12:18:41 -0500418static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
Tejun Heo9a3d9eb2006-01-23 13:09:36 +0900419static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400420static void mv_error_handler(struct ata_port *ap);
421static void mv_post_int_cmd(struct ata_queued_cmd *qc);
422static void mv_eh_freeze(struct ata_port *ap);
423static void mv_eh_thaw(struct ata_port *ap);
Jeff Garzik6c087722007-10-12 00:16:23 -0400424static int mv_slave_config(struct scsi_device *sdev);
Brett Russ20f733e2005-09-01 18:26:17 -0400425static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
426
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500427static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
428 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500429static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
430static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
431 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500432static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
433 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500434static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
435static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500436
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500437static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
438 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500439static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
440static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
441 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500442static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
443 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500444static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
445static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500446static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
447 unsigned int port_no);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500448
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400449static struct scsi_host_template mv5_sht = {
Brett Russ20f733e2005-09-01 18:26:17 -0400450 .module = THIS_MODULE,
451 .name = DRV_NAME,
452 .ioctl = ata_scsi_ioctl,
453 .queuecommand = ata_scsi_queuecmd,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400454 .can_queue = ATA_DEF_QUEUE,
455 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400456 .sg_tablesize = MV_MAX_SG_CT / 2,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400457 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
458 .emulated = ATA_SHT_EMULATED,
459 .use_clustering = 1,
460 .proc_name = DRV_NAME,
461 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzik6c087722007-10-12 00:16:23 -0400462 .slave_configure = mv_slave_config,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400463 .slave_destroy = ata_scsi_slave_destroy,
464 .bios_param = ata_std_bios_param,
465};
466
467static struct scsi_host_template mv6_sht = {
468 .module = THIS_MODULE,
469 .name = DRV_NAME,
470 .ioctl = ata_scsi_ioctl,
471 .queuecommand = ata_scsi_queuecmd,
472 .can_queue = ATA_DEF_QUEUE,
Brett Russ20f733e2005-09-01 18:26:17 -0400473 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400474 .sg_tablesize = MV_MAX_SG_CT / 2,
Brett Russ20f733e2005-09-01 18:26:17 -0400475 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
476 .emulated = ATA_SHT_EMULATED,
Jeff Garzikd88184f2007-02-26 01:26:06 -0500477 .use_clustering = 1,
Brett Russ20f733e2005-09-01 18:26:17 -0400478 .proc_name = DRV_NAME,
479 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzik6c087722007-10-12 00:16:23 -0400480 .slave_configure = mv_slave_config,
Tejun Heoccf68c32006-05-31 18:28:09 +0900481 .slave_destroy = ata_scsi_slave_destroy,
Brett Russ20f733e2005-09-01 18:26:17 -0400482 .bios_param = ata_std_bios_param,
Brett Russ20f733e2005-09-01 18:26:17 -0400483};
484
Jeff Garzikc9d39132005-11-13 17:47:51 -0500485static const struct ata_port_operations mv5_ops = {
486 .port_disable = ata_port_disable,
487
488 .tf_load = ata_tf_load,
489 .tf_read = ata_tf_read,
490 .check_status = ata_check_status,
491 .exec_command = ata_exec_command,
492 .dev_select = ata_std_dev_select,
493
Jeff Garzikcffacd82007-03-09 09:46:47 -0500494 .cable_detect = ata_cable_sata,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500495
496 .qc_prep = mv_qc_prep,
497 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900498 .data_xfer = ata_data_xfer,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500499
Jeff Garzikc9d39132005-11-13 17:47:51 -0500500 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900501 .irq_on = ata_irq_on,
502 .irq_ack = ata_irq_ack,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500503
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400504 .error_handler = mv_error_handler,
505 .post_internal_cmd = mv_post_int_cmd,
506 .freeze = mv_eh_freeze,
507 .thaw = mv_eh_thaw,
508
Jeff Garzikc9d39132005-11-13 17:47:51 -0500509 .scr_read = mv5_scr_read,
510 .scr_write = mv5_scr_write,
511
512 .port_start = mv_port_start,
513 .port_stop = mv_port_stop,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500514};
515
516static const struct ata_port_operations mv6_ops = {
Brett Russ20f733e2005-09-01 18:26:17 -0400517 .port_disable = ata_port_disable,
518
519 .tf_load = ata_tf_load,
520 .tf_read = ata_tf_read,
521 .check_status = ata_check_status,
522 .exec_command = ata_exec_command,
523 .dev_select = ata_std_dev_select,
524
Jeff Garzikcffacd82007-03-09 09:46:47 -0500525 .cable_detect = ata_cable_sata,
Brett Russ20f733e2005-09-01 18:26:17 -0400526
Brett Russ31961942005-09-30 01:36:00 -0400527 .qc_prep = mv_qc_prep,
528 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900529 .data_xfer = ata_data_xfer,
Brett Russ20f733e2005-09-01 18:26:17 -0400530
Brett Russ20f733e2005-09-01 18:26:17 -0400531 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900532 .irq_on = ata_irq_on,
533 .irq_ack = ata_irq_ack,
Brett Russ20f733e2005-09-01 18:26:17 -0400534
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400535 .error_handler = mv_error_handler,
536 .post_internal_cmd = mv_post_int_cmd,
537 .freeze = mv_eh_freeze,
538 .thaw = mv_eh_thaw,
539
Brett Russ20f733e2005-09-01 18:26:17 -0400540 .scr_read = mv_scr_read,
541 .scr_write = mv_scr_write,
542
Brett Russ31961942005-09-30 01:36:00 -0400543 .port_start = mv_port_start,
544 .port_stop = mv_port_stop,
Brett Russ20f733e2005-09-01 18:26:17 -0400545};
546
Jeff Garzike4e7b892006-01-31 12:18:41 -0500547static const struct ata_port_operations mv_iie_ops = {
548 .port_disable = ata_port_disable,
549
550 .tf_load = ata_tf_load,
551 .tf_read = ata_tf_read,
552 .check_status = ata_check_status,
553 .exec_command = ata_exec_command,
554 .dev_select = ata_std_dev_select,
555
Jeff Garzikcffacd82007-03-09 09:46:47 -0500556 .cable_detect = ata_cable_sata,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500557
558 .qc_prep = mv_qc_prep_iie,
559 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900560 .data_xfer = ata_data_xfer,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500561
Jeff Garzike4e7b892006-01-31 12:18:41 -0500562 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900563 .irq_on = ata_irq_on,
564 .irq_ack = ata_irq_ack,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500565
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400566 .error_handler = mv_error_handler,
567 .post_internal_cmd = mv_post_int_cmd,
568 .freeze = mv_eh_freeze,
569 .thaw = mv_eh_thaw,
570
Jeff Garzike4e7b892006-01-31 12:18:41 -0500571 .scr_read = mv_scr_read,
572 .scr_write = mv_scr_write,
573
574 .port_start = mv_port_start,
575 .port_stop = mv_port_stop,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500576};
577
Arjan van de Ven98ac62d2005-11-28 10:06:23 +0100578static const struct ata_port_info mv_port_info[] = {
Brett Russ20f733e2005-09-01 18:26:17 -0400579 { /* chip_504x */
Jeff Garzikcca39742006-08-24 03:19:22 -0400580 .flags = MV_COMMON_FLAGS,
Brett Russ31961942005-09-30 01:36:00 -0400581 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400582 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500583 .port_ops = &mv5_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400584 },
585 { /* chip_508x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400586 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
Brett Russ31961942005-09-30 01:36:00 -0400587 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400588 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500589 .port_ops = &mv5_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400590 },
Jeff Garzik47c2b672005-11-12 21:13:17 -0500591 { /* chip_5080 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400592 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500593 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400594 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500595 .port_ops = &mv5_ops,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500596 },
Brett Russ20f733e2005-09-01 18:26:17 -0400597 { /* chip_604x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400598 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Brett Russ31961942005-09-30 01:36:00 -0400599 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400600 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500601 .port_ops = &mv6_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400602 },
603 { /* chip_608x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400604 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
605 MV_FLAG_DUAL_HC,
Brett Russ31961942005-09-30 01:36:00 -0400606 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400607 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500608 .port_ops = &mv6_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400609 },
Jeff Garzike4e7b892006-01-31 12:18:41 -0500610 { /* chip_6042 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400611 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500612 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400613 .udma_mask = ATA_UDMA6,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500614 .port_ops = &mv_iie_ops,
615 },
616 { /* chip_7042 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400617 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500618 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400619 .udma_mask = ATA_UDMA6,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500620 .port_ops = &mv_iie_ops,
621 },
Brett Russ20f733e2005-09-01 18:26:17 -0400622};
623
Jeff Garzik3b7d6972005-11-10 11:04:11 -0500624static const struct pci_device_id mv_pci_tbl[] = {
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400625 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
626 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
627 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
628 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
Alan Coxcfbf7232007-07-09 14:38:41 +0100629 /* RocketRAID 1740/174x have different identifiers */
630 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
631 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
Brett Russ20f733e2005-09-01 18:26:17 -0400632
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400633 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
634 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
635 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
636 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
637 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
Jeff Garzik29179532005-11-11 08:08:03 -0500638
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400639 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
640
Florian Attenbergerd9f9c6b2007-07-02 17:09:29 +0200641 /* Adaptec 1430SA */
642 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
643
Olof Johanssone93f09d2007-01-18 18:39:59 -0600644 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
645
Morrison, Tom6a3d5862007-03-06 02:38:10 -0800646 /* add Marvell 7042 support */
647 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
648
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400649 { } /* terminate list */
Brett Russ20f733e2005-09-01 18:26:17 -0400650};
651
652static struct pci_driver mv_pci_driver = {
653 .name = DRV_NAME,
654 .id_table = mv_pci_tbl,
655 .probe = mv_init_one,
656 .remove = ata_pci_remove_one,
657};
658
/* Chip-family hardware ops for the 50xx (Gen I) controllers. */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
667
/* Chip-family hardware ops for the 60xx/7042 (Gen II/IIE) controllers.
 * Note the bus reset goes through the generic PCI path here, unlike
 * the 50xx family above.
 */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
676
/*
 * module options
 */
/* NOTE(review): consumed at probe time; the module_param() registration
 * is presumably elsewhere in this file -- verify. */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
681
682
Jeff Garzikd88184f2007-02-26 01:26:06 -0500683/* move to PCI layer or libata core? */
684static int pci_go_64(struct pci_dev *pdev)
685{
686 int rc;
687
688 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
689 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
690 if (rc) {
691 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
692 if (rc) {
693 dev_printk(KERN_ERR, &pdev->dev,
694 "64-bit DMA enable failed\n");
695 return rc;
696 }
697 }
698 } else {
699 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
700 if (rc) {
701 dev_printk(KERN_ERR, &pdev->dev,
702 "32-bit DMA enable failed\n");
703 return rc;
704 }
705 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
706 if (rc) {
707 dev_printk(KERN_ERR, &pdev->dev,
708 "32-bit consistent DMA enable failed\n");
709 return rc;
710 }
711 }
712
713 return rc;
714}
715
Jeff Garzikddef9bb2006-02-02 16:17:06 -0500716/*
Brett Russ20f733e2005-09-01 18:26:17 -0400717 * Functions
718 */
719
/* Write @data to @addr, then read it back so the write cannot sit in
 * a PCI posted-write buffer ("write-and-flush").
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
725
/* MMIO base of SATA host controller block @hc within chip base @base. */
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}
730
/* Which host controller (HC) a global port number belongs to. */
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}
735
/* Port index local to its host controller (low bits of the global port). */
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
740
/* MMIO base of the host controller that owns global port @port. */
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}
746
/* MMIO base of an individual port's register block: its HC base, past
 * the HC arbiter registers, plus a per-port stride.
 */
static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
753
/* Per-port MMIO base for a libata port, via the host's primary BAR. */
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}
758
Jeff Garzikcca39742006-08-24 03:19:22 -0400759static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400760{
Jeff Garzikcca39742006-08-24 03:19:22 -0400761 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400762}
763
/* Required libata hook; intentionally empty.  NOTE(review): interrupt
 * acknowledgement appears to be handled centrally elsewhere in this
 * driver -- confirm against the host interrupt handler. */
static void mv_irq_clear(struct ata_port *ap)
{
}
767
Jeff Garzik6c087722007-10-12 00:16:23 -0400768static int mv_slave_config(struct scsi_device *sdev)
769{
770 int rc = ata_scsi_slave_config(sdev);
771 if (rc)
772 return rc;
773
774 blk_queue_max_phys_segments(sdev->request_queue, MV_MAX_SG_CT / 2);
775
776 return 0; /* scsi layer doesn't check return value, sigh */
777}
778
/*
 * mv_set_edma_ptrs - program the EDMA request/response queue registers.
 * @port_mmio: port register base
 * @hpriv: host private data (for errata flags)
 * @pp: port private data holding queue DMA addresses and sw indices
 *
 * Writes the base addresses and in/out pointers for both hardware
 * queues from the software-cached indices.  On chips with the XX42A0
 * erratum the full low 32 address bits are written alongside the
 * index; otherwise only the index field is written.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* CRQB ring must be 1024-byte aligned (low 10 bits clear) */
	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	/* CRPB ring must be 256-byte aligned (low 8 bits clear) */
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
818
/**
 * mv_start_dma - Enable eDMA engine
 * @base: port base address
 * @hpriv: host private data (forwarded to mv_set_edma_ptrs)
 * @pp: port private data
 *
 * If the cached state says eDMA is off: clear any stale error/event
 * bits, (re)program the queue pointers, then set EDMA_EN and update
 * the cache.  Verify the local cache of the eDMA state is accurate
 * with a WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
			 struct mv_port_priv *pp)
{
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		/* clear EDMA event indicators, if any */
		writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(base, hpriv, pp);

		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	/* hardware must agree with our cached "enabled" state */
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}
844
/**
 * __mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Issues EDMA_DS if the engine is (cached as) running, then polls for
 * the self-clearing EDMA_EN bit to drop, up to 1000 * 100us (~100ms).
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * Returns 0 on success, -EIO if the engine refuses to stop.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		/* cached-off state must match the hardware */
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
887
/**
 * mv_stop_dma - Disable eDMA engine, taking the host lock
 * @ap: ATA channel to manipulate
 *
 * Locked wrapper around __mv_stop_dma() for callers that do not
 * already hold ap->host->lock.
 *
 * Returns the result of __mv_stop_dma() (0 or -EIO).
 */
static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}
899
#ifdef ATA_DEBUG
/*
 * mv_dump_mem - hex-dump @bytes of MMIO space starting at @start.
 *
 * Debug-only helper (compiled out without ATA_DEBUG); prints up to
 * four 32-bit words per line, each read with readl().
 */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	/* byte offset is unsigned to match @bytes (avoids the
	 * signed/unsigned comparison the old 'int b' produced) */
	unsigned b;
	int w;

	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
914
/*
 * mv_dump_pci_cfg - hex-dump the first @bytes of @pdev's PCI config space.
 *
 * Body is compiled out unless ATA_DEBUG is defined.  Prints up to four
 * 32-bit dwords per line; config-read errors are deliberately ignored
 * (this is best-effort debug output).
 */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	/* unsigned offset to match @bytes (fixes sign-compare of 'int b') */
	unsigned b;
	int w;
	u32 dw;

	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
/*
 * mv_dump_all_regs - debug dump of PCI config, global, HC and port regs.
 * @mmio_base: chip MMIO base
 * @port: global port number, or negative to dump all ports/HCs
 * @pdev: PCI device for config-space dump, or NULL to skip it
 *
 * Compiled out unless ATA_DEBUG is defined.
 */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		/* negative port: dump everything */
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n",p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n",p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
975
Brett Russ20f733e2005-09-01 18:26:17 -0400976static unsigned int mv_scr_offset(unsigned int sc_reg_in)
977{
978 unsigned int ofs;
979
980 switch (sc_reg_in) {
981 case SCR_STATUS:
982 case SCR_CONTROL:
983 case SCR_ERROR:
984 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
985 break;
986 case SCR_ACTIVE:
987 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
988 break;
989 default:
990 ofs = 0xffffffffU;
991 break;
992 }
993 return ofs;
994}
995
Tejun Heoda3dbb12007-07-16 14:29:40 +0900996static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -0400997{
998 unsigned int ofs = mv_scr_offset(sc_reg_in);
999
Tejun Heoda3dbb12007-07-16 14:29:40 +09001000 if (ofs != 0xffffffffU) {
1001 *val = readl(mv_ap_base(ap) + ofs);
1002 return 0;
1003 } else
1004 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001005}
1006
Tejun Heoda3dbb12007-07-16 14:29:40 +09001007static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -04001008{
1009 unsigned int ofs = mv_scr_offset(sc_reg_in);
1010
Tejun Heoda3dbb12007-07-16 14:29:40 +09001011 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -04001012 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001013 return 0;
1014 } else
1015 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001016}
1017
/*
 * mv_edma_cfg - program the EDMA configuration register for @ap.
 *
 * Read-modify-write of EDMA_CFG with per-generation (Gen I / II / IIE)
 * bit settings; NCQ is explicitly disabled for all generations.
 */
static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable eQue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1049
/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * All allocations are device-managed (devm_*/dmam_*), so there is no
 * matching explicit free in mv_port_stop().
 *
 * Returns 0 on success, -ENOMEM or the ata_pad_alloc() error code
 * on failure.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	unsigned long flags;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	/* one coherent chunk carved into three sub-regions below */
	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	/* program EDMA config and queue pointers under the host lock */
	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, hpriv, port_mmio);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}
1122
/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.  The per-port memory allocated in
 * mv_port_start() is device-managed, so stopping the eDMA engine is
 * all that is needed here.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
}
1136
/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.  Each DMA segment is
 * split so that no ePRD entry crosses a 64KB (0x10000) boundary; the
 * final ePRD of the command gets EPRD_FLAG_END_OF_TBL.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg;

	mv_sg = pp->sg_tbl;
	ata_for_each_sg(sg, qc) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			/* clamp so this entry stops at the next 64KB boundary */
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			/* two 16-bit shifts: ">> 32" would be UB when
			 * dma_addr_t is 32 bits wide */
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			if (!sg_len && ata_sg_is_last(sg, qc))
				mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);

			mv_sg++;
		}

	}
}
1179
Mark Lorde1469872006-05-22 19:02:03 -04001180static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001181{
Mark Lord559eeda2006-05-19 16:40:15 -04001182 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001183 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001184 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001185}
1186
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* non-DMA protocols are issued through the regular libata path */
	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this*/

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accomodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	/* remaining shadow registers; the CMD entry is marked last */
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1278
/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * Gen IIE variant: the CRQB layout differs from Gen I/II -- the full
 * taskfile is packed into four 32-bit words instead of a sequence of
 * register/value pairs.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* non-DMA protocols are issued through the regular libata path */
	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
						   what we use as our tag */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	/* two 16-bit shifts avoid UB when dma_addr_t is 32 bits wide */
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1347
/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * Returns 0 for DMA commands; otherwise the ata_qc_issue_prot()
 * result.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(port_mmio, hpriv, pp);

	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1395
Brett Russ05b308e2005-10-05 17:08:53 -04001396/**
Brett Russ05b308e2005-10-05 17:08:53 -04001397 * mv_err_intr - Handle error interrupts on the port
1398 * @ap: ATA channel to manipulate
Mark Lord9b358e32006-05-19 16:21:03 -04001399 * @reset_allowed: bool: 0 == don't trigger from reset here
Brett Russ05b308e2005-10-05 17:08:53 -04001400 *
1401 * In most cases, just clear the interrupt and move on. However,
1402 * some cases require an eDMA reset, which is done right before
1403 * the COMRESET in mv_phy_reset(). The SERR case requires a
1404 * clear of pending errors in the SATA SERROR register. Finally,
1405 * if the port disabled DMA, update our cached copy to match.
1406 *
1407 * LOCKING:
1408 * Inherited from caller.
1409 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001410static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
Brett Russ20f733e2005-09-01 18:26:17 -04001411{
Brett Russ31961942005-09-30 01:36:00 -04001412 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001413 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1414 struct mv_port_priv *pp = ap->private_data;
1415 struct mv_host_priv *hpriv = ap->host->private_data;
1416 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1417 unsigned int action = 0, err_mask = 0;
1418 struct ata_eh_info *ehi = &ap->eh_info;
Brett Russ20f733e2005-09-01 18:26:17 -04001419
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001420 ata_ehi_clear_desc(ehi);
Brett Russ20f733e2005-09-01 18:26:17 -04001421
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001422 if (!edma_enabled) {
1423 /* just a guess: do we need to do this? should we
1424 * expand this, and do it in all cases?
1425 */
Tejun Heo81952c52006-05-15 20:57:47 +09001426 sata_scr_read(ap, SCR_ERROR, &serr);
1427 sata_scr_write_flush(ap, SCR_ERROR, serr);
Brett Russ20f733e2005-09-01 18:26:17 -04001428 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001429
1430 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1431
1432 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1433
1434 /*
1435 * all generations share these EDMA error cause bits
1436 */
1437
1438 if (edma_err_cause & EDMA_ERR_DEV)
1439 err_mask |= AC_ERR_DEV;
1440 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
Jeff Garzik6c1153e2007-07-13 15:20:15 -04001441 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001442 EDMA_ERR_INTRL_PAR)) {
1443 err_mask |= AC_ERR_ATA_BUS;
1444 action |= ATA_EH_HARDRESET;
Tejun Heob64bbc32007-07-16 14:29:39 +09001445 ata_ehi_push_desc(ehi, "parity error");
Brett Russafb0edd2005-10-05 17:08:42 -04001446 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001447 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1448 ata_ehi_hotplugged(ehi);
1449 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
Tejun Heob64bbc32007-07-16 14:29:39 +09001450 "dev disconnect" : "dev connect");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001451 }
1452
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04001453 if (IS_GEN_I(hpriv)) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001454 eh_freeze_mask = EDMA_EH_FREEZE_5;
1455
1456 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1457 struct mv_port_priv *pp = ap->private_data;
1458 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001459 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001460 }
1461 } else {
1462 eh_freeze_mask = EDMA_EH_FREEZE;
1463
1464 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1465 struct mv_port_priv *pp = ap->private_data;
1466 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001467 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001468 }
1469
1470 if (edma_err_cause & EDMA_ERR_SERR) {
1471 sata_scr_read(ap, SCR_ERROR, &serr);
1472 sata_scr_write_flush(ap, SCR_ERROR, serr);
1473 err_mask = AC_ERR_ATA_BUS;
1474 action |= ATA_EH_HARDRESET;
1475 }
1476 }
Brett Russ20f733e2005-09-01 18:26:17 -04001477
1478 /* Clear EDMA now that SERR cleanup done */
1479 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1480
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001481 if (!err_mask) {
1482 err_mask = AC_ERR_OTHER;
1483 action |= ATA_EH_HARDRESET;
1484 }
1485
1486 ehi->serror |= serr;
1487 ehi->action |= action;
1488
1489 if (qc)
1490 qc->err_mask |= err_mask;
1491 else
1492 ehi->err_mask |= err_mask;
1493
1494 if (edma_err_cause & eh_freeze_mask)
1495 ata_port_freeze(ap);
1496 else
1497 ata_port_abort(ap);
1498}
1499
/**
 * mv_intr_pio - Handle a device interrupt for a non-EDMA (PIO) command
 * @ap: ATA channel that raised the interrupt
 *
 * Reads the ATA status register (which also acks the device IRQ) and,
 * if a non-polled command is active, completes it with that status.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}
1521
/**
 * mv_intr_edma - Drain the EDMA response queue, completing commands
 * @ap: ATA channel whose EDMA signalled completion
 *
 * Advances the software response-queue out-pointer up to the
 * hardware's in-pointer, completing the qc for each response entry.
 * On an error status the work is handed to mv_err_intr() and the
 * hardware out-pointer is deliberately left un-updated.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else if (IS_GEN_II(hpriv))
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_6) & 0x3f;

		else /* IS_GEN_IIE */
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_7) & 0x3f;

		qc = ata_qc_from_tag(ap, tag);

		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
		 * bits (WARNING: might not necessarily be associated
		 * with this command), which -should- be clear
		 * if all is well
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if (unlikely(status & 0xff)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	/* tell hardware how far we have consumed, in one final write */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1593
Brett Russ05b308e2005-10-05 17:08:53 -04001594/**
1595 * mv_host_intr - Handle all interrupts on the given host controller
Jeff Garzikcca39742006-08-24 03:19:22 -04001596 * @host: host specific structure
Brett Russ05b308e2005-10-05 17:08:53 -04001597 * @relevant: port error bits relevant to this host controller
1598 * @hc: which host controller we're to look at
1599 *
1600 * Read then write clear the HC interrupt status then walk each
1601 * port connected to the HC and see if it needs servicing. Port
1602 * success ints are reported in the HC interrupt status reg, the
1603 * port error ints are reported in the higher level main
1604 * interrupt status register and thus are passed in via the
1605 * 'relevant' argument.
1606 *
1607 * LOCKING:
1608 * Inherited from caller.
1609 */
Jeff Garzikcca39742006-08-24 03:19:22 -04001610static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
Brett Russ20f733e2005-09-01 18:26:17 -04001611{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001612 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
Brett Russ20f733e2005-09-01 18:26:17 -04001613 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
Brett Russ20f733e2005-09-01 18:26:17 -04001614 u32 hc_irq_cause;
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001615 int port, port0;
Brett Russ20f733e2005-09-01 18:26:17 -04001616
Jeff Garzik35177262007-02-24 21:26:42 -05001617 if (hc == 0)
Brett Russ20f733e2005-09-01 18:26:17 -04001618 port0 = 0;
Jeff Garzik35177262007-02-24 21:26:42 -05001619 else
Brett Russ20f733e2005-09-01 18:26:17 -04001620 port0 = MV_PORTS_PER_HC;
Brett Russ20f733e2005-09-01 18:26:17 -04001621
1622 /* we'll need the HC success int register in most cases */
1623 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001624 if (!hc_irq_cause)
1625 return;
1626
1627 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001628
1629 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1630 hc,relevant,hc_irq_cause);
1631
1632 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
Jeff Garzikcca39742006-08-24 03:19:22 -04001633 struct ata_port *ap = host->ports[port];
Mark Lord63af2a52006-03-29 09:50:31 -05001634 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001635 int have_err_bits, hard_port, shift;
Jeff Garzik55d8ca42006-03-29 19:43:31 -05001636
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001637 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
Jeff Garzika2c91a82005-11-17 05:44:44 -05001638 continue;
1639
Brett Russ31961942005-09-30 01:36:00 -04001640 shift = port << 1; /* (port * 2) */
Brett Russ20f733e2005-09-01 18:26:17 -04001641 if (port >= MV_PORTS_PER_HC) {
1642 shift++; /* skip bit 8 in the HC Main IRQ reg */
1643 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001644 have_err_bits = ((PORT0_ERR << shift) & relevant);
1645
1646 if (unlikely(have_err_bits)) {
1647 struct ata_queued_cmd *qc;
1648
1649 qc = ata_qc_from_tag(ap, ap->active_tag);
1650 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1651 continue;
1652
1653 mv_err_intr(ap, qc);
1654 continue;
Brett Russ20f733e2005-09-01 18:26:17 -04001655 }
Jeff Garzik8b260242005-11-12 12:32:50 -05001656
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001657 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1658
1659 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1660 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1661 mv_intr_edma(ap);
1662 } else {
1663 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1664 mv_intr_pio(ap);
Brett Russ20f733e2005-09-01 18:26:17 -04001665 }
1666 }
1667 VPRINTK("EXIT\n");
1668}
1669
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001670static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1671{
1672 struct ata_port *ap;
1673 struct ata_queued_cmd *qc;
1674 struct ata_eh_info *ehi;
1675 unsigned int i, err_mask, printed = 0;
1676 u32 err_cause;
1677
1678 err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS);
1679
1680 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1681 err_cause);
1682
1683 DPRINTK("All regs @ PCI error\n");
1684 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1685
1686 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1687
1688 for (i = 0; i < host->n_ports; i++) {
1689 ap = host->ports[i];
1690 if (!ata_port_offline(ap)) {
1691 ehi = &ap->eh_info;
1692 ata_ehi_clear_desc(ehi);
1693 if (!printed++)
1694 ata_ehi_push_desc(ehi,
1695 "PCI err cause 0x%08x", err_cause);
1696 err_mask = AC_ERR_HOST_BUS;
1697 ehi->action = ATA_EH_HARDRESET;
1698 qc = ata_qc_from_tag(ap, ap->active_tag);
1699 if (qc)
1700 qc->err_mask |= err_mask;
1701 else
1702 ehi->err_mask |= err_mask;
1703
1704 ata_port_freeze(ap);
1705 }
1706 }
1707}
1708
Brett Russ05b308e2005-10-05 17:08:53 -04001709/**
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001710 * mv_interrupt - Main interrupt event handler
Brett Russ05b308e2005-10-05 17:08:53 -04001711 * @irq: unused
1712 * @dev_instance: private data; in this case the host structure
Brett Russ05b308e2005-10-05 17:08:53 -04001713 *
1714 * Read the read only register to determine if any host
1715 * controllers have pending interrupts. If so, call lower level
1716 * routine to handle. Also check for PCI errors which are only
1717 * reported here.
1718 *
Jeff Garzik8b260242005-11-12 12:32:50 -05001719 * LOCKING:
Jeff Garzikcca39742006-08-24 03:19:22 -04001720 * This routine holds the host lock while processing pending
Brett Russ05b308e2005-10-05 17:08:53 -04001721 * interrupts.
1722 */
David Howells7d12e782006-10-05 14:55:46 +01001723static irqreturn_t mv_interrupt(int irq, void *dev_instance)
Brett Russ20f733e2005-09-01 18:26:17 -04001724{
Jeff Garzikcca39742006-08-24 03:19:22 -04001725 struct ata_host *host = dev_instance;
Brett Russ20f733e2005-09-01 18:26:17 -04001726 unsigned int hc, handled = 0, n_hcs;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001727 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
Brett Russ20f733e2005-09-01 18:26:17 -04001728 u32 irq_stat;
1729
Brett Russ20f733e2005-09-01 18:26:17 -04001730 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001731
1732 /* check the cases where we either have nothing pending or have read
1733 * a bogus register value which can indicate HW removal or PCI fault
1734 */
Jeff Garzik35177262007-02-24 21:26:42 -05001735 if (!irq_stat || (0xffffffffU == irq_stat))
Brett Russ20f733e2005-09-01 18:26:17 -04001736 return IRQ_NONE;
Brett Russ20f733e2005-09-01 18:26:17 -04001737
Jeff Garzikcca39742006-08-24 03:19:22 -04001738 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1739 spin_lock(&host->lock);
Brett Russ20f733e2005-09-01 18:26:17 -04001740
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001741 if (unlikely(irq_stat & PCI_ERR)) {
1742 mv_pci_error(host, mmio);
1743 handled = 1;
1744 goto out_unlock; /* skip all other HC irq handling */
1745 }
1746
Brett Russ20f733e2005-09-01 18:26:17 -04001747 for (hc = 0; hc < n_hcs; hc++) {
1748 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1749 if (relevant) {
Jeff Garzikcca39742006-08-24 03:19:22 -04001750 mv_host_intr(host, relevant, hc);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001751 handled = 1;
Brett Russ20f733e2005-09-01 18:26:17 -04001752 }
1753 }
Mark Lord615ab952006-05-19 16:24:56 -04001754
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001755out_unlock:
Jeff Garzikcca39742006-08-24 03:19:22 -04001756 spin_unlock(&host->lock);
Brett Russ20f733e2005-09-01 18:26:17 -04001757
1758 return IRQ_RETVAL(handled);
1759}
1760
Jeff Garzikc9d39132005-11-13 17:47:51 -05001761static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1762{
1763 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1764 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1765
1766 return hc_mmio + ofs;
1767}
1768
1769static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1770{
1771 unsigned int ofs;
1772
1773 switch (sc_reg_in) {
1774 case SCR_STATUS:
1775 case SCR_ERROR:
1776 case SCR_CONTROL:
1777 ofs = sc_reg_in * sizeof(u32);
1778 break;
1779 default:
1780 ofs = 0xffffffffU;
1781 break;
1782 }
1783 return ofs;
1784}
1785
Tejun Heoda3dbb12007-07-16 14:29:40 +09001786static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001787{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001788 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1789 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001790 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1791
Tejun Heoda3dbb12007-07-16 14:29:40 +09001792 if (ofs != 0xffffffffU) {
1793 *val = readl(addr + ofs);
1794 return 0;
1795 } else
1796 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001797}
1798
Tejun Heoda3dbb12007-07-16 14:29:40 +09001799static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001800{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001801 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1802 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001803 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1804
Tejun Heoda3dbb12007-07-16 14:29:40 +09001805 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001806 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001807 return 0;
1808 } else
1809 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001810}
1811
Jeff Garzik522479f2005-11-12 22:14:02 -05001812static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1813{
Jeff Garzik522479f2005-11-12 22:14:02 -05001814 int early_5080;
1815
Auke Kok44c10132007-06-08 15:46:36 -07001816 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
Jeff Garzik522479f2005-11-12 22:14:02 -05001817
1818 if (!early_5080) {
1819 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1820 tmp |= (1 << 0);
1821 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1822 }
1823
1824 mv_reset_pci_bus(pdev, mmio);
1825}
1826
/* 50xx: write the flash control register back to its fixed value. */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1831
Jeff Garzik47c2b672005-11-12 21:13:17 -05001832static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001833 void __iomem *mmio)
1834{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001835 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1836 u32 tmp;
1837
1838 tmp = readl(phy_mmio + MV5_PHY_MODE);
1839
1840 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1841 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001842}
1843
Jeff Garzik47c2b672005-11-12 21:13:17 -05001844static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001845{
Jeff Garzik522479f2005-11-12 22:14:02 -05001846 u32 tmp;
1847
1848 writel(0, mmio + MV_GPIO_PORT_CTL);
1849
1850 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1851
1852 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1853 tmp |= ~(1 << 0);
1854 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001855}
1856
Jeff Garzik2a47ce02005-11-12 23:05:14 -05001857static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1858 unsigned int port)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05001859{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001860 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1861 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1862 u32 tmp;
1863 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1864
1865 if (fix_apm_sq) {
1866 tmp = readl(phy_mmio + MV5_LT_MODE);
1867 tmp |= (1 << 19);
1868 writel(tmp, phy_mmio + MV5_LT_MODE);
1869
1870 tmp = readl(phy_mmio + MV5_PHY_CTL);
1871 tmp &= ~0x3;
1872 tmp |= 0x1;
1873 writel(tmp, phy_mmio + MV5_PHY_CTL);
1874 }
1875
1876 tmp = readl(phy_mmio + MV5_PHY_MODE);
1877 tmp &= ~mask;
1878 tmp |= hpriv->signal[port].pre;
1879 tmp |= hpriv->signal[port].amps;
1880 writel(tmp, phy_mmio + MV5_PHY_MODE);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05001881}
1882
Jeff Garzikc9d39132005-11-13 17:47:51 -05001883
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* 50xx: disable EDMA, reset the channel, then zero/initialize the
 * port's EDMA register block.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1910
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* 50xx: zero one HC's interrupt registers and adjust its config at
 * offset 0x20 (clear some fields, set 0x03030303 defaults).
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
1929
1930static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1931 unsigned int n_hc)
1932{
1933 unsigned int hc, port;
1934
1935 for (hc = 0; hc < n_hc; hc++) {
1936 for (port = 0; port < MV_PORTS_PER_HC; port++)
1937 mv5_reset_hc_port(hpriv, mmio,
1938 (hc * MV_PORTS_PER_HC) + port);
1939
1940 mv5_reset_one_hc(hpriv, mmio, hc);
1941 }
1942
1943 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05001944}
1945
Jeff Garzik101ffae2005-11-12 22:17:49 -05001946#undef ZERO
1947#define ZERO(reg) writel(0, mmio + (reg))
1948static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
1949{
1950 u32 tmp;
1951
1952 tmp = readl(mmio + MV_PCI_MODE);
1953 tmp &= 0xff00ffff;
1954 writel(tmp, mmio + MV_PCI_MODE);
1955
1956 ZERO(MV_PCI_DISC_TIMER);
1957 ZERO(MV_PCI_MSI_TRIGGER);
1958 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1959 ZERO(HC_MAIN_IRQ_MASK_OFS);
1960 ZERO(MV_PCI_SERR_MASK);
1961 ZERO(PCI_IRQ_CAUSE_OFS);
1962 ZERO(PCI_IRQ_MASK_OFS);
1963 ZERO(MV_PCI_ERR_LOW_ADDRESS);
1964 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1965 ZERO(MV_PCI_ERR_ATTRIBUTE);
1966 ZERO(MV_PCI_ERR_COMMAND);
1967}
1968#undef ZERO
1969
/* 60xx: do the 50xx flash reset, then additionally set bits 5 and 6
 * of the GPIO port control (keeping only its low two bits).
 */
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
1981
1982/**
1983 * mv6_reset_hc - Perform the 6xxx global soft reset
1984 * @mmio: base address of the HBA
1985 *
1986 * This routine only applies to 6xxx parts.
1987 *
1988 * LOCKING:
1989 * Inherited from caller.
1990 */
Jeff Garzikc9d39132005-11-13 17:47:51 -05001991static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1992 unsigned int n_hc)
Jeff Garzik101ffae2005-11-12 22:17:49 -05001993{
1994 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
1995 int i, rc = 0;
1996 u32 t;
1997
1998 /* Following procedure defined in PCI "main command and status
1999 * register" table.
2000 */
2001 t = readl(reg);
2002 writel(t | STOP_PCI_MASTER, reg);
2003
2004 for (i = 0; i < 1000; i++) {
2005 udelay(1);
2006 t = readl(reg);
2007 if (PCI_MASTER_EMPTY & t) {
2008 break;
2009 }
2010 }
2011 if (!(PCI_MASTER_EMPTY & t)) {
2012 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2013 rc = 1;
2014 goto done;
2015 }
2016
2017 /* set reset */
2018 i = 5;
2019 do {
2020 writel(t | GLOB_SFT_RST, reg);
2021 t = readl(reg);
2022 udelay(1);
2023 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2024
2025 if (!(GLOB_SFT_RST & t)) {
2026 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2027 rc = 1;
2028 goto done;
2029 }
2030
2031 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2032 i = 5;
2033 do {
2034 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2035 t = readl(reg);
2036 udelay(1);
2037 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2038
2039 if (GLOB_SFT_RST & t) {
2040 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2041 rc = 1;
2042 }
2043done:
2044 return rc;
2045}
2046
Jeff Garzik47c2b672005-11-12 21:13:17 -05002047static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002048 void __iomem *mmio)
2049{
2050 void __iomem *port_mmio;
2051 u32 tmp;
2052
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002053 tmp = readl(mmio + MV_RESET_CFG);
2054 if ((tmp & (1 << 0)) == 0) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002055 hpriv->signal[idx].amps = 0x7 << 8;
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002056 hpriv->signal[idx].pre = 0x1 << 5;
2057 return;
2058 }
2059
2060 port_mmio = mv_port_base(mmio, idx);
2061 tmp = readl(port_mmio + PHY_MODE2);
2062
2063 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2064 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2065}
2066
Jeff Garzik47c2b672005-11-12 21:13:17 -05002067static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002068{
Jeff Garzik47c2b672005-11-12 21:13:17 -05002069 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002070}
2071
Jeff Garzikc9d39132005-11-13 17:47:51 -05002072static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002073 unsigned int port)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002074{
Jeff Garzikc9d39132005-11-13 17:47:51 -05002075 void __iomem *port_mmio = mv_port_base(mmio, port);
2076
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002077 u32 hp_flags = hpriv->hp_flags;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002078 int fix_phy_mode2 =
2079 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002080 int fix_phy_mode4 =
Jeff Garzik47c2b672005-11-12 21:13:17 -05002081 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2082 u32 m2, tmp;
2083
2084 if (fix_phy_mode2) {
2085 m2 = readl(port_mmio + PHY_MODE2);
2086 m2 &= ~(1 << 16);
2087 m2 |= (1 << 31);
2088 writel(m2, port_mmio + PHY_MODE2);
2089
2090 udelay(200);
2091
2092 m2 = readl(port_mmio + PHY_MODE2);
2093 m2 &= ~((1 << 16) | (1 << 31));
2094 writel(m2, port_mmio + PHY_MODE2);
2095
2096 udelay(200);
2097 }
2098
2099 /* who knows what this magic does */
2100 tmp = readl(port_mmio + PHY_MODE3);
2101 tmp &= ~0x7F800000;
2102 tmp |= 0x2A800000;
2103 writel(tmp, port_mmio + PHY_MODE3);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002104
2105 if (fix_phy_mode4) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002106 u32 m4;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002107
2108 m4 = readl(port_mmio + PHY_MODE4);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002109
2110 if (hp_flags & MV_HP_ERRATA_60X1B2)
2111 tmp = readl(port_mmio + 0x310);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002112
2113 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2114
2115 writel(m4, port_mmio + PHY_MODE4);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002116
2117 if (hp_flags & MV_HP_ERRATA_60X1B2)
2118 writel(tmp, port_mmio + 0x310);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002119 }
2120
2121 /* Revert values of pre-emphasis and signal amps to the saved ones */
2122 m2 = readl(port_mmio + PHY_MODE2);
2123
2124 m2 &= ~MV_M2_PREAMP_MASK;
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002125 m2 |= hpriv->signal[port].amps;
2126 m2 |= hpriv->signal[port].pre;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002127 m2 &= ~(1 << 16);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002128
Jeff Garzike4e7b892006-01-31 12:18:41 -05002129 /* according to mvSata 3.6.1, some IIE values are fixed */
2130 if (IS_GEN_IIE(hpriv)) {
2131 m2 &= ~0xC30FF01F;
2132 m2 |= 0x0000900F;
2133 }
2134
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002135 writel(m2, port_mmio + PHY_MODE2);
2136}
2137
Jeff Garzikc9d39132005-11-13 17:47:51 -05002138static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2139 unsigned int port_no)
Brett Russ20f733e2005-09-01 18:26:17 -04002140{
Jeff Garzikc9d39132005-11-13 17:47:51 -05002141 void __iomem *port_mmio = mv_port_base(mmio, port_no);
Brett Russ20f733e2005-09-01 18:26:17 -04002142
Brett Russ31961942005-09-30 01:36:00 -04002143 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002144
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002145 if (IS_GEN_II(hpriv)) {
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002146 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
Mark Lordeb46d682006-05-19 16:29:21 -04002147 ifctl |= (1 << 7); /* enable gen2i speed */
2148 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002149 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2150 }
2151
Brett Russ20f733e2005-09-01 18:26:17 -04002152 udelay(25); /* allow reset propagation */
2153
2154 /* Spec never mentions clearing the bit. Marvell's driver does
2155 * clear the bit, however.
2156 */
Brett Russ31961942005-09-30 01:36:00 -04002157 writelfl(0, port_mmio + EDMA_CMD_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04002158
Jeff Garzikc9d39132005-11-13 17:47:51 -05002159 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2160
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002161 if (IS_GEN_I(hpriv))
Jeff Garzikc9d39132005-11-13 17:47:51 -05002162 mdelay(1);
2163}
2164
Jeff Garzikc9d39132005-11-13 17:47:51 -05002165/**
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002166 * mv_phy_reset - Perform eDMA reset followed by COMRESET
Jeff Garzikc9d39132005-11-13 17:47:51 -05002167 * @ap: ATA channel to manipulate
2168 *
2169 * Part of this is taken from __sata_phy_reset and modified to
2170 * not sleep since this routine gets called from interrupt level.
2171 *
2172 * LOCKING:
2173 * Inherited from caller. This is coded to safe to call at
2174 * interrupt level, i.e. it does not sleep.
2175 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002176static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2177 unsigned long deadline)
Jeff Garzikc9d39132005-11-13 17:47:51 -05002178{
2179 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikcca39742006-08-24 03:19:22 -04002180 struct mv_host_priv *hpriv = ap->host->private_data;
Jeff Garzikc9d39132005-11-13 17:47:51 -05002181 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzik22374672005-11-17 10:59:48 -05002182 int retry = 5;
2183 u32 sstatus;
Jeff Garzikc9d39132005-11-13 17:47:51 -05002184
2185 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002186
Tejun Heoda3dbb12007-07-16 14:29:40 +09002187#ifdef DEBUG
2188 {
2189 u32 sstatus, serror, scontrol;
2190
2191 mv_scr_read(ap, SCR_STATUS, &sstatus);
2192 mv_scr_read(ap, SCR_ERROR, &serror);
2193 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2194 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2195 "SCtrl 0x%08x\n", status, serror, scontrol);
2196 }
2197#endif
Brett Russ20f733e2005-09-01 18:26:17 -04002198
Jeff Garzik22374672005-11-17 10:59:48 -05002199 /* Issue COMRESET via SControl */
2200comreset_retry:
Tejun Heo81952c52006-05-15 20:57:47 +09002201 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002202 msleep(1);
Jeff Garzik22374672005-11-17 10:59:48 -05002203
Tejun Heo81952c52006-05-15 20:57:47 +09002204 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002205 msleep(20);
Jeff Garzik22374672005-11-17 10:59:48 -05002206
Brett Russ31961942005-09-30 01:36:00 -04002207 do {
Tejun Heo81952c52006-05-15 20:57:47 +09002208 sata_scr_read(ap, SCR_STATUS, &sstatus);
Andres Salomon62f1d0e2006-09-11 08:51:05 -04002209 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
Brett Russ31961942005-09-30 01:36:00 -04002210 break;
Jeff Garzik22374672005-11-17 10:59:48 -05002211
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002212 msleep(1);
Jeff Garzikc5d3e452007-07-11 18:30:50 -04002213 } while (time_before(jiffies, deadline));
Brett Russ20f733e2005-09-01 18:26:17 -04002214
Jeff Garzik22374672005-11-17 10:59:48 -05002215 /* work around errata */
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002216 if (IS_GEN_II(hpriv) &&
Jeff Garzik22374672005-11-17 10:59:48 -05002217 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2218 (retry-- > 0))
2219 goto comreset_retry;
Jeff Garzik095fec82005-11-12 09:50:49 -05002220
Tejun Heoda3dbb12007-07-16 14:29:40 +09002221#ifdef DEBUG
2222 {
2223 u32 sstatus, serror, scontrol;
2224
2225 mv_scr_read(ap, SCR_STATUS, &sstatus);
2226 mv_scr_read(ap, SCR_ERROR, &serror);
2227 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2228 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2229 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2230 }
2231#endif
Brett Russ31961942005-09-30 01:36:00 -04002232
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002233 if (ata_port_offline(ap)) {
2234 *class = ATA_DEV_NONE;
Brett Russ20f733e2005-09-01 18:26:17 -04002235 return;
2236 }
2237
Jeff Garzik22374672005-11-17 10:59:48 -05002238 /* even after SStatus reflects that device is ready,
2239 * it seems to take a while for link to be fully
2240 * established (and thus Status no longer 0x80/0x7F),
2241 * so we poll a bit for that, here.
2242 */
2243 retry = 20;
2244 while (1) {
2245 u8 drv_stat = ata_check_status(ap);
2246 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2247 break;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002248 msleep(500);
Jeff Garzik22374672005-11-17 10:59:48 -05002249 if (retry-- <= 0)
2250 break;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002251 if (time_after(jiffies, deadline))
2252 break;
Jeff Garzik22374672005-11-17 10:59:48 -05002253 }
2254
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002255 /* FIXME: if we passed the deadline, the following
2256 * code probably produces an invalid result
2257 */
Brett Russ20f733e2005-09-01 18:26:17 -04002258
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002259 /* finally, read device signature from TF registers */
2260 *class = ata_dev_try_classify(ap, 0, NULL);
Jeff Garzik095fec82005-11-12 09:50:49 -05002261
2262 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2263
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002264 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
Jeff Garzik095fec82005-11-12 09:50:49 -05002265
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002266 VPRINTK("EXIT\n");
Brett Russ20f733e2005-09-01 18:26:17 -04002267}
2268
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002269static int mv_prereset(struct ata_port *ap, unsigned long deadline)
Jeff Garzik22374672005-11-17 10:59:48 -05002270{
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002271 struct mv_port_priv *pp = ap->private_data;
2272 struct ata_eh_context *ehc = &ap->eh_context;
2273 int rc;
Jeff Garzik0ea9e172007-07-13 17:06:45 -04002274
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002275 rc = mv_stop_dma(ap);
2276 if (rc)
2277 ehc->i.action |= ATA_EH_HARDRESET;
2278
2279 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2280 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2281 ehc->i.action |= ATA_EH_HARDRESET;
2282 }
2283
2284 /* if we're about to do hardreset, nothing more to do */
2285 if (ehc->i.action & ATA_EH_HARDRESET)
2286 return 0;
2287
2288 if (ata_port_online(ap))
2289 rc = ata_wait_ready(ap, deadline);
2290 else
2291 rc = -ENODEV;
2292
2293 return rc;
Jeff Garzik22374672005-11-17 10:59:48 -05002294}
2295
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002296static int mv_hardreset(struct ata_port *ap, unsigned int *class,
2297 unsigned long deadline)
2298{
2299 struct mv_host_priv *hpriv = ap->host->private_data;
2300 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2301
2302 mv_stop_dma(ap);
2303
2304 mv_channel_reset(hpriv, mmio, ap->port_no);
2305
2306 mv_phy_reset(ap, class, deadline);
2307
2308 return 0;
2309}
2310
2311static void mv_postreset(struct ata_port *ap, unsigned int *classes)
2312{
2313 u32 serr;
2314
2315 /* print link status */
2316 sata_print_link_status(ap);
2317
2318 /* clear SError */
2319 sata_scr_read(ap, SCR_ERROR, &serr);
2320 sata_scr_write_flush(ap, SCR_ERROR, serr);
2321
2322 /* bail out if no device is present */
2323 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2324 DPRINTK("EXIT, no device\n");
2325 return;
2326 }
2327
2328 /* set up device control */
2329 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2330}
2331
2332static void mv_error_handler(struct ata_port *ap)
2333{
2334 ata_do_eh(ap, mv_prereset, ata_std_softreset,
2335 mv_hardreset, mv_postreset);
2336}
2337
2338static void mv_post_int_cmd(struct ata_queued_cmd *qc)
2339{
2340 mv_stop_dma(qc->ap);
2341}
2342
2343static void mv_eh_freeze(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04002344{
Tejun Heo0d5ff562007-02-01 15:06:36 +09002345 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002346 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2347 u32 tmp, mask;
2348 unsigned int shift;
Brett Russ31961942005-09-30 01:36:00 -04002349
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002350 /* FIXME: handle coalescing completion events properly */
Brett Russ31961942005-09-30 01:36:00 -04002351
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002352 shift = ap->port_no * 2;
2353 if (hc > 0)
2354 shift++;
Brett Russ31961942005-09-30 01:36:00 -04002355
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002356 mask = 0x3 << shift;
Brett Russ31961942005-09-30 01:36:00 -04002357
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002358 /* disable assertion of portN err, done events */
2359 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2360 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2361}
2362
2363static void mv_eh_thaw(struct ata_port *ap)
2364{
2365 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2366 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2367 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2368 void __iomem *port_mmio = mv_ap_base(ap);
2369 u32 tmp, mask, hc_irq_cause;
2370 unsigned int shift, hc_port_no = ap->port_no;
2371
2372 /* FIXME: handle coalescing completion events properly */
2373
2374 shift = ap->port_no * 2;
2375 if (hc > 0) {
2376 shift++;
2377 hc_port_no -= 4;
Mark Lord9b358e32006-05-19 16:21:03 -04002378 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002379
2380 mask = 0x3 << shift;
2381
2382 /* clear EDMA errors on this port */
2383 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2384
2385 /* clear pending irq events */
2386 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2387 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2388 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2389 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2390
2391 /* enable assertion of portN err, done events */
2392 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2393 writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
Brett Russ31961942005-09-30 01:36:00 -04002394}
2395
Brett Russ05b308e2005-10-05 17:08:53 -04002396/**
2397 * mv_port_init - Perform some early initialization on a single port.
2398 * @port: libata data structure storing shadow register addresses
2399 * @port_mmio: base address of the port
2400 *
2401 * Initialize shadow register mmio addresses, clear outstanding
2402 * interrupts on the port, and unmask interrupts for the future
2403 * start of the port.
2404 *
2405 * LOCKING:
2406 * Inherited from caller.
2407 */
Brett Russ31961942005-09-30 01:36:00 -04002408static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2409{
Tejun Heo0d5ff562007-02-01 15:06:36 +09002410 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
Brett Russ31961942005-09-30 01:36:00 -04002411 unsigned serr_ofs;
2412
Jeff Garzik8b260242005-11-12 12:32:50 -05002413 /* PIO related setup
Brett Russ31961942005-09-30 01:36:00 -04002414 */
2415 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
Jeff Garzik8b260242005-11-12 12:32:50 -05002416 port->error_addr =
Brett Russ31961942005-09-30 01:36:00 -04002417 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2418 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2419 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2420 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2421 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2422 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
Jeff Garzik8b260242005-11-12 12:32:50 -05002423 port->status_addr =
Brett Russ31961942005-09-30 01:36:00 -04002424 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2425 /* special case: control/altstatus doesn't have ATA_REG_ address */
2426 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2427
2428 /* unused: */
Randy Dunlap8d9db2d2007-02-16 01:40:06 -08002429 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
Brett Russ20f733e2005-09-01 18:26:17 -04002430
Brett Russ31961942005-09-30 01:36:00 -04002431 /* Clear any currently outstanding port interrupt conditions */
2432 serr_ofs = mv_scr_offset(SCR_ERROR);
2433 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2434 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2435
Brett Russ20f733e2005-09-01 18:26:17 -04002436 /* unmask all EDMA error interrupts */
Brett Russ31961942005-09-30 01:36:00 -04002437 writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04002438
Jeff Garzik8b260242005-11-12 12:32:50 -05002439 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
Brett Russ31961942005-09-30 01:36:00 -04002440 readl(port_mmio + EDMA_CFG_OFS),
2441 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2442 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
Brett Russ20f733e2005-09-01 18:26:17 -04002443}
2444
Tejun Heo4447d352007-04-17 23:44:08 +09002445static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002446{
Tejun Heo4447d352007-04-17 23:44:08 +09002447 struct pci_dev *pdev = to_pci_dev(host->dev);
2448 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002449 u32 hp_flags = hpriv->hp_flags;
2450
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002451 switch(board_idx) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002452 case chip_5080:
2453 hpriv->ops = &mv5xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002454 hp_flags |= MV_HP_GEN_I;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002455
Auke Kok44c10132007-06-08 15:46:36 -07002456 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002457 case 0x1:
2458 hp_flags |= MV_HP_ERRATA_50XXB0;
2459 break;
2460 case 0x3:
2461 hp_flags |= MV_HP_ERRATA_50XXB2;
2462 break;
2463 default:
2464 dev_printk(KERN_WARNING, &pdev->dev,
2465 "Applying 50XXB2 workarounds to unknown rev\n");
2466 hp_flags |= MV_HP_ERRATA_50XXB2;
2467 break;
2468 }
2469 break;
2470
2471 case chip_504x:
2472 case chip_508x:
2473 hpriv->ops = &mv5xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002474 hp_flags |= MV_HP_GEN_I;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002475
Auke Kok44c10132007-06-08 15:46:36 -07002476 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002477 case 0x0:
2478 hp_flags |= MV_HP_ERRATA_50XXB0;
2479 break;
2480 case 0x3:
2481 hp_flags |= MV_HP_ERRATA_50XXB2;
2482 break;
2483 default:
2484 dev_printk(KERN_WARNING, &pdev->dev,
2485 "Applying B2 workarounds to unknown rev\n");
2486 hp_flags |= MV_HP_ERRATA_50XXB2;
2487 break;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002488 }
2489 break;
2490
2491 case chip_604x:
2492 case chip_608x:
Jeff Garzik47c2b672005-11-12 21:13:17 -05002493 hpriv->ops = &mv6xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002494 hp_flags |= MV_HP_GEN_II;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002495
Auke Kok44c10132007-06-08 15:46:36 -07002496 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002497 case 0x7:
2498 hp_flags |= MV_HP_ERRATA_60X1B2;
2499 break;
2500 case 0x9:
2501 hp_flags |= MV_HP_ERRATA_60X1C0;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002502 break;
2503 default:
2504 dev_printk(KERN_WARNING, &pdev->dev,
Jeff Garzik47c2b672005-11-12 21:13:17 -05002505 "Applying B2 workarounds to unknown rev\n");
2506 hp_flags |= MV_HP_ERRATA_60X1B2;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002507 break;
2508 }
2509 break;
2510
Jeff Garzike4e7b892006-01-31 12:18:41 -05002511 case chip_7042:
2512 case chip_6042:
2513 hpriv->ops = &mv6xxx_ops;
Jeff Garzike4e7b892006-01-31 12:18:41 -05002514 hp_flags |= MV_HP_GEN_IIE;
2515
Auke Kok44c10132007-06-08 15:46:36 -07002516 switch (pdev->revision) {
Jeff Garzike4e7b892006-01-31 12:18:41 -05002517 case 0x0:
2518 hp_flags |= MV_HP_ERRATA_XX42A0;
2519 break;
2520 case 0x1:
2521 hp_flags |= MV_HP_ERRATA_60X1C0;
2522 break;
2523 default:
2524 dev_printk(KERN_WARNING, &pdev->dev,
2525 "Applying 60X1C0 workarounds to unknown rev\n");
2526 hp_flags |= MV_HP_ERRATA_60X1C0;
2527 break;
2528 }
2529 break;
2530
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002531 default:
2532 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2533 return 1;
2534 }
2535
2536 hpriv->hp_flags = hp_flags;
2537
2538 return 0;
2539}
2540
/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @host: ATA host to initialize
 *      @board_idx: controller index
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      The MMIO writes below are strictly ordered hardware bring-up
 *      steps; do not reorder them.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask: silence everything while we initialize */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	/* select hardware ops table and errata flags for this chip */
	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* sample per-port PHY preamp values before resetting the HCs */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	/* per-port PHY tuning; Gen-II additionally needs the interface
	 * control register programmed per the chip spec
	 */
	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	/* set up shadow register addresses and clear port IRQ state */
	for (port = 0; port < host->n_ports; port++) {
		void __iomem *port_mmio = mv_port_base(mmio, port);
		mv_port_init(&host->ports[port]->ioaddr, port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);

	/* Gen-I chips use a different "masked IRQs" set than Gen-II/IIE */
	if (IS_GEN_I(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));

done:
	return rc;
}
2630
Brett Russ05b308e2005-10-05 17:08:53 -04002631/**
2632 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002633 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002634 *
2635 * FIXME: complete this.
2636 *
2637 * LOCKING:
2638 * Inherited from caller.
2639 */
Tejun Heo4447d352007-04-17 23:44:08 +09002640static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002641{
Tejun Heo4447d352007-04-17 23:44:08 +09002642 struct pci_dev *pdev = to_pci_dev(host->dev);
2643 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07002644 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002645 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002646
2647 /* Use this to determine the HW stepping of the chip so we know
2648 * what errata to workaround
2649 */
Brett Russ31961942005-09-30 01:36:00 -04002650 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2651 if (scc == 0)
2652 scc_s = "SCSI";
2653 else if (scc == 0x01)
2654 scc_s = "RAID";
2655 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002656 scc_s = "?";
2657
2658 if (IS_GEN_I(hpriv))
2659 gen = "I";
2660 else if (IS_GEN_II(hpriv))
2661 gen = "II";
2662 else if (IS_GEN_IIE(hpriv))
2663 gen = "IIE";
2664 else
2665 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002666
Jeff Garzika9524a72005-10-30 14:39:11 -05002667 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002668 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2669 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002670 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2671}
2672
Brett Russ05b308e2005-10-05 17:08:53 -04002673/**
2674 * mv_init_one - handle a positive probe of a Marvell host
2675 * @pdev: PCI device found
2676 * @ent: PCI device ID entry for the matched host
2677 *
2678 * LOCKING:
2679 * Inherited from caller.
2680 */
Brett Russ20f733e2005-09-01 18:26:17 -04002681static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2682{
2683 static int printed_version = 0;
Brett Russ20f733e2005-09-01 18:26:17 -04002684 unsigned int board_idx = (unsigned int)ent->driver_data;
Tejun Heo4447d352007-04-17 23:44:08 +09002685 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2686 struct ata_host *host;
2687 struct mv_host_priv *hpriv;
2688 int n_ports, rc;
Brett Russ20f733e2005-09-01 18:26:17 -04002689
Jeff Garzika9524a72005-10-30 14:39:11 -05002690 if (!printed_version++)
2691 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
Brett Russ20f733e2005-09-01 18:26:17 -04002692
Tejun Heo4447d352007-04-17 23:44:08 +09002693 /* allocate host */
2694 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2695
2696 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2697 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2698 if (!host || !hpriv)
2699 return -ENOMEM;
2700 host->private_data = hpriv;
2701
2702 /* acquire resources */
Tejun Heo24dc5f32007-01-20 16:00:28 +09002703 rc = pcim_enable_device(pdev);
2704 if (rc)
Brett Russ20f733e2005-09-01 18:26:17 -04002705 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04002706
Tejun Heo0d5ff562007-02-01 15:06:36 +09002707 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2708 if (rc == -EBUSY)
Tejun Heo24dc5f32007-01-20 16:00:28 +09002709 pcim_pin_device(pdev);
Tejun Heo0d5ff562007-02-01 15:06:36 +09002710 if (rc)
Tejun Heo24dc5f32007-01-20 16:00:28 +09002711 return rc;
Tejun Heo4447d352007-04-17 23:44:08 +09002712 host->iomap = pcim_iomap_table(pdev);
Brett Russ20f733e2005-09-01 18:26:17 -04002713
Jeff Garzikd88184f2007-02-26 01:26:06 -05002714 rc = pci_go_64(pdev);
2715 if (rc)
2716 return rc;
2717
Brett Russ20f733e2005-09-01 18:26:17 -04002718 /* initialize adapter */
Tejun Heo4447d352007-04-17 23:44:08 +09002719 rc = mv_init_host(host, board_idx);
Tejun Heo24dc5f32007-01-20 16:00:28 +09002720 if (rc)
2721 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04002722
Brett Russ31961942005-09-30 01:36:00 -04002723 /* Enable interrupts */
Tejun Heo6a59dcf2007-02-24 15:12:31 +09002724 if (msi && pci_enable_msi(pdev))
Brett Russ31961942005-09-30 01:36:00 -04002725 pci_intx(pdev, 1);
Brett Russ20f733e2005-09-01 18:26:17 -04002726
Brett Russ31961942005-09-30 01:36:00 -04002727 mv_dump_pci_cfg(pdev, 0x68);
Tejun Heo4447d352007-04-17 23:44:08 +09002728 mv_print_info(host);
Brett Russ20f733e2005-09-01 18:26:17 -04002729
Tejun Heo4447d352007-04-17 23:44:08 +09002730 pci_set_master(pdev);
Jeff Garzikea8b4db2007-07-17 02:21:50 -04002731 pci_try_set_mwi(pdev);
Tejun Heo4447d352007-04-17 23:44:08 +09002732 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
Jeff Garzikc5d3e452007-07-11 18:30:50 -04002733 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
Brett Russ20f733e2005-09-01 18:26:17 -04002734}
2735
2736static int __init mv_init(void)
2737{
Pavel Roskinb7887192006-08-10 18:13:18 +09002738 return pci_register_driver(&mv_pci_driver);
Brett Russ20f733e2005-09-01 18:26:17 -04002739}
2740
2741static void __exit mv_exit(void)
2742{
2743 pci_unregister_driver(&mv_pci_driver);
2744}
2745
/* Module metadata and parameters */
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

/* msi: selects PCI MSI vs legacy INTx interrupt delivery (read-only
 * after load, mode 0444)
 */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);