blob: 7fa42c36c4173bd2f44670d61e61d22f89d1002b [file] [log] [blame]
/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
23
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  2) Convert to LibATA new EH.  Required for hotplug, NCQ, and sane
  probing/error handling in general.  MUST HAVE.

  3) Add hotplug support (easy, once new-EH support appears)

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  7) Test and verify 3.0 Gbps support

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is quite often not
  worth the latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.

*/
66
67
Brett Russ20f733e2005-09-01 18:26:17 -040068#include <linux/kernel.h>
69#include <linux/module.h>
70#include <linux/pci.h>
71#include <linux/init.h>
72#include <linux/blkdev.h>
73#include <linux/delay.h>
74#include <linux/interrupt.h>
Brett Russ20f733e2005-09-01 18:26:17 -040075#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050076#include <linux/device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040077#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050078#include <scsi/scsi_cmnd.h>
Brett Russ20f733e2005-09-01 18:26:17 -040079#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040080
81#define DRV_NAME "sata_mv"
Jeff Garzik8bc3fc42007-05-21 20:26:38 -040082#define DRV_VERSION "0.81"
Brett Russ20f733e2005-09-01 18:26:17 -040083
84enum {
85 /* BAR's are enumerated in terms of pci_resource_start() terms */
86 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
87 MV_IO_BAR = 2, /* offset 0x18: IO space */
88 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
89
90 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
91 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
92
93 MV_PCI_REG_BASE = 0,
94 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
Mark Lord615ab952006-05-19 16:24:56 -040095 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
96 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
97 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
98 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
99 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
100
Brett Russ20f733e2005-09-01 18:26:17 -0400101 MV_SATAHC0_REG_BASE = 0x20000,
Jeff Garzik522479f2005-11-12 22:14:02 -0500102 MV_FLASH_CTL = 0x1046c,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500103 MV_GPIO_PORT_CTL = 0x104f0,
104 MV_RESET_CFG = 0x180d8,
Brett Russ20f733e2005-09-01 18:26:17 -0400105
106 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
107 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
108 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
109 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
110
Brett Russ31961942005-09-30 01:36:00 -0400111 MV_MAX_Q_DEPTH = 32,
112 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
113
114 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
115 * CRPB needs alignment on a 256B boundary. Size == 256B
116 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
117 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
118 */
119 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
120 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
121 MV_MAX_SG_CT = 176,
122 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
123 MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
124
Brett Russ20f733e2005-09-01 18:26:17 -0400125 MV_PORTS_PER_HC = 4,
126 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
127 MV_PORT_HC_SHIFT = 2,
Brett Russ31961942005-09-30 01:36:00 -0400128 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
Brett Russ20f733e2005-09-01 18:26:17 -0400129 MV_PORT_MASK = 3,
130
131 /* Host Flags */
132 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
133 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400134 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
135 ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
136 ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500137 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
Brett Russ20f733e2005-09-01 18:26:17 -0400138
Brett Russ31961942005-09-30 01:36:00 -0400139 CRQB_FLAG_READ = (1 << 0),
140 CRQB_TAG_SHIFT = 1,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400141 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
142 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
Brett Russ31961942005-09-30 01:36:00 -0400143 CRQB_CMD_ADDR_SHIFT = 8,
144 CRQB_CMD_CS = (0x2 << 11),
145 CRQB_CMD_LAST = (1 << 15),
146
147 CRPB_FLAG_STATUS_SHIFT = 8,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400148 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
149 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
Brett Russ31961942005-09-30 01:36:00 -0400150
151 EPRD_FLAG_END_OF_TBL = (1 << 31),
152
Brett Russ20f733e2005-09-01 18:26:17 -0400153 /* PCI interface registers */
154
Brett Russ31961942005-09-30 01:36:00 -0400155 PCI_COMMAND_OFS = 0xc00,
156
Brett Russ20f733e2005-09-01 18:26:17 -0400157 PCI_MAIN_CMD_STS_OFS = 0xd30,
158 STOP_PCI_MASTER = (1 << 2),
159 PCI_MASTER_EMPTY = (1 << 3),
160 GLOB_SFT_RST = (1 << 4),
161
Jeff Garzik522479f2005-11-12 22:14:02 -0500162 MV_PCI_MODE = 0xd00,
163 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
164 MV_PCI_DISC_TIMER = 0xd04,
165 MV_PCI_MSI_TRIGGER = 0xc38,
166 MV_PCI_SERR_MASK = 0xc28,
167 MV_PCI_XBAR_TMOUT = 0x1d04,
168 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
169 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
170 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
171 MV_PCI_ERR_COMMAND = 0x1d50,
172
173 PCI_IRQ_CAUSE_OFS = 0x1d58,
174 PCI_IRQ_MASK_OFS = 0x1d5c,
Brett Russ20f733e2005-09-01 18:26:17 -0400175 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
176
177 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
178 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
179 PORT0_ERR = (1 << 0), /* shift by port # */
180 PORT0_DONE = (1 << 1), /* shift by port # */
181 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
182 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
183 PCI_ERR = (1 << 18),
184 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
185 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500186 PORTS_0_3_COAL_DONE = (1 << 8),
187 PORTS_4_7_COAL_DONE = (1 << 17),
Brett Russ20f733e2005-09-01 18:26:17 -0400188 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
189 GPIO_INT = (1 << 22),
190 SELF_INT = (1 << 23),
191 TWSI_INT = (1 << 24),
192 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500193 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
Jeff Garzik8b260242005-11-12 12:32:50 -0500194 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
Brett Russ20f733e2005-09-01 18:26:17 -0400195 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
196 HC_MAIN_RSVD),
Jeff Garzikfb621e22007-02-25 04:19:45 -0500197 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
198 HC_MAIN_RSVD_5),
Brett Russ20f733e2005-09-01 18:26:17 -0400199
200 /* SATAHC registers */
201 HC_CFG_OFS = 0,
202
203 HC_IRQ_CAUSE_OFS = 0x14,
Brett Russ31961942005-09-30 01:36:00 -0400204 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
Brett Russ20f733e2005-09-01 18:26:17 -0400205 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
206 DEV_IRQ = (1 << 8), /* shift by port # */
207
208 /* Shadow block registers */
Brett Russ31961942005-09-30 01:36:00 -0400209 SHD_BLK_OFS = 0x100,
210 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
Brett Russ20f733e2005-09-01 18:26:17 -0400211
212 /* SATA registers */
213 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
214 SATA_ACTIVE_OFS = 0x350,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500215 PHY_MODE3 = 0x310,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500216 PHY_MODE4 = 0x314,
217 PHY_MODE2 = 0x330,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500218 MV5_PHY_MODE = 0x74,
219 MV5_LT_MODE = 0x30,
220 MV5_PHY_CTL = 0x0C,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500221 SATA_INTERFACE_CTL = 0x050,
222
223 MV_M2_PREAMP_MASK = 0x7e0,
Brett Russ20f733e2005-09-01 18:26:17 -0400224
225 /* Port registers */
226 EDMA_CFG_OFS = 0,
Brett Russ31961942005-09-30 01:36:00 -0400227 EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */
228 EDMA_CFG_NCQ = (1 << 5),
229 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
230 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
231 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
Brett Russ20f733e2005-09-01 18:26:17 -0400232
233 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
234 EDMA_ERR_IRQ_MASK_OFS = 0xc,
235 EDMA_ERR_D_PAR = (1 << 0),
236 EDMA_ERR_PRD_PAR = (1 << 1),
237 EDMA_ERR_DEV = (1 << 2),
238 EDMA_ERR_DEV_DCON = (1 << 3),
239 EDMA_ERR_DEV_CON = (1 << 4),
240 EDMA_ERR_SERR = (1 << 5),
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400241 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
242 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
Brett Russ20f733e2005-09-01 18:26:17 -0400243 EDMA_ERR_BIST_ASYNC = (1 << 8),
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400244 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
Brett Russ20f733e2005-09-01 18:26:17 -0400245 EDMA_ERR_CRBQ_PAR = (1 << 9),
246 EDMA_ERR_CRPB_PAR = (1 << 10),
247 EDMA_ERR_INTRL_PAR = (1 << 11),
248 EDMA_ERR_IORDY = (1 << 12),
249 EDMA_ERR_LNK_CTRL_RX = (0xf << 13),
250 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),
251 EDMA_ERR_LNK_DATA_RX = (0xf << 17),
252 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21),
253 EDMA_ERR_LNK_DATA_TX = (0x1f << 26),
254 EDMA_ERR_TRANS_PROTO = (1 << 31),
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400255 EDMA_ERR_OVERRUN_5 = (1 << 5),
256 EDMA_ERR_UNDERRUN_5 = (1 << 6),
Jeff Garzik8b260242005-11-12 12:32:50 -0500257 EDMA_ERR_FATAL = (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
Brett Russ20f733e2005-09-01 18:26:17 -0400258 EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
259 EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
Jeff Garzik8b260242005-11-12 12:32:50 -0500260 EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
Brett Russ20f733e2005-09-01 18:26:17 -0400261 EDMA_ERR_LNK_DATA_RX |
Jeff Garzik8b260242005-11-12 12:32:50 -0500262 EDMA_ERR_LNK_DATA_TX |
Brett Russ20f733e2005-09-01 18:26:17 -0400263 EDMA_ERR_TRANS_PROTO),
264
Brett Russ31961942005-09-30 01:36:00 -0400265 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
266 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400267
268 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
269 EDMA_REQ_Q_PTR_SHIFT = 5,
270
271 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
272 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
273 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400274 EDMA_RSP_Q_PTR_SHIFT = 3,
275
Brett Russ20f733e2005-09-01 18:26:17 -0400276 EDMA_CMD_OFS = 0x28,
277 EDMA_EN = (1 << 0),
278 EDMA_DS = (1 << 1),
279 ATA_RST = (1 << 2),
280
Jeff Garzikc9d39132005-11-13 17:47:51 -0500281 EDMA_IORDY_TMOUT = 0x34,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500282 EDMA_ARB_CFG = 0x38,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500283
Brett Russ31961942005-09-30 01:36:00 -0400284 /* Host private flags (hp_flags) */
285 MV_HP_FLAG_MSI = (1 << 0),
Jeff Garzik47c2b672005-11-12 21:13:17 -0500286 MV_HP_ERRATA_50XXB0 = (1 << 1),
287 MV_HP_ERRATA_50XXB2 = (1 << 2),
288 MV_HP_ERRATA_60X1B2 = (1 << 3),
289 MV_HP_ERRATA_60X1C0 = (1 << 4),
Jeff Garzike4e7b892006-01-31 12:18:41 -0500290 MV_HP_ERRATA_XX42A0 = (1 << 5),
291 MV_HP_50XX = (1 << 6),
292 MV_HP_GEN_IIE = (1 << 7),
Brett Russ20f733e2005-09-01 18:26:17 -0400293
Brett Russ31961942005-09-30 01:36:00 -0400294 /* Port private flags (pp_flags) */
295 MV_PP_FLAG_EDMA_EN = (1 << 0),
296 MV_PP_FLAG_EDMA_DS_ACT = (1 << 1),
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400297 MV_PP_FLAG_HAD_A_RESET = (1 << 2),
Brett Russ31961942005-09-30 01:36:00 -0400298};
299
/* Chip-generation tests: 50xx parts are Gen-I, the rest Gen-II/IIE
 * (Gen-IIE additionally sets MV_HP_GEN_IIE in hp_flags). */
#define IS_50XX(hpriv)	((hpriv)->hp_flags & MV_HP_50XX)
#define IS_60XX(hpriv)	(((hpriv)->hp_flags & MV_HP_50XX) == 0)
#define IS_GEN_I(hpriv)		IS_50XX(hpriv)
#define IS_GEN_II(hpriv)	IS_60XX(hpriv)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500305
enum {
	/* DMA boundary and queue-base masks for the EDMA queues */
	MV_DMA_BOUNDARY		= 0xffffffffU,

	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
313
/* Index into mv_port_info[]; also the driver_data in mv_pci_tbl[] */
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};
323
Brett Russ31961942005-09-30 01:36:00 -0400324/* Command ReQuest Block: 32B */
325struct mv_crqb {
Mark Lorde1469872006-05-22 19:02:03 -0400326 __le32 sg_addr;
327 __le32 sg_addr_hi;
328 __le16 ctrl_flags;
329 __le16 ata_cmd[11];
Brett Russ31961942005-09-30 01:36:00 -0400330};
331
Jeff Garzike4e7b892006-01-31 12:18:41 -0500332struct mv_crqb_iie {
Mark Lorde1469872006-05-22 19:02:03 -0400333 __le32 addr;
334 __le32 addr_hi;
335 __le32 flags;
336 __le32 len;
337 __le32 ata_cmd[4];
Jeff Garzike4e7b892006-01-31 12:18:41 -0500338};
339
Brett Russ31961942005-09-30 01:36:00 -0400340/* Command ResPonse Block: 8B */
341struct mv_crpb {
Mark Lorde1469872006-05-22 19:02:03 -0400342 __le16 id;
343 __le16 flags;
344 __le32 tmstmp;
Brett Russ31961942005-09-30 01:36:00 -0400345};
346
347/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
348struct mv_sg {
Mark Lorde1469872006-05-22 19:02:03 -0400349 __le32 addr;
350 __le32 flags_size;
351 __le32 addr_hi;
352 __le32 reserved;
Brett Russ20f733e2005-09-01 18:26:17 -0400353};
354
355struct mv_port_priv {
Brett Russ31961942005-09-30 01:36:00 -0400356 struct mv_crqb *crqb;
357 dma_addr_t crqb_dma;
358 struct mv_crpb *crpb;
359 dma_addr_t crpb_dma;
360 struct mv_sg *sg_tbl;
361 dma_addr_t sg_tbl_dma;
Brett Russ31961942005-09-30 01:36:00 -0400362 u32 pp_flags;
Brett Russ20f733e2005-09-01 18:26:17 -0400363};
364
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500365struct mv_port_signal {
366 u32 amps;
367 u32 pre;
368};
369
Jeff Garzik47c2b672005-11-12 21:13:17 -0500370struct mv_host_priv;
371struct mv_hw_ops {
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500372 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
373 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500374 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
375 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
376 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500377 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
378 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500379 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
380 void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500381};
382
Brett Russ20f733e2005-09-01 18:26:17 -0400383struct mv_host_priv {
Brett Russ31961942005-09-30 01:36:00 -0400384 u32 hp_flags;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500385 struct mv_port_signal signal[8];
Jeff Garzik47c2b672005-11-12 21:13:17 -0500386 const struct mv_hw_ops *ops;
Brett Russ20f733e2005-09-01 18:26:17 -0400387};
388
389static void mv_irq_clear(struct ata_port *ap);
390static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
391static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500392static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
393static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Brett Russ20f733e2005-09-01 18:26:17 -0400394static void mv_phy_reset(struct ata_port *ap);
Jeff Garzik22374672005-11-17 10:59:48 -0500395static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
Brett Russ31961942005-09-30 01:36:00 -0400396static int mv_port_start(struct ata_port *ap);
397static void mv_port_stop(struct ata_port *ap);
398static void mv_qc_prep(struct ata_queued_cmd *qc);
Jeff Garzike4e7b892006-01-31 12:18:41 -0500399static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
Tejun Heo9a3d9eb2006-01-23 13:09:36 +0900400static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
Brett Russ31961942005-09-30 01:36:00 -0400401static void mv_eng_timeout(struct ata_port *ap);
Brett Russ20f733e2005-09-01 18:26:17 -0400402static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
403
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500404static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
405 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500406static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
407static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
408 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500409static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
410 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500411static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
412static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500413
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500414static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
415 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500416static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
417static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
418 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500419static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
420 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500421static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
422static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500423static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
424 unsigned int port_no);
425static void mv_stop_and_reset(struct ata_port *ap);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500426
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400427static struct scsi_host_template mv5_sht = {
Brett Russ20f733e2005-09-01 18:26:17 -0400428 .module = THIS_MODULE,
429 .name = DRV_NAME,
430 .ioctl = ata_scsi_ioctl,
431 .queuecommand = ata_scsi_queuecmd,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400432 .can_queue = ATA_DEF_QUEUE,
433 .this_id = ATA_SHT_THIS_ID,
434 .sg_tablesize = MV_MAX_SG_CT,
435 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
436 .emulated = ATA_SHT_EMULATED,
437 .use_clustering = 1,
438 .proc_name = DRV_NAME,
439 .dma_boundary = MV_DMA_BOUNDARY,
440 .slave_configure = ata_scsi_slave_config,
441 .slave_destroy = ata_scsi_slave_destroy,
442 .bios_param = ata_std_bios_param,
443};
444
445static struct scsi_host_template mv6_sht = {
446 .module = THIS_MODULE,
447 .name = DRV_NAME,
448 .ioctl = ata_scsi_ioctl,
449 .queuecommand = ata_scsi_queuecmd,
450 .can_queue = ATA_DEF_QUEUE,
Brett Russ20f733e2005-09-01 18:26:17 -0400451 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikd88184f2007-02-26 01:26:06 -0500452 .sg_tablesize = MV_MAX_SG_CT,
Brett Russ20f733e2005-09-01 18:26:17 -0400453 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
454 .emulated = ATA_SHT_EMULATED,
Jeff Garzikd88184f2007-02-26 01:26:06 -0500455 .use_clustering = 1,
Brett Russ20f733e2005-09-01 18:26:17 -0400456 .proc_name = DRV_NAME,
457 .dma_boundary = MV_DMA_BOUNDARY,
458 .slave_configure = ata_scsi_slave_config,
Tejun Heoccf68c32006-05-31 18:28:09 +0900459 .slave_destroy = ata_scsi_slave_destroy,
Brett Russ20f733e2005-09-01 18:26:17 -0400460 .bios_param = ata_std_bios_param,
Brett Russ20f733e2005-09-01 18:26:17 -0400461};
462
Jeff Garzikc9d39132005-11-13 17:47:51 -0500463static const struct ata_port_operations mv5_ops = {
464 .port_disable = ata_port_disable,
465
466 .tf_load = ata_tf_load,
467 .tf_read = ata_tf_read,
468 .check_status = ata_check_status,
469 .exec_command = ata_exec_command,
470 .dev_select = ata_std_dev_select,
471
472 .phy_reset = mv_phy_reset,
Jeff Garzikcffacd82007-03-09 09:46:47 -0500473 .cable_detect = ata_cable_sata,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500474
475 .qc_prep = mv_qc_prep,
476 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900477 .data_xfer = ata_data_xfer,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500478
479 .eng_timeout = mv_eng_timeout,
480
Jeff Garzikc9d39132005-11-13 17:47:51 -0500481 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900482 .irq_on = ata_irq_on,
483 .irq_ack = ata_irq_ack,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500484
485 .scr_read = mv5_scr_read,
486 .scr_write = mv5_scr_write,
487
488 .port_start = mv_port_start,
489 .port_stop = mv_port_stop,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500490};
491
492static const struct ata_port_operations mv6_ops = {
Brett Russ20f733e2005-09-01 18:26:17 -0400493 .port_disable = ata_port_disable,
494
495 .tf_load = ata_tf_load,
496 .tf_read = ata_tf_read,
497 .check_status = ata_check_status,
498 .exec_command = ata_exec_command,
499 .dev_select = ata_std_dev_select,
500
501 .phy_reset = mv_phy_reset,
Jeff Garzikcffacd82007-03-09 09:46:47 -0500502 .cable_detect = ata_cable_sata,
Brett Russ20f733e2005-09-01 18:26:17 -0400503
Brett Russ31961942005-09-30 01:36:00 -0400504 .qc_prep = mv_qc_prep,
505 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900506 .data_xfer = ata_data_xfer,
Brett Russ20f733e2005-09-01 18:26:17 -0400507
Brett Russ31961942005-09-30 01:36:00 -0400508 .eng_timeout = mv_eng_timeout,
Brett Russ20f733e2005-09-01 18:26:17 -0400509
Brett Russ20f733e2005-09-01 18:26:17 -0400510 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900511 .irq_on = ata_irq_on,
512 .irq_ack = ata_irq_ack,
Brett Russ20f733e2005-09-01 18:26:17 -0400513
514 .scr_read = mv_scr_read,
515 .scr_write = mv_scr_write,
516
Brett Russ31961942005-09-30 01:36:00 -0400517 .port_start = mv_port_start,
518 .port_stop = mv_port_stop,
Brett Russ20f733e2005-09-01 18:26:17 -0400519};
520
Jeff Garzike4e7b892006-01-31 12:18:41 -0500521static const struct ata_port_operations mv_iie_ops = {
522 .port_disable = ata_port_disable,
523
524 .tf_load = ata_tf_load,
525 .tf_read = ata_tf_read,
526 .check_status = ata_check_status,
527 .exec_command = ata_exec_command,
528 .dev_select = ata_std_dev_select,
529
530 .phy_reset = mv_phy_reset,
Jeff Garzikcffacd82007-03-09 09:46:47 -0500531 .cable_detect = ata_cable_sata,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500532
533 .qc_prep = mv_qc_prep_iie,
534 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900535 .data_xfer = ata_data_xfer,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500536
537 .eng_timeout = mv_eng_timeout,
538
Jeff Garzike4e7b892006-01-31 12:18:41 -0500539 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900540 .irq_on = ata_irq_on,
541 .irq_ack = ata_irq_ack,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500542
543 .scr_read = mv_scr_read,
544 .scr_write = mv_scr_write,
545
546 .port_start = mv_port_start,
547 .port_stop = mv_port_stop,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500548};
549
Arjan van de Ven98ac62d2005-11-28 10:06:23 +0100550static const struct ata_port_info mv_port_info[] = {
Brett Russ20f733e2005-09-01 18:26:17 -0400551 { /* chip_504x */
Jeff Garzikcca39742006-08-24 03:19:22 -0400552 .flags = MV_COMMON_FLAGS,
Brett Russ31961942005-09-30 01:36:00 -0400553 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400554 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500555 .port_ops = &mv5_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400556 },
557 { /* chip_508x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400558 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
Brett Russ31961942005-09-30 01:36:00 -0400559 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400560 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500561 .port_ops = &mv5_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400562 },
Jeff Garzik47c2b672005-11-12 21:13:17 -0500563 { /* chip_5080 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400564 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500565 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400566 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500567 .port_ops = &mv5_ops,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500568 },
Brett Russ20f733e2005-09-01 18:26:17 -0400569 { /* chip_604x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400570 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Brett Russ31961942005-09-30 01:36:00 -0400571 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400572 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500573 .port_ops = &mv6_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400574 },
575 { /* chip_608x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400576 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
577 MV_FLAG_DUAL_HC,
Brett Russ31961942005-09-30 01:36:00 -0400578 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400579 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500580 .port_ops = &mv6_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400581 },
Jeff Garzike4e7b892006-01-31 12:18:41 -0500582 { /* chip_6042 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400583 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500584 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400585 .udma_mask = ATA_UDMA6,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500586 .port_ops = &mv_iie_ops,
587 },
588 { /* chip_7042 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400589 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500590 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400591 .udma_mask = ATA_UDMA6,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500592 .port_ops = &mv_iie_ops,
593 },
Brett Russ20f733e2005-09-01 18:26:17 -0400594};
595
Jeff Garzik3b7d6972005-11-10 11:04:11 -0500596static const struct pci_device_id mv_pci_tbl[] = {
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400597 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
598 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
599 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
600 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
Brett Russ20f733e2005-09-01 18:26:17 -0400601
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400602 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
603 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
604 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
605 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
606 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
Jeff Garzik29179532005-11-11 08:08:03 -0500607
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400608 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
609
Florian Attenbergerd9f9c6b2007-07-02 17:09:29 +0200610 /* Adaptec 1430SA */
611 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
612
Olof Johanssone93f09d2007-01-18 18:39:59 -0600613 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
614
Morrison, Tom6a3d5862007-03-06 02:38:10 -0800615 /* add Marvell 7042 support */
616 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
617
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400618 { } /* terminate list */
Brett Russ20f733e2005-09-01 18:26:17 -0400619};
620
621static struct pci_driver mv_pci_driver = {
622 .name = DRV_NAME,
623 .id_table = mv_pci_tbl,
624 .probe = mv_init_one,
625 .remove = ata_pci_remove_one,
626};
627
Jeff Garzik47c2b672005-11-12 21:13:17 -0500628static const struct mv_hw_ops mv5xxx_ops = {
629 .phy_errata = mv5_phy_errata,
630 .enable_leds = mv5_enable_leds,
631 .read_preamp = mv5_read_preamp,
632 .reset_hc = mv5_reset_hc,
Jeff Garzik522479f2005-11-12 22:14:02 -0500633 .reset_flash = mv5_reset_flash,
634 .reset_bus = mv5_reset_bus,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500635};
636
637static const struct mv_hw_ops mv6xxx_ops = {
638 .phy_errata = mv6_phy_errata,
639 .enable_leds = mv6_enable_leds,
640 .read_preamp = mv6_read_preamp,
641 .reset_hc = mv6_reset_hc,
Jeff Garzik522479f2005-11-12 22:14:02 -0500642 .reset_flash = mv6_reset_flash,
643 .reset_bus = mv_reset_pci_bus,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500644};
645
/*
 * module options
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
650
651
Jeff Garzikd88184f2007-02-26 01:26:06 -0500652/* move to PCI layer or libata core? */
653static int pci_go_64(struct pci_dev *pdev)
654{
655 int rc;
656
657 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
658 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
659 if (rc) {
660 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
661 if (rc) {
662 dev_printk(KERN_ERR, &pdev->dev,
663 "64-bit DMA enable failed\n");
664 return rc;
665 }
666 }
667 } else {
668 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
669 if (rc) {
670 dev_printk(KERN_ERR, &pdev->dev,
671 "32-bit DMA enable failed\n");
672 return rc;
673 }
674 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
675 if (rc) {
676 dev_printk(KERN_ERR, &pdev->dev,
677 "32-bit consistent DMA enable failed\n");
678 return rc;
679 }
680 }
681
682 return rc;
683}
684
Jeff Garzikddef9bb2006-02-02 16:17:06 -0500685/*
Brett Russ20f733e2005-09-01 18:26:17 -0400686 * Functions
687 */
688
689static inline void writelfl(unsigned long data, void __iomem *addr)
690{
691 writel(data, addr);
692 (void) readl(addr); /* flush to avoid PCI posted write */
693}
694
Brett Russ20f733e2005-09-01 18:26:17 -0400695static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
696{
697 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
698}
699
/* Map a chip-global port number to the host-controller unit owning it. */
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}
704
/* Map a chip-global port number to its index within its host controller. */
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
709
/* Return the MMIO base of the host controller that owns @port. */
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}
715
Brett Russ20f733e2005-09-01 18:26:17 -0400716static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
717{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500718 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500719 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500720 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400721}
722
/* Return the per-port MMIO base for the port behind @ap. */
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}
727
Jeff Garzikcca39742006-08-24 03:19:22 -0400728static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400729{
Jeff Garzikcca39742006-08-24 03:19:22 -0400730 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400731}
732
/* libata ->irq_clear() hook: intentionally empty — interrupt causes
 * are acknowledged elsewhere (note the HC_IRQ_CAUSE_OFS write-back in
 * the host interrupt path), so there is no per-call work to do here. */
static void mv_irq_clear(struct ata_port *ap)
{
}
736
/**
 * mv_set_edma_ptrs - program EDMA request/response ring registers
 * @port_mmio: per-port MMIO base
 * @hpriv: host private data (consulted for errata flags)
 * @pp: port private data holding the ring DMA addresses
 *
 * Write the CRQB/CRPB ring base addresses into the EDMA queue
 * registers and reset the producer/consumer pointers.  On chips with
 * the XX42A0 errata the "other side" pointer registers (request-out,
 * response-in) are loaded with the low 32 bits of the ring base
 * instead of zero.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	/*
	 * initialize request queue
	 */
	WARN_ON(pp->crqb_dma & 0x3ff);	/* ring must be 1KB-aligned */
	/* double 16-bit shift: >> 32 would be UB if dma_addr_t is 32-bit */
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crqb_dma & 0xffffffff,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	WARN_ON(pp->crpb_dma & 0xff);	/* ring must be 256B-aligned */
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crpb_dma & 0xffffffff,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

}
771
/**
 * mv_start_dma - Enable eDMA engine
 * @base: port base address
 * @hpriv: host private data
 * @pp: port private data
 *
 * Kick the EDMA engine if our cached state says it is stopped, then
 * verify the local cache of the eDMA state is accurate with a
 * WARN_ON against the hardware register.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
			 struct mv_port_priv *pp)
{
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	/* cached state must now agree with the hardware */
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}
792
/**
 * mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Request EDMA shutdown (the disable bit auto-clears), then poll for
 * up to ~100ms for the engine to actually stop.  Also verifies the
 * local cache of the eDMA state is accurate with a WARN_ON.
 *
 * Returns 0 on success, -EIO if the engine refuses to stop.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active. The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		/* cache says stopped; hardware had better agree */
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {	/* 1000 * 100us = ~100ms budget */
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		/* FIXME: Consider doing a reset here to recover */
		err = -EIO;
	}

	return err;
}
836
#ifdef ATA_DEBUG
/* Hex-dump @bytes of MMIO space starting at @start, four 32-bit words
 * per output line (debug builds only). */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int offset = 0;

	while (offset < bytes) {
		int col;

		DPRINTK("%p: ", start + offset);
		for (col = 0; offset < bytes && col < 4; col++) {
			printk("%08x ",readl(start + offset));
			offset += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
851
/* Hex-dump the first @bytes of @pdev's PCI config space, four 32-bit
 * words per line.  Compiles to an empty function unless ATA_DEBUG. */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int offset = 0;
	u32 dw;

	while (offset < bytes) {
		int col;

		DPRINTK("%02x: ", offset);
		for (col = 0; offset < bytes && col < 4; col++) {
			(void) pci_read_config_dword(pdev, offset, &dw);
			printk("%08x ",dw);
			offset += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
868static void mv_dump_all_regs(void __iomem *mmio_base, int port,
869 struct pci_dev *pdev)
870{
871#ifdef ATA_DEBUG
Jeff Garzik8b260242005-11-12 12:32:50 -0500872 void __iomem *hc_base = mv_hc_base(mmio_base,
Brett Russ31961942005-09-30 01:36:00 -0400873 port >> MV_PORT_HC_SHIFT);
874 void __iomem *port_base;
875 int start_port, num_ports, p, start_hc, num_hcs, hc;
876
877 if (0 > port) {
878 start_hc = start_port = 0;
879 num_ports = 8; /* shld be benign for 4 port devs */
880 num_hcs = 2;
881 } else {
882 start_hc = port >> MV_PORT_HC_SHIFT;
883 start_port = port;
884 num_ports = num_hcs = 1;
885 }
Jeff Garzik8b260242005-11-12 12:32:50 -0500886 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
Brett Russ31961942005-09-30 01:36:00 -0400887 num_ports > 1 ? num_ports - 1 : start_port);
888
889 if (NULL != pdev) {
890 DPRINTK("PCI config space regs:\n");
891 mv_dump_pci_cfg(pdev, 0x68);
892 }
893 DPRINTK("PCI regs:\n");
894 mv_dump_mem(mmio_base+0xc00, 0x3c);
895 mv_dump_mem(mmio_base+0xd00, 0x34);
896 mv_dump_mem(mmio_base+0xf00, 0x4);
897 mv_dump_mem(mmio_base+0x1d00, 0x6c);
898 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
Dan Alonid220c372006-04-10 23:20:22 -0700899 hc_base = mv_hc_base(mmio_base, hc);
Brett Russ31961942005-09-30 01:36:00 -0400900 DPRINTK("HC regs (HC %i):\n", hc);
901 mv_dump_mem(hc_base, 0x1c);
902 }
903 for (p = start_port; p < start_port + num_ports; p++) {
904 port_base = mv_port_base(mmio_base, p);
905 DPRINTK("EDMA regs (port %i):\n",p);
906 mv_dump_mem(port_base, 0x54);
907 DPRINTK("SATA regs (port %i):\n",p);
908 mv_dump_mem(port_base+0x300, 0x60);
909 }
910#endif
911}
912
Brett Russ20f733e2005-09-01 18:26:17 -0400913static unsigned int mv_scr_offset(unsigned int sc_reg_in)
914{
915 unsigned int ofs;
916
917 switch (sc_reg_in) {
918 case SCR_STATUS:
919 case SCR_CONTROL:
920 case SCR_ERROR:
921 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
922 break;
923 case SCR_ACTIVE:
924 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
925 break;
926 default:
927 ofs = 0xffffffffU;
928 break;
929 }
930 return ofs;
931}
932
933static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
934{
935 unsigned int ofs = mv_scr_offset(sc_reg_in);
936
Jeff Garzik35177262007-02-24 21:26:42 -0500937 if (0xffffffffU != ofs)
Brett Russ20f733e2005-09-01 18:26:17 -0400938 return readl(mv_ap_base(ap) + ofs);
Jeff Garzik35177262007-02-24 21:26:42 -0500939 else
Brett Russ20f733e2005-09-01 18:26:17 -0400940 return (u32) ofs;
Brett Russ20f733e2005-09-01 18:26:17 -0400941}
942
943static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
944{
945 unsigned int ofs = mv_scr_offset(sc_reg_in);
946
Jeff Garzik35177262007-02-24 21:26:42 -0500947 if (0xffffffffU != ofs)
Brett Russ20f733e2005-09-01 18:26:17 -0400948 writelfl(val, mv_ap_base(ap) + ofs);
Brett Russ20f733e2005-09-01 18:26:17 -0400949}
950
/**
 * mv_edma_cfg - program the per-port EDMA configuration register
 * @ap: ATA channel (unused here, kept for call-site symmetry)
 * @hpriv: host private data, used to select the chip generation
 * @port_mmio: per-port MMIO base
 *
 * Read-modify-write EDMA_CFG for non-NCQ operation, with the bit
 * layout differing per chip generation (Gen I / Gen II / Gen IIE).
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable eQue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
982
/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the
 * error code from ata_pad_alloc().
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	int rc;

	/* devm_/dmam_ allocations are released automatically on
	 * device detach or probe failure — no explicit frees here. */
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	mv_edma_cfg(ap, hpriv, port_mmio);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}
1050
/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.  (DMA descriptor memory itself is
 * managed — devm/dmam — and is freed automatically on detach.)
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	unsigned long flags;

	spin_lock_irqsave(&ap->host->lock, flags);
	mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);
}
1068
Brett Russ05b308e2005-10-05 17:08:53 -04001069/**
1070 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1071 * @qc: queued command whose SG list to source from
1072 *
1073 * Populate the SG list and mark the last entry.
1074 *
1075 * LOCKING:
1076 * Inherited from caller.
1077 */
Jeff Garzikd88184f2007-02-26 01:26:06 -05001078static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
Brett Russ31961942005-09-30 01:36:00 -04001079{
1080 struct mv_port_priv *pp = qc->ap->private_data;
Jeff Garzikd88184f2007-02-26 01:26:06 -05001081 unsigned int n_sg = 0;
Jeff Garzik972c26b2005-10-18 22:14:54 -04001082 struct scatterlist *sg;
Jeff Garzikd88184f2007-02-26 01:26:06 -05001083 struct mv_sg *mv_sg;
Brett Russ31961942005-09-30 01:36:00 -04001084
Jeff Garzikd88184f2007-02-26 01:26:06 -05001085 mv_sg = pp->sg_tbl;
Jeff Garzik972c26b2005-10-18 22:14:54 -04001086 ata_for_each_sg(sg, qc) {
Jeff Garzikd88184f2007-02-26 01:26:06 -05001087 dma_addr_t addr = sg_dma_address(sg);
1088 u32 sg_len = sg_dma_len(sg);
Brett Russ31961942005-09-30 01:36:00 -04001089
Jeff Garzikd88184f2007-02-26 01:26:06 -05001090 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1091 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1092 mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);
Brett Russ31961942005-09-30 01:36:00 -04001093
Jeff Garzikd88184f2007-02-26 01:26:06 -05001094 if (ata_sg_is_last(sg, qc))
1095 mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
Jeff Garzik972c26b2005-10-18 22:14:54 -04001096
Jeff Garzikd88184f2007-02-26 01:26:06 -05001097 mv_sg++;
1098 n_sg++;
Brett Russ31961942005-09-30 01:36:00 -04001099 }
Jeff Garzikd88184f2007-02-26 01:26:06 -05001100
1101 return n_sg;
Brett Russ31961942005-09-30 01:36:00 -04001102}
1103
/* Advance a request/response ring index by one with wrap-around
 * (queue depth is a power of two, so the mask implements modulo). */
static inline unsigned mv_inc_q_index(unsigned index)
{
	return (index + 1) & MV_MAX_Q_DEPTH_MASK;
}
1108
Mark Lorde1469872006-05-22 19:02:03 -04001109static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001110{
Mark Lord559eeda2006-05-19 16:40:15 -04001111 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001112 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001113 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001114}
1115
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * Does nothing for non-DMA protocols (those are issued through the
 * shadow registers by mv_qc_issue).  For DMA, it handles prep of the
 * CRQB (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this*/

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* split 16+16 shift keeps this valid for 32-bit dma_addr_t */
	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accomodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command. So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ. NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux. If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	/* remaining taskfile registers, command word flagged as last */
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1208
/**
 * mv_qc_prep_iie - Host specific command preparation (Gen IIE).
 * @qc: queued command to prepare
 *
 * Does nothing for non-DMA protocols (those are issued through the
 * shadow registers by mv_qc_issue).  For DMA, it handles prep of the
 * Gen-IIE-format CRQB (command request block), does some sanity
 * checking, and calls the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
						   what we use as our tag */

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* Gen IIE reuses the CRQB ring slots with its own layout */
	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	/* taskfile registers packed into four little-endian words */
	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1278
/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * Returns 0 on success, or the return of ata_qc_issue_prot() for
 * non-DMA commands.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned in_index;
	u32 in_ptr;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
	in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	in_index = mv_inc_q_index(in_index);	/* now incr producer index */

	mv_start_dma(port_mmio, hpriv, pp);

	/* and write the request in pointer to kick the EDMA to life;
	 * preserve the base-address bits, replace only the index field */
	in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
	in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
	writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1327
/**
 * mv_get_crpb_status - get status from most recently completed cmd
 * @ap: ATA channel to manipulate
 *
 * This routine is for use when the port is in DMA mode, when it
 * will be using the CRPB (command response block) method of
 * returning command completion information.  We check indices
 * are good, grab status, and bump the response consumer index to
 * prove that we're up to date.
 *
 * Returns the ATA status byte from the completed CRPB.
 *
 * LOCKING:
 * Inherited from caller.
 */
static u8 mv_get_crpb_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	unsigned out_index;
	u32 out_ptr;
	u8 ata_status;

	out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
	out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* ATA status lives in the upper byte of the CRPB flags word */
	ata_status = le16_to_cpu(pp->crpb[out_index].flags)
		>> CRPB_FLAG_STATUS_SHIFT;

	/* increment our consumer index... */
	out_index = mv_inc_q_index(out_index);

	/* and, until we do NCQ, there should only be 1 CRPB waiting */
	WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
		>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	/* write out our inc'd consumer index so EDMA knows we're caught up */
	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
	out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
	writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* Return ATA status register for completed CRPB */
	return ata_status;
}
1370
/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @reset_allowed: bool: 0 == don't trigger from reset here
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which is done right before
 * the COMRESET in mv_phy_reset().  The SERR case requires a
 * clear of pending errors in the SATA SERROR register.  Finally,
 * if the port disabled DMA, update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, int reset_allowed)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, serr = 0;

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (EDMA_ERR_SERR & edma_err_cause) {
		/* writing SERR's own value back clears its pending bits */
		sata_scr_read(ap, SCR_ERROR, &serr);
		sata_scr_write_flush(ap, SCR_ERROR, serr);
	}
	if (EDMA_ERR_SELF_DIS & edma_err_cause) {
		/* hardware disabled EDMA itself; sync our cached state */
		struct mv_port_priv *pp = ap->private_data;
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	}
	DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
		"SERR: 0x%08x\n", ap->print_id, edma_err_cause, serr);

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* check for fatal here and recover if needed */
	if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
		mv_stop_and_reset(ap);
}
1410
/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @relevant: port error bits relevant to this host controller
 *      @hc: which host controller we're to look at
 *
 *      Read then write clear the HC interrupt status then walk each
 *      port connected to the HC and see if it needs servicing.  Port
 *      success ints are reported in the HC interrupt status reg, the
 *      port error ints are reported in the higher level main
 *      interrupt status register and thus are passed in via the
 *      'relevant' argument.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	struct ata_queued_cmd *qc;
	u32 hc_irq_cause;
	int port, port0;
	int shift, hard_port, handled;
	unsigned int err_mask;

	/* HC 0 serves ports 0..3, HC 1 serves ports 4..7 */
	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (hc_irq_cause)
		writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc,relevant,hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		u8 ata_status = 0;
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;

		hard_port = mv_hardport_from_port(port); /* range 0..3 */
		handled = 0;	/* ensure ata_status is set if handled++ */

		/* Note that DEV_IRQ might happen spuriously during EDMA,
		 * and should be ignored in such cases.
		 * The cause of this is still under investigation.
		 */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			/* EDMA: check for response queue interrupt */
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
				ata_status = mv_get_crpb_status(ap);
				handled = 1;
			}
		} else {
			/* PIO: check for device (drive) interrupt */
			if ((DEV_IRQ << hard_port) & hc_irq_cause) {
				ata_status = readb(ap->ioaddr.status_addr);
				handled = 1;
				/* ignore spurious intr if drive still BUSY */
				if (ata_status & ATA_BUSY) {
					ata_status = 0;
					handled = 0;
				}
			}
		}

		/* NOTE(review): ap was already dereferenced above via
		 * ap->private_data, so the NULL-tolerant test here comes
		 * too late if ap could truly be NULL -- verify intent.
		 */
		if (ap && (ap->flags & ATA_FLAG_DISABLED))
			continue;

		err_mask = ac_err_mask(ata_status);

		/* Per-port error bits occupy two bits each in the main
		 * cause register, with bit 8 skipped between HCs.
		 */
		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		if ((PORT0_ERR << shift) & relevant) {
			mv_err_intr(ap, 1);
			err_mask |= AC_ERR_OTHER;
			handled = 1;
		}

		if (handled) {
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
				VPRINTK("port %u IRQ found for qc, "
					"ata_status 0x%x\n", port,ata_status);
				/* mark qc status appropriately */
				if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
					qc->err_mask |= err_mask;
					ata_qc_complete(qc);
				}
			}
		}
	}
	VPRINTK("EXIT\n");
}
1511
/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	u32 irq_stat;

	/* Read main cause before taking the lock: all-zero or all-ones
	 * means nothing pending (or hardware gone) and we bail early.
	 */
	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat))
		return IRQ_NONE;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);
	spin_lock(&host->lock);

	/* Dispatch each host controller with its slice of the cause bits */
	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled++;
		}
	}

	/* PCI-level errors are only reported through this register */
	if (PCI_ERR & irq_stat) {
		printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
		       readl(mmio + PCI_IRQ_CAUSE_OFS));

		DPRINTK("All regs @ PCI error\n");
		mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

		writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
		handled++;
	}
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1566
Jeff Garzikc9d39132005-11-13 17:47:51 -05001567static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1568{
1569 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1570 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1571
1572 return hc_mmio + ofs;
1573}
1574
1575static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1576{
1577 unsigned int ofs;
1578
1579 switch (sc_reg_in) {
1580 case SCR_STATUS:
1581 case SCR_ERROR:
1582 case SCR_CONTROL:
1583 ofs = sc_reg_in * sizeof(u32);
1584 break;
1585 default:
1586 ofs = 0xffffffffU;
1587 break;
1588 }
1589 return ofs;
1590}
1591
1592static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
1593{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001594 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1595 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001596 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1597
1598 if (ofs != 0xffffffffU)
Tejun Heo0d5ff562007-02-01 15:06:36 +09001599 return readl(addr + ofs);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001600 else
1601 return (u32) ofs;
1602}
1603
1604static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1605{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001606 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1607 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001608 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1609
1610 if (ofs != 0xffffffffU)
Tejun Heo0d5ff562007-02-01 15:06:36 +09001611 writelfl(val, addr + ofs);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001612}
1613
/* Reset the PCI bus on 50xx parts.  Rev-0 5080 chips skip the
 * expansion-ROM BAR control tweak; everything else sets bit 0 there
 * before performing the common PCI bus reset.
 */
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u8 rev_id;
	int early_5080;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	early_5080 = (pdev->device == 0x5080) && (rev_id == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}
1631
/* Reset the flash controller on 50xx parts (single magic-value write;
 * hpriv is unused here but keeps the per-chip ops signature uniform).
 */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1636
/* Capture the board's PHY pre-emphasis and amplitude settings for port
 * @idx from the 50xx PHY mode register, to be re-applied after channel
 * resets by mv5_phy_errata().
 */
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}
1648
/* Configure LED/GPIO behavior on 50xx parts. */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): "|= ~(1 << 0)" sets bits 31:1 and leaves bit 0
	 * unchanged, which looks odd next to mv5_reset_bus()'s
	 * "|= (1 << 0)" on the same register.  Possibly "&= ~(1 << 0)"
	 * was intended -- confirm against the Marvell 50xx datasheet
	 * before changing; this matches the long-standing behavior.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1661
/* Apply 50xx PHY errata workarounds to @port and restore the saved
 * pre-emphasis/amplitude settings captured by mv5_read_preamp().
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	/* B0 errata: adjust link-training mode and PHY control */
	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* Re-apply the board's saved signal settings in the masked bits */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1687
Jeff Garzikc9d39132005-11-13 17:47:51 -05001688
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Bring one 50xx port to a known state: disable EDMA, reset the
 * channel, then zero/initialize every EDMA queue and control register.
 * The offsets are the raw EDMA register block layout.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1715
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Reset the shared registers of one 50xx host controller: clear the
 * HC-level IRQ/config registers and program the register at offset
 * 0x20 with the vendor-specified field values.
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	/* keep the 0x1c1c1c1c bits, force 0x03030303 into the rest */
	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
1734
1735static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1736 unsigned int n_hc)
1737{
1738 unsigned int hc, port;
1739
1740 for (hc = 0; hc < n_hc; hc++) {
1741 for (port = 0; port < MV_PORTS_PER_HC; port++)
1742 mv5_reset_hc_port(hpriv, mmio,
1743 (hc * MV_PORTS_PER_HC) + port);
1744
1745 mv5_reset_one_hc(hpriv, mmio, hc);
1746 }
1747
1748 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05001749}
1750
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
/* Common PCI-side reset used by both chip families: restore PCI mode
 * bits, clear timers/triggers and all PCI error/IRQ bookkeeping
 * registers.  @pdev is unused here but kept for the call signature.
 */
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	/* clear bits 23:16 of the PCI mode register */
	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
1774
/* Reset the flash controller on 60xx parts: perform the 50xx reset,
 * then additionally set GPIO port control bits 5 and 6 while keeping
 * only the low two bits of the existing value.
 */
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
1786
/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @mmio: base address of the HBA
 *      @n_hc: number of host controllers (unused on 6xxx; the reset is global)
 *
 *      This routine only applies to 6xxx parts.  Returns 0 on success,
 *      1 if any step of the reset handshake times out.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* wait up to ~1ms for the PCI master to drain */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t) {
			break;
		}
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset (retry the write up to 5 times until the bit reads back) */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
1851
/* Capture pre-emphasis/amplitude for port @idx on 60xx parts.  If the
 * reset-config register says the settings are not board-strapped
 * (bit 0 clear), fall back to fixed defaults; otherwise read them
 * from the port's PHY_MODE2 register.
 */
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		/* defaults when no strap values are available */
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
1871
/* Configure LED/GPIO behavior on 60xx parts (bits 5 and 6 set). */
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
1876
/* Apply 60xx PHY errata workarounds to @port and restore the saved
 * pre-emphasis/amplitude settings captured by mv6_read_preamp().
 * The register pokes and 200us delays follow the Marvell vendor
 * driver; several values are acknowledged magic.
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	/* pulse bit 31 with bit 16 cleared, then clear both, with
	 * settle delays after each write
	 */
	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* B2 parts: preserve register 0x310 across the MODE4 write */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
1942
/* Hard-reset one SATA channel: assert ATA_RST in the EDMA command
 * register, apply per-chip interface setup, wait for propagation,
 * deassert, then run the chip's PHY errata fixups.
 */
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_60XX(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	/* 50xx parts need extra settle time after the reset */
	if (IS_50XX(hpriv))
		mdelay(1);
}
1969
/* Full port recovery: stop DMA, hard-reset the channel, then perform
 * a COMRESET/phy-reset without sleeping (callers may hold locks or be
 * in interrupt context, hence can_sleep == 0).
 */
static void mv_stop_and_reset(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	__mv_phy_reset(ap, 0);
}
1981
/* Delay for @msec milliseconds, busy-waiting with mdelay() when the
 * caller cannot sleep (e.g. interrupt context), sleeping otherwise.
 */
static inline void __msleep(unsigned int msec, int can_sleep)
{
	if (!can_sleep) {
		mdelay(msec);
		return;
	}
	msleep(msec);
}
1989
/**
 *      __mv_phy_reset - Perform eDMA reset followed by COMRESET
 *      @ap: ATA channel to manipulate
 *      @can_sleep: nonzero if the caller's context permits sleeping
 *
 *      Part of this is taken from __sata_phy_reset and modified to
 *      not sleep since this routine gets called from interrupt level.
 *
 *      LOCKING:
 *      Inherited from caller.  This is coded to safe to call at
 *      interrupt level, i.e. it does not sleep.
 */
static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
{
	struct mv_port_priv *pp	= ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct ata_taskfile tf;
	struct ata_device *dev = &ap->device[0];
	unsigned long deadline;
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

	DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
	__msleep(1, can_sleep);

	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
	__msleep(20, can_sleep);

	/* poll up to 200ms for the link to report established (3) or
	 * no-device (0) in SStatus DET
	 */
	deadline = jiffies + msecs_to_jiffies(200);
	do {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		__msleep(1, can_sleep);
	} while (time_before(jiffies, deadline));

	/* work around errata: 60xx parts may need the whole COMRESET
	 * repeated if SStatus lands on an unexpected value
	 */
	if (IS_60XX(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

	DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	if (ata_port_online(ap)) {
		ata_port_probe(ap);
	} else {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		ata_port_printk(ap, KERN_INFO,
				"no device found (phy stat %08x)\n", sstatus);
		ata_port_disable(ap);
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		__msleep(500, can_sleep);
		if (retry-- <= 0)
			break;
	}

	/* read the device signature from the shadow registers */
	tf.lbah = readb(ap->ioaddr.lbah_addr);
	tf.lbam = readb(ap->ioaddr.lbam_addr);
	tf.lbal = readb(ap->ioaddr.lbal_addr);
	tf.nsect = readb(ap->ioaddr.nsect_addr);

	dev->class = ata_dev_classify(&tf);
	if (!ata_dev_enabled(dev)) {
		VPRINTK("Port disabled post-sig: No device present.\n");
		ata_port_disable(ap);
	}

	/* clear any errors latched during the reset sequence */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* EDMA was stopped by the channel reset; sync our cached flag */
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	VPRINTK("EXIT\n");
}
2087
/* Sleeping wrapper around __mv_phy_reset() for process context. */
static void mv_phy_reset(struct ata_port *ap)
{
	__mv_phy_reset(ap, 1);
}
2092
Brett Russ05b308e2005-10-05 17:08:53 -04002093/**
2094 * mv_eng_timeout - Routine called by libata when SCSI times out I/O
2095 * @ap: ATA channel to manipulate
2096 *
2097 * Intent is to clear all pending error conditions, reset the
2098 * chip/bus, fail the command, and move on.
2099 *
2100 * LOCKING:
Jeff Garzikcca39742006-08-24 03:19:22 -04002101 * This routine holds the host lock while failing the command.
Brett Russ05b308e2005-10-05 17:08:53 -04002102 */
Brett Russ31961942005-09-30 01:36:00 -04002103static void mv_eng_timeout(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04002104{
Tejun Heo0d5ff562007-02-01 15:06:36 +09002105 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
Brett Russ31961942005-09-30 01:36:00 -04002106 struct ata_queued_cmd *qc;
Mark Lord2f9719b2006-06-07 12:53:29 -04002107 unsigned long flags;
Brett Russ31961942005-09-30 01:36:00 -04002108
Tejun Heof15a1da2006-05-15 20:57:56 +09002109 ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
Brett Russ31961942005-09-30 01:36:00 -04002110 DPRINTK("All regs @ start of eng_timeout\n");
Tejun Heo0d5ff562007-02-01 15:06:36 +09002111 mv_dump_all_regs(mmio, ap->port_no, to_pci_dev(ap->host->dev));
Brett Russ31961942005-09-30 01:36:00 -04002112
2113 qc = ata_qc_from_tag(ap, ap->active_tag);
2114 printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
Tejun Heo0d5ff562007-02-01 15:06:36 +09002115 mmio, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd);
Brett Russ31961942005-09-30 01:36:00 -04002116
Jeff Garzikcca39742006-08-24 03:19:22 -04002117 spin_lock_irqsave(&ap->host->lock, flags);
Mark Lord9b358e32006-05-19 16:21:03 -04002118 mv_err_intr(ap, 0);
Jeff Garzikc9d39132005-11-13 17:47:51 -05002119 mv_stop_and_reset(ap);
Jeff Garzikcca39742006-08-24 03:19:22 -04002120 spin_unlock_irqrestore(&ap->host->lock, flags);
Brett Russ31961942005-09-30 01:36:00 -04002121
Mark Lord9b358e32006-05-19 16:21:03 -04002122 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
2123 if (qc->flags & ATA_QCFLAG_ACTIVE) {
2124 qc->err_mask |= AC_ERR_TIMEOUT;
2125 ata_eh_qc_complete(qc);
2126 }
Brett Russ31961942005-09-30 01:36:00 -04002127}
2128
/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup: the taskfile shadow registers live at
	 * 4-byte strides inside the port's shadow block.
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions;
	 * SERROR is write-1-to-clear, so echo back what we read.
	 */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
2177
Tejun Heo4447d352007-04-17 23:44:08 +09002178static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002179{
Tejun Heo4447d352007-04-17 23:44:08 +09002180 struct pci_dev *pdev = to_pci_dev(host->dev);
2181 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002182 u8 rev_id;
2183 u32 hp_flags = hpriv->hp_flags;
2184
2185 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
2186
2187 switch(board_idx) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002188 case chip_5080:
2189 hpriv->ops = &mv5xxx_ops;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002190 hp_flags |= MV_HP_50XX;
2191
Jeff Garzik47c2b672005-11-12 21:13:17 -05002192 switch (rev_id) {
2193 case 0x1:
2194 hp_flags |= MV_HP_ERRATA_50XXB0;
2195 break;
2196 case 0x3:
2197 hp_flags |= MV_HP_ERRATA_50XXB2;
2198 break;
2199 default:
2200 dev_printk(KERN_WARNING, &pdev->dev,
2201 "Applying 50XXB2 workarounds to unknown rev\n");
2202 hp_flags |= MV_HP_ERRATA_50XXB2;
2203 break;
2204 }
2205 break;
2206
2207 case chip_504x:
2208 case chip_508x:
2209 hpriv->ops = &mv5xxx_ops;
2210 hp_flags |= MV_HP_50XX;
2211
2212 switch (rev_id) {
2213 case 0x0:
2214 hp_flags |= MV_HP_ERRATA_50XXB0;
2215 break;
2216 case 0x3:
2217 hp_flags |= MV_HP_ERRATA_50XXB2;
2218 break;
2219 default:
2220 dev_printk(KERN_WARNING, &pdev->dev,
2221 "Applying B2 workarounds to unknown rev\n");
2222 hp_flags |= MV_HP_ERRATA_50XXB2;
2223 break;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002224 }
2225 break;
2226
2227 case chip_604x:
2228 case chip_608x:
Jeff Garzik47c2b672005-11-12 21:13:17 -05002229 hpriv->ops = &mv6xxx_ops;
2230
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002231 switch (rev_id) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002232 case 0x7:
2233 hp_flags |= MV_HP_ERRATA_60X1B2;
2234 break;
2235 case 0x9:
2236 hp_flags |= MV_HP_ERRATA_60X1C0;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002237 break;
2238 default:
2239 dev_printk(KERN_WARNING, &pdev->dev,
Jeff Garzik47c2b672005-11-12 21:13:17 -05002240 "Applying B2 workarounds to unknown rev\n");
2241 hp_flags |= MV_HP_ERRATA_60X1B2;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002242 break;
2243 }
2244 break;
2245
Jeff Garzike4e7b892006-01-31 12:18:41 -05002246 case chip_7042:
2247 case chip_6042:
2248 hpriv->ops = &mv6xxx_ops;
2249
2250 hp_flags |= MV_HP_GEN_IIE;
2251
2252 switch (rev_id) {
2253 case 0x0:
2254 hp_flags |= MV_HP_ERRATA_XX42A0;
2255 break;
2256 case 0x1:
2257 hp_flags |= MV_HP_ERRATA_60X1C0;
2258 break;
2259 default:
2260 dev_printk(KERN_WARNING, &pdev->dev,
2261 "Applying 60X1C0 workarounds to unknown rev\n");
2262 hp_flags |= MV_HP_ERRATA_60X1C0;
2263 break;
2264 }
2265 break;
2266
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002267 default:
2268 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2269 return 1;
2270 }
2271
2272 hpriv->hp_flags = hp_flags;
2273
2274 return 0;
2275}
2276
Brett Russ05b308e2005-10-05 17:08:53 -04002277/**
Jeff Garzik47c2b672005-11-12 21:13:17 -05002278 * mv_init_host - Perform some early initialization of the host.
Tejun Heo4447d352007-04-17 23:44:08 +09002279 * @host: ATA host to initialize
2280 * @board_idx: controller index
Brett Russ05b308e2005-10-05 17:08:53 -04002281 *
2282 * If possible, do an early global reset of the host. Then do
2283 * our port init and clear/unmask all/relevant host interrupts.
2284 *
2285 * LOCKING:
2286 * Inherited from caller.
2287 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	/* identify chip generation and pick ops/errata flags; bail if the
	 * board index is bogus
	 */
	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* snapshot per-port PHY state before the controller reset below */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	/* per-port PHY/interface fixups must precede mv_port_init() below */
	for (port = 0; port < host->n_ports; port++) {
		if (IS_60XX(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	/* wire up libata's taskfile addresses and clear port IRQ state */
	for (port = 0; port < host->n_ports; port++) {
		void __iomem *port_mmio = mv_port_base(mmio, port);
		mv_port_init(&host->ports[port]->ioaddr, port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);

	/* 50xx chips use a different main-IRQ mask than the 6xxx family */
	if (IS_50XX(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));

done:
	return rc;
}
2366
Brett Russ05b308e2005-10-05 17:08:53 -04002367/**
2368 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002369 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002370 *
2371 * FIXME: complete this.
2372 *
2373 * LOCKING:
2374 * Inherited from caller.
2375 */
Tejun Heo4447d352007-04-17 23:44:08 +09002376static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002377{
Tejun Heo4447d352007-04-17 23:44:08 +09002378 struct pci_dev *pdev = to_pci_dev(host->dev);
2379 struct mv_host_priv *hpriv = host->private_data;
Brett Russ31961942005-09-30 01:36:00 -04002380 u8 rev_id, scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002381 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002382
2383 /* Use this to determine the HW stepping of the chip so we know
2384 * what errata to workaround
2385 */
2386 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
2387
2388 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2389 if (scc == 0)
2390 scc_s = "SCSI";
2391 else if (scc == 0x01)
2392 scc_s = "RAID";
2393 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002394 scc_s = "?";
2395
2396 if (IS_GEN_I(hpriv))
2397 gen = "I";
2398 else if (IS_GEN_II(hpriv))
2399 gen = "II";
2400 else if (IS_GEN_IIE(hpriv))
2401 gen = "IIE";
2402 else
2403 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002404
Jeff Garzika9524a72005-10-30 14:39:11 -05002405 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002406 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2407 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002408 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2409}
2410
Brett Russ05b308e2005-10-05 17:08:53 -04002411/**
2412 * mv_init_one - handle a positive probe of a Marvell host
2413 * @pdev: PCI device found
2414 * @ent: PCI device ID entry for the matched host
2415 *
2416 * LOCKING:
2417 * Inherited from caller.
2418 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	/* print the driver version once, on the first probed device */
	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	/* devm/pcim (managed) allocations below are released automatically
	 * on probe failure, hence the bare early returns with no cleanup
	 */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	/* fall back to legacy INTx if MSI is disabled or unavailable */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_set_mwi(pdev);
	/* register with libata and request the (shared) IRQ line */
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
2473
/* module entry point: register the PCI driver with the PCI core */
static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}
2478
/* module exit point: unregister the PCI driver */
static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}
2483
/* module metadata */
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

/* "msi" module parameter, world-readable (0444) but not writable */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);