blob: 1c53c8a7d21f3f79b9e9d4c3897f20246fffe619 [file] [log] [blame]
Brett Russ20f733e2005-09-01 18:26:17 -04001/*
2 * sata_mv.c - Marvell SATA support
3 *
Jeff Garzik8b260242005-11-12 12:32:50 -05004 * Copyright 2005: EMC Corporation, all rights reserved.
Jeff Garzike2b1be52005-11-18 14:04:23 -05005 * Copyright 2005 Red Hat, Inc. All rights reserved.
Brett Russ20f733e2005-09-01 18:26:17 -04006 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
Jeff Garzik4a05e202007-05-24 23:40:15 -040024/*
25 sata_mv TODO list:
26
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember a couple workarounds (one related to PCI-X)
30 are still needed.
31
Jeff Garzik4a05e202007-05-24 23:40:15 -040032 4) Add NCQ support (easy to intermediate, once new-EH support appears)
33
34 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
35
36 6) Add port multiplier support (intermediate)
37
Jeff Garzik4a05e202007-05-24 23:40:15 -040038 8) Develop a low-power-consumption strategy, and implement it.
39
40 9) [Experiment, low priority] See if ATAPI can be supported using
41 "unknown FIS" or "vendor-specific FIS" support, or something creative
42 like that.
43
44 10) [Experiment, low priority] Investigate interrupt coalescing.
45 Quite often, especially with PCI Message Signalled Interrupts (MSI),
46 the overhead reduced by interrupt mitigation is quite often not
47 worth the latency cost.
48
49 11) [Experiment, Marvell value added] Is it possible to use target
50 mode to cross-connect two Linux boxes with Marvell cards? If so,
51 creating LibATA target mode support would be very interesting.
52
53 Target mode, for those without docs, is the ability to directly
54 connect two SATA controllers.
55
56 13) Verify that 7042 is fully supported. I only have a 6042.
57
58*/
59
60
Brett Russ20f733e2005-09-01 18:26:17 -040061#include <linux/kernel.h>
62#include <linux/module.h>
63#include <linux/pci.h>
64#include <linux/init.h>
65#include <linux/blkdev.h>
66#include <linux/delay.h>
67#include <linux/interrupt.h>
Brett Russ20f733e2005-09-01 18:26:17 -040068#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050069#include <linux/device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040070#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050071#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040072#include <scsi/scsi_device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040073#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040074
75#define DRV_NAME "sata_mv"
Jeff Garzik6c087722007-10-12 00:16:23 -040076#define DRV_VERSION "1.01"
Brett Russ20f733e2005-09-01 18:26:17 -040077
78enum {
79 /* BAR's are enumerated in terms of pci_resource_start() terms */
80 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
81 MV_IO_BAR = 2, /* offset 0x18: IO space */
82 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
83
84 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
85 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
86
87 MV_PCI_REG_BASE = 0,
88 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
Mark Lord615ab952006-05-19 16:24:56 -040089 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
90 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
91 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
92 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
93 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
94
Brett Russ20f733e2005-09-01 18:26:17 -040095 MV_SATAHC0_REG_BASE = 0x20000,
Jeff Garzik522479f2005-11-12 22:14:02 -050096 MV_FLASH_CTL = 0x1046c,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -050097 MV_GPIO_PORT_CTL = 0x104f0,
98 MV_RESET_CFG = 0x180d8,
Brett Russ20f733e2005-09-01 18:26:17 -040099
100 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
101 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
102 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
103 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
104
Brett Russ31961942005-09-30 01:36:00 -0400105 MV_MAX_Q_DEPTH = 32,
106 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
107
108 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
109 * CRPB needs alignment on a 256B boundary. Size == 256B
Brett Russ31961942005-09-30 01:36:00 -0400110 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
111 */
112 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
113 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
Mark Lordda2fa9b2008-01-26 18:32:45 -0500114 MV_MAX_SG_CT = 256,
Brett Russ31961942005-09-30 01:36:00 -0400115 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
Brett Russ31961942005-09-30 01:36:00 -0400116
Brett Russ20f733e2005-09-01 18:26:17 -0400117 MV_PORTS_PER_HC = 4,
118 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
119 MV_PORT_HC_SHIFT = 2,
Brett Russ31961942005-09-30 01:36:00 -0400120 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
Brett Russ20f733e2005-09-01 18:26:17 -0400121 MV_PORT_MASK = 3,
122
123 /* Host Flags */
124 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
125 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400126 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400127 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
128 ATA_FLAG_PIO_POLLING,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500129 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
Brett Russ20f733e2005-09-01 18:26:17 -0400130
Brett Russ31961942005-09-30 01:36:00 -0400131 CRQB_FLAG_READ = (1 << 0),
132 CRQB_TAG_SHIFT = 1,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400133 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
134 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
Brett Russ31961942005-09-30 01:36:00 -0400135 CRQB_CMD_ADDR_SHIFT = 8,
136 CRQB_CMD_CS = (0x2 << 11),
137 CRQB_CMD_LAST = (1 << 15),
138
139 CRPB_FLAG_STATUS_SHIFT = 8,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400140 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
141 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
Brett Russ31961942005-09-30 01:36:00 -0400142
143 EPRD_FLAG_END_OF_TBL = (1 << 31),
144
Brett Russ20f733e2005-09-01 18:26:17 -0400145 /* PCI interface registers */
146
Brett Russ31961942005-09-30 01:36:00 -0400147 PCI_COMMAND_OFS = 0xc00,
148
Brett Russ20f733e2005-09-01 18:26:17 -0400149 PCI_MAIN_CMD_STS_OFS = 0xd30,
150 STOP_PCI_MASTER = (1 << 2),
151 PCI_MASTER_EMPTY = (1 << 3),
152 GLOB_SFT_RST = (1 << 4),
153
Jeff Garzik522479f2005-11-12 22:14:02 -0500154 MV_PCI_MODE = 0xd00,
155 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
156 MV_PCI_DISC_TIMER = 0xd04,
157 MV_PCI_MSI_TRIGGER = 0xc38,
158 MV_PCI_SERR_MASK = 0xc28,
159 MV_PCI_XBAR_TMOUT = 0x1d04,
160 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
161 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
162 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
163 MV_PCI_ERR_COMMAND = 0x1d50,
164
Mark Lord02a121d2007-12-01 13:07:22 -0500165 PCI_IRQ_CAUSE_OFS = 0x1d58,
166 PCI_IRQ_MASK_OFS = 0x1d5c,
Brett Russ20f733e2005-09-01 18:26:17 -0400167 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
168
Mark Lord02a121d2007-12-01 13:07:22 -0500169 PCIE_IRQ_CAUSE_OFS = 0x1900,
170 PCIE_IRQ_MASK_OFS = 0x1910,
Mark Lord646a4da2008-01-26 18:30:37 -0500171 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
Mark Lord02a121d2007-12-01 13:07:22 -0500172
Brett Russ20f733e2005-09-01 18:26:17 -0400173 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
174 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
175 PORT0_ERR = (1 << 0), /* shift by port # */
176 PORT0_DONE = (1 << 1), /* shift by port # */
177 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
178 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
179 PCI_ERR = (1 << 18),
180 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
181 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500182 PORTS_0_3_COAL_DONE = (1 << 8),
183 PORTS_4_7_COAL_DONE = (1 << 17),
Brett Russ20f733e2005-09-01 18:26:17 -0400184 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
185 GPIO_INT = (1 << 22),
186 SELF_INT = (1 << 23),
187 TWSI_INT = (1 << 24),
188 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500189 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
Jeff Garzik8b260242005-11-12 12:32:50 -0500190 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
Brett Russ20f733e2005-09-01 18:26:17 -0400191 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
192 HC_MAIN_RSVD),
Jeff Garzikfb621e22007-02-25 04:19:45 -0500193 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
194 HC_MAIN_RSVD_5),
Brett Russ20f733e2005-09-01 18:26:17 -0400195
196 /* SATAHC registers */
197 HC_CFG_OFS = 0,
198
199 HC_IRQ_CAUSE_OFS = 0x14,
Brett Russ31961942005-09-30 01:36:00 -0400200 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
Brett Russ20f733e2005-09-01 18:26:17 -0400201 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
202 DEV_IRQ = (1 << 8), /* shift by port # */
203
204 /* Shadow block registers */
Brett Russ31961942005-09-30 01:36:00 -0400205 SHD_BLK_OFS = 0x100,
206 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
Brett Russ20f733e2005-09-01 18:26:17 -0400207
208 /* SATA registers */
209 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
210 SATA_ACTIVE_OFS = 0x350,
Mark Lord0c589122008-01-26 18:31:16 -0500211 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500212 PHY_MODE3 = 0x310,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500213 PHY_MODE4 = 0x314,
214 PHY_MODE2 = 0x330,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500215 MV5_PHY_MODE = 0x74,
216 MV5_LT_MODE = 0x30,
217 MV5_PHY_CTL = 0x0C,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500218 SATA_INTERFACE_CTL = 0x050,
219
220 MV_M2_PREAMP_MASK = 0x7e0,
Brett Russ20f733e2005-09-01 18:26:17 -0400221
222 /* Port registers */
223 EDMA_CFG_OFS = 0,
Mark Lord0c589122008-01-26 18:31:16 -0500224 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
225 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
226 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
227 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
228 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
Brett Russ20f733e2005-09-01 18:26:17 -0400229
230 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
231 EDMA_ERR_IRQ_MASK_OFS = 0xc,
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400232 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
233 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
234 EDMA_ERR_DEV = (1 << 2), /* device error */
235 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
236 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
237 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400238 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
239 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400240 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400241 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400242 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
243 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
244 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
245 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
Mark Lord646a4da2008-01-26 18:30:37 -0500246
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400247 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500248 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
249 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
250 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
251 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
252
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400253 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500254
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400255 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500256 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
257 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
258 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
259 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
260 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
261
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400262 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500263
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400264 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400265 EDMA_ERR_OVERRUN_5 = (1 << 5),
266 EDMA_ERR_UNDERRUN_5 = (1 << 6),
Mark Lord646a4da2008-01-26 18:30:37 -0500267
268 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
269 EDMA_ERR_LNK_CTRL_RX_1 |
270 EDMA_ERR_LNK_CTRL_RX_3 |
271 EDMA_ERR_LNK_CTRL_TX,
272
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400273 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
274 EDMA_ERR_PRD_PAR |
275 EDMA_ERR_DEV_DCON |
276 EDMA_ERR_DEV_CON |
277 EDMA_ERR_SERR |
278 EDMA_ERR_SELF_DIS |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400279 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400280 EDMA_ERR_CRPB_PAR |
281 EDMA_ERR_INTRL_PAR |
282 EDMA_ERR_IORDY |
283 EDMA_ERR_LNK_CTRL_RX_2 |
284 EDMA_ERR_LNK_DATA_RX |
285 EDMA_ERR_LNK_DATA_TX |
286 EDMA_ERR_TRANS_PROTO,
287 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
288 EDMA_ERR_PRD_PAR |
289 EDMA_ERR_DEV_DCON |
290 EDMA_ERR_DEV_CON |
291 EDMA_ERR_OVERRUN_5 |
292 EDMA_ERR_UNDERRUN_5 |
293 EDMA_ERR_SELF_DIS_5 |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400294 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400295 EDMA_ERR_CRPB_PAR |
296 EDMA_ERR_INTRL_PAR |
297 EDMA_ERR_IORDY,
Brett Russ20f733e2005-09-01 18:26:17 -0400298
Brett Russ31961942005-09-30 01:36:00 -0400299 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
300 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400301
302 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
303 EDMA_REQ_Q_PTR_SHIFT = 5,
304
305 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
306 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
307 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400308 EDMA_RSP_Q_PTR_SHIFT = 3,
309
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400310 EDMA_CMD_OFS = 0x28, /* EDMA command register */
311 EDMA_EN = (1 << 0), /* enable EDMA */
312 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
313 ATA_RST = (1 << 2), /* reset trans/link/phy */
Brett Russ20f733e2005-09-01 18:26:17 -0400314
Jeff Garzikc9d39132005-11-13 17:47:51 -0500315 EDMA_IORDY_TMOUT = 0x34,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500316 EDMA_ARB_CFG = 0x38,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500317
Brett Russ31961942005-09-30 01:36:00 -0400318 /* Host private flags (hp_flags) */
319 MV_HP_FLAG_MSI = (1 << 0),
Jeff Garzik47c2b672005-11-12 21:13:17 -0500320 MV_HP_ERRATA_50XXB0 = (1 << 1),
321 MV_HP_ERRATA_50XXB2 = (1 << 2),
322 MV_HP_ERRATA_60X1B2 = (1 << 3),
323 MV_HP_ERRATA_60X1C0 = (1 << 4),
Jeff Garzike4e7b892006-01-31 12:18:41 -0500324 MV_HP_ERRATA_XX42A0 = (1 << 5),
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400325 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
326 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
327 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
Mark Lord02a121d2007-12-01 13:07:22 -0500328 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
Brett Russ20f733e2005-09-01 18:26:17 -0400329
Brett Russ31961942005-09-30 01:36:00 -0400330 /* Port private flags (pp_flags) */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400331 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
Mark Lord72109162008-01-26 18:31:33 -0500332 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400333 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
Brett Russ31961942005-09-30 01:36:00 -0400334};
335
Jeff Garzikee9ccdf2007-07-12 15:51:22 -0400336#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
337#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
Jeff Garzike4e7b892006-01-31 12:18:41 -0500338#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500339
Jeff Garzik095fec82005-11-12 09:50:49 -0500340enum {
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400341 /* DMA boundary 0xffff is required by the s/g splitting
342 * we need on /length/ in mv_fill-sg().
343 */
344 MV_DMA_BOUNDARY = 0xffffU,
Jeff Garzik095fec82005-11-12 09:50:49 -0500345
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400346 /* mask of register bits containing lower 32 bits
347 * of EDMA request queue DMA address
348 */
Jeff Garzik095fec82005-11-12 09:50:49 -0500349 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
350
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400351 /* ditto, for response queue */
Jeff Garzik095fec82005-11-12 09:50:49 -0500352 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
353};
354
Jeff Garzik522479f2005-11-12 22:14:02 -0500355enum chip_type {
356 chip_504x,
357 chip_508x,
358 chip_5080,
359 chip_604x,
360 chip_608x,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500361 chip_6042,
362 chip_7042,
Jeff Garzik522479f2005-11-12 22:14:02 -0500363};
364
Brett Russ31961942005-09-30 01:36:00 -0400365/* Command ReQuest Block: 32B */
366struct mv_crqb {
Mark Lorde1469872006-05-22 19:02:03 -0400367 __le32 sg_addr;
368 __le32 sg_addr_hi;
369 __le16 ctrl_flags;
370 __le16 ata_cmd[11];
Brett Russ31961942005-09-30 01:36:00 -0400371};
372
Jeff Garzike4e7b892006-01-31 12:18:41 -0500373struct mv_crqb_iie {
Mark Lorde1469872006-05-22 19:02:03 -0400374 __le32 addr;
375 __le32 addr_hi;
376 __le32 flags;
377 __le32 len;
378 __le32 ata_cmd[4];
Jeff Garzike4e7b892006-01-31 12:18:41 -0500379};
380
Brett Russ31961942005-09-30 01:36:00 -0400381/* Command ResPonse Block: 8B */
382struct mv_crpb {
Mark Lorde1469872006-05-22 19:02:03 -0400383 __le16 id;
384 __le16 flags;
385 __le32 tmstmp;
Brett Russ31961942005-09-30 01:36:00 -0400386};
387
388/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
389struct mv_sg {
Mark Lorde1469872006-05-22 19:02:03 -0400390 __le32 addr;
391 __le32 flags_size;
392 __le32 addr_hi;
393 __le32 reserved;
Brett Russ20f733e2005-09-01 18:26:17 -0400394};
395
396struct mv_port_priv {
Brett Russ31961942005-09-30 01:36:00 -0400397 struct mv_crqb *crqb;
398 dma_addr_t crqb_dma;
399 struct mv_crpb *crpb;
400 dma_addr_t crpb_dma;
401 struct mv_sg *sg_tbl;
402 dma_addr_t sg_tbl_dma;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400403
404 unsigned int req_idx;
405 unsigned int resp_idx;
406
Brett Russ31961942005-09-30 01:36:00 -0400407 u32 pp_flags;
Brett Russ20f733e2005-09-01 18:26:17 -0400408};
409
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500410struct mv_port_signal {
411 u32 amps;
412 u32 pre;
413};
414
Mark Lord02a121d2007-12-01 13:07:22 -0500415struct mv_host_priv {
416 u32 hp_flags;
417 struct mv_port_signal signal[8];
418 const struct mv_hw_ops *ops;
419 u32 irq_cause_ofs;
420 u32 irq_mask_ofs;
421 u32 unmask_all_irqs;
Mark Lordda2fa9b2008-01-26 18:32:45 -0500422 /*
423 * These consistent DMA memory pools give us guaranteed
424 * alignment for hardware-accessed data structures,
425 * and less memory waste in accomplishing the alignment.
426 */
427 struct dma_pool *crqb_pool;
428 struct dma_pool *crpb_pool;
429 struct dma_pool *sg_tbl_pool;
Mark Lord02a121d2007-12-01 13:07:22 -0500430};
431
Jeff Garzik47c2b672005-11-12 21:13:17 -0500432struct mv_hw_ops {
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500433 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
434 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500435 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
436 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
437 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500438 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
439 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500440 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
441 void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500442};
443
Brett Russ20f733e2005-09-01 18:26:17 -0400444static void mv_irq_clear(struct ata_port *ap);
Tejun Heoda3dbb12007-07-16 14:29:40 +0900445static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
446static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
447static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
448static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Brett Russ31961942005-09-30 01:36:00 -0400449static int mv_port_start(struct ata_port *ap);
450static void mv_port_stop(struct ata_port *ap);
451static void mv_qc_prep(struct ata_queued_cmd *qc);
Jeff Garzike4e7b892006-01-31 12:18:41 -0500452static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
Tejun Heo9a3d9eb2006-01-23 13:09:36 +0900453static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400454static void mv_error_handler(struct ata_port *ap);
455static void mv_post_int_cmd(struct ata_queued_cmd *qc);
456static void mv_eh_freeze(struct ata_port *ap);
457static void mv_eh_thaw(struct ata_port *ap);
Mark Lordf2738272008-01-26 18:32:29 -0500458static void mv6_dev_config(struct ata_device *dev);
Brett Russ20f733e2005-09-01 18:26:17 -0400459static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
460
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500461static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
462 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500463static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
464static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
465 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500466static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
467 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500468static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
469static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500470
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500471static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
472 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500473static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
474static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
475 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500476static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
477 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500478static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
479static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500480static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
481 unsigned int port_no);
Mark Lord72109162008-01-26 18:31:33 -0500482static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
483 void __iomem *port_mmio, int want_ncq);
484static int __mv_stop_dma(struct ata_port *ap);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500485
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400486static struct scsi_host_template mv5_sht = {
Brett Russ20f733e2005-09-01 18:26:17 -0400487 .module = THIS_MODULE,
488 .name = DRV_NAME,
489 .ioctl = ata_scsi_ioctl,
490 .queuecommand = ata_scsi_queuecmd,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400491 .can_queue = ATA_DEF_QUEUE,
492 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400493 .sg_tablesize = MV_MAX_SG_CT / 2,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400494 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
495 .emulated = ATA_SHT_EMULATED,
496 .use_clustering = 1,
497 .proc_name = DRV_NAME,
498 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzik3be6cbd2007-10-18 16:21:18 -0400499 .slave_configure = ata_scsi_slave_config,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400500 .slave_destroy = ata_scsi_slave_destroy,
501 .bios_param = ata_std_bios_param,
502};
503
504static struct scsi_host_template mv6_sht = {
505 .module = THIS_MODULE,
506 .name = DRV_NAME,
507 .ioctl = ata_scsi_ioctl,
508 .queuecommand = ata_scsi_queuecmd,
509 .can_queue = ATA_DEF_QUEUE,
Brett Russ20f733e2005-09-01 18:26:17 -0400510 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400511 .sg_tablesize = MV_MAX_SG_CT / 2,
Brett Russ20f733e2005-09-01 18:26:17 -0400512 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
513 .emulated = ATA_SHT_EMULATED,
Jeff Garzikd88184f2007-02-26 01:26:06 -0500514 .use_clustering = 1,
Brett Russ20f733e2005-09-01 18:26:17 -0400515 .proc_name = DRV_NAME,
516 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzik3be6cbd2007-10-18 16:21:18 -0400517 .slave_configure = ata_scsi_slave_config,
Tejun Heoccf68c32006-05-31 18:28:09 +0900518 .slave_destroy = ata_scsi_slave_destroy,
Brett Russ20f733e2005-09-01 18:26:17 -0400519 .bios_param = ata_std_bios_param,
Brett Russ20f733e2005-09-01 18:26:17 -0400520};
521
Jeff Garzikc9d39132005-11-13 17:47:51 -0500522static const struct ata_port_operations mv5_ops = {
Jeff Garzikc9d39132005-11-13 17:47:51 -0500523 .tf_load = ata_tf_load,
524 .tf_read = ata_tf_read,
525 .check_status = ata_check_status,
526 .exec_command = ata_exec_command,
527 .dev_select = ata_std_dev_select,
528
Jeff Garzikcffacd82007-03-09 09:46:47 -0500529 .cable_detect = ata_cable_sata,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500530
531 .qc_prep = mv_qc_prep,
532 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900533 .data_xfer = ata_data_xfer,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500534
Jeff Garzikc9d39132005-11-13 17:47:51 -0500535 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900536 .irq_on = ata_irq_on,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500537
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400538 .error_handler = mv_error_handler,
539 .post_internal_cmd = mv_post_int_cmd,
540 .freeze = mv_eh_freeze,
541 .thaw = mv_eh_thaw,
542
Jeff Garzikc9d39132005-11-13 17:47:51 -0500543 .scr_read = mv5_scr_read,
544 .scr_write = mv5_scr_write,
545
546 .port_start = mv_port_start,
547 .port_stop = mv_port_stop,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500548};
549
550static const struct ata_port_operations mv6_ops = {
Mark Lordf2738272008-01-26 18:32:29 -0500551 .dev_config = mv6_dev_config,
Brett Russ20f733e2005-09-01 18:26:17 -0400552 .tf_load = ata_tf_load,
553 .tf_read = ata_tf_read,
554 .check_status = ata_check_status,
555 .exec_command = ata_exec_command,
556 .dev_select = ata_std_dev_select,
557
Jeff Garzikcffacd82007-03-09 09:46:47 -0500558 .cable_detect = ata_cable_sata,
Brett Russ20f733e2005-09-01 18:26:17 -0400559
Brett Russ31961942005-09-30 01:36:00 -0400560 .qc_prep = mv_qc_prep,
561 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900562 .data_xfer = ata_data_xfer,
Brett Russ20f733e2005-09-01 18:26:17 -0400563
Brett Russ20f733e2005-09-01 18:26:17 -0400564 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900565 .irq_on = ata_irq_on,
Brett Russ20f733e2005-09-01 18:26:17 -0400566
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400567 .error_handler = mv_error_handler,
568 .post_internal_cmd = mv_post_int_cmd,
569 .freeze = mv_eh_freeze,
570 .thaw = mv_eh_thaw,
571
Brett Russ20f733e2005-09-01 18:26:17 -0400572 .scr_read = mv_scr_read,
573 .scr_write = mv_scr_write,
574
Brett Russ31961942005-09-30 01:36:00 -0400575 .port_start = mv_port_start,
576 .port_stop = mv_port_stop,
Brett Russ20f733e2005-09-01 18:26:17 -0400577};
578
Jeff Garzike4e7b892006-01-31 12:18:41 -0500579static const struct ata_port_operations mv_iie_ops = {
Jeff Garzike4e7b892006-01-31 12:18:41 -0500580 .tf_load = ata_tf_load,
581 .tf_read = ata_tf_read,
582 .check_status = ata_check_status,
583 .exec_command = ata_exec_command,
584 .dev_select = ata_std_dev_select,
585
Jeff Garzikcffacd82007-03-09 09:46:47 -0500586 .cable_detect = ata_cable_sata,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500587
588 .qc_prep = mv_qc_prep_iie,
589 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900590 .data_xfer = ata_data_xfer,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500591
Jeff Garzike4e7b892006-01-31 12:18:41 -0500592 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900593 .irq_on = ata_irq_on,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500594
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400595 .error_handler = mv_error_handler,
596 .post_internal_cmd = mv_post_int_cmd,
597 .freeze = mv_eh_freeze,
598 .thaw = mv_eh_thaw,
599
Jeff Garzike4e7b892006-01-31 12:18:41 -0500600 .scr_read = mv_scr_read,
601 .scr_write = mv_scr_write,
602
603 .port_start = mv_port_start,
604 .port_stop = mv_port_stop,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500605};
606
Arjan van de Ven98ac62d2005-11-28 10:06:23 +0100607static const struct ata_port_info mv_port_info[] = {
Brett Russ20f733e2005-09-01 18:26:17 -0400608 { /* chip_504x */
Jeff Garzikcca39742006-08-24 03:19:22 -0400609 .flags = MV_COMMON_FLAGS,
Brett Russ31961942005-09-30 01:36:00 -0400610 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400611 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500612 .port_ops = &mv5_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400613 },
614 { /* chip_508x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400615 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
Brett Russ31961942005-09-30 01:36:00 -0400616 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400617 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500618 .port_ops = &mv5_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400619 },
Jeff Garzik47c2b672005-11-12 21:13:17 -0500620 { /* chip_5080 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400621 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500622 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400623 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500624 .port_ops = &mv5_ops,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500625 },
Brett Russ20f733e2005-09-01 18:26:17 -0400626 { /* chip_604x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400627 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Brett Russ31961942005-09-30 01:36:00 -0400628 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400629 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500630 .port_ops = &mv6_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400631 },
632 { /* chip_608x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400633 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
634 MV_FLAG_DUAL_HC,
Brett Russ31961942005-09-30 01:36:00 -0400635 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400636 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500637 .port_ops = &mv6_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400638 },
Jeff Garzike4e7b892006-01-31 12:18:41 -0500639 { /* chip_6042 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400640 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500641 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400642 .udma_mask = ATA_UDMA6,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500643 .port_ops = &mv_iie_ops,
644 },
645 { /* chip_7042 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400646 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500647 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400648 .udma_mask = ATA_UDMA6,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500649 .port_ops = &mv_iie_ops,
650 },
Brett Russ20f733e2005-09-01 18:26:17 -0400651};
652
/* PCI IDs handled by this driver, each mapped to a board_* chip index
 * (chip_504x etc.) used to look up the matching mv_port_info entry.
 */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
682
/* PCI glue: probe via mv_init_one(); libata's generic remove handler
 * tears ports down through the ata_port_operations above.
 */
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
689
/* Chip-family hardware hooks for Gen-I (50xx) controllers. */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
698
/* Chip-family hardware hooks for Gen-II/IIE (60xx/6042/7042) controllers. */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
707
/*
 * module options
 */
static int msi;	/* non-zero enables PCI MSI; zero (default) uses legacy INTx */
712
713
Jeff Garzikd88184f2007-02-26 01:26:06 -0500714/* move to PCI layer or libata core? */
715static int pci_go_64(struct pci_dev *pdev)
716{
717 int rc;
718
719 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
720 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
721 if (rc) {
722 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
723 if (rc) {
724 dev_printk(KERN_ERR, &pdev->dev,
725 "64-bit DMA enable failed\n");
726 return rc;
727 }
728 }
729 } else {
730 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
731 if (rc) {
732 dev_printk(KERN_ERR, &pdev->dev,
733 "32-bit DMA enable failed\n");
734 return rc;
735 }
736 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
737 if (rc) {
738 dev_printk(KERN_ERR, &pdev->dev,
739 "32-bit consistent DMA enable failed\n");
740 return rc;
741 }
742 }
743
744 return rc;
745}
746
Jeff Garzikddef9bb2006-02-02 16:17:06 -0500747/*
Brett Russ20f733e2005-09-01 18:26:17 -0400748 * Functions
749 */
750
751static inline void writelfl(unsigned long data, void __iomem *addr)
752{
753 writel(data, addr);
754 (void) readl(addr); /* flush to avoid PCI posted write */
755}
756
Brett Russ20f733e2005-09-01 18:26:17 -0400757static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
758{
759 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
760}
761
/* Host-controller index owning the given global port number. */
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}
766
/* Port index within its host controller (0..ports-per-HC - 1). */
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
771
/* Register base of the host controller that owns @port. */
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}
777
Brett Russ20f733e2005-09-01 18:26:17 -0400778static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
779{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500780 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500781 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500782 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400783}
784
785static inline void __iomem *mv_ap_base(struct ata_port *ap)
786{
Tejun Heo0d5ff562007-02-01 15:06:36 +0900787 return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
Brett Russ20f733e2005-09-01 18:26:17 -0400788}
789
Jeff Garzikcca39742006-08-24 03:19:22 -0400790static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400791{
Jeff Garzikcca39742006-08-24 03:19:22 -0400792 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400793}
794
/* Intentionally empty: interrupt cause registers are cleared in the
 * driver's own paths (e.g. HC_IRQ_CAUSE handling in mv_start_dma),
 * so this libata hook has nothing to do.
 */
static void mv_irq_clear(struct ata_port *ap)
{
}
798
/**
 *      mv_set_edma_ptrs - program EDMA request/response queue pointers
 *      @port_mmio: per-port register base
 *      @hpriv: host private data (for errata flags)
 *      @pp: port private data holding queue DMA addresses and indices
 *
 *      Loads the hardware's request/response queue base registers and
 *      in/out pointers from the software-cached indices.  On chips with
 *      the XX42A0 erratum the OUT (req) / IN (rsp) pointer registers
 *      must carry the full low 32 bits of the queue base as well.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* request queue base must be 1KB aligned */
	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	/* response queue base must be 256B aligned */
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
838
/**
 *      mv_start_dma - Enable eDMA engine
 *      @ap: ATA channel being started
 *      @port_mmio: per-port register base
 *      @pp: port private data
 *      @protocol: taskfile protocol of the command about to be issued
 *
 *      If EDMA is already running but in the wrong mode (NCQ vs
 *      non-NCQ), stop it first; then, if stopped, clear any stale
 *      event/interrupt indicators, reconfigure EDMA for the wanted
 *      mode, reload the queue pointers, and enable the engine.
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		/* mode switch requires a full stop/restart of the engine */
		if (want_ncq != using_ncq)
			__mv_stop_dma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
				ap->host->iomap[MV_PRIMARY_BAR], hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			   (CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}
891
/**
 *      __mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Request EDMA disable (the disable bit self-clears) and poll up
 *      to 1000 * 100us for the engine to actually stop.  Verify the
 *      local cache of the eDMA state is accurate with a WARN_ON.
 *
 *      Returns 0 on success, -EIO if the engine refused to stop.
 *
 *      LOCKING:
 *      Inherited from caller (must hold host lock; see mv_stop_dma).
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		/* software thinks it's off; hardware should agree */
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
934
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400935static int mv_stop_dma(struct ata_port *ap)
936{
937 unsigned long flags;
938 int rc;
939
940 spin_lock_irqsave(&ap->host->lock, flags);
941 rc = __mv_stop_dma(ap);
942 spin_unlock_irqrestore(&ap->host->lock, flags);
943
944 return rc;
945}
946
#ifdef ATA_DEBUG
/* Debug helper: hex-dump @bytes of MMIO space starting at @start,
 * four 32-bit words per output line.
 */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
961
/* Debug helper: hex-dump @bytes of PCI config space, four dwords per
 * line.  Compiles to an empty function unless ATA_DEBUG is defined.
 */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			/* read errors deliberately ignored in debug dump */
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
/* Debug helper: dump PCI config space, global PCI-side registers, and
 * the HC / EDMA / SATA register blocks.  A negative @port means "dump
 * everything" (all HCs and up to 8 ports); otherwise only the single
 * HC/port pair for @port is dumped.  No-op unless ATA_DEBUG.
 */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	/* NOTE(review): "%u" with int args; harmless for the in-range
	 * values used here, but "%d" would match the types exactly.
	 */
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
1022
Brett Russ20f733e2005-09-01 18:26:17 -04001023static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1024{
1025 unsigned int ofs;
1026
1027 switch (sc_reg_in) {
1028 case SCR_STATUS:
1029 case SCR_CONTROL:
1030 case SCR_ERROR:
1031 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1032 break;
1033 case SCR_ACTIVE:
1034 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1035 break;
1036 default:
1037 ofs = 0xffffffffU;
1038 break;
1039 }
1040 return ofs;
1041}
1042
Tejun Heoda3dbb12007-07-16 14:29:40 +09001043static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -04001044{
1045 unsigned int ofs = mv_scr_offset(sc_reg_in);
1046
Tejun Heoda3dbb12007-07-16 14:29:40 +09001047 if (ofs != 0xffffffffU) {
1048 *val = readl(mv_ap_base(ap) + ofs);
1049 return 0;
1050 } else
1051 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001052}
1053
Tejun Heoda3dbb12007-07-16 14:29:40 +09001054static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -04001055{
1056 unsigned int ofs = mv_scr_offset(sc_reg_in);
1057
Tejun Heoda3dbb12007-07-16 14:29:40 +09001058 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -04001059 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001060 return 0;
1061 } else
1062 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001063}
1064
Mark Lordf2738272008-01-26 18:32:29 -05001065static void mv6_dev_config(struct ata_device *adev)
1066{
1067 /*
1068 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1069 * See mv_qc_prep() for more info.
1070 */
1071 if (adev->flags & ATA_DFLAG_NCQ)
1072 if (adev->max_sectors > ATA_MAX_SECTORS)
1073 adev->max_sectors = ATA_MAX_SECTORS;
1074}
1075
/**
 *      mv_edma_cfg - program the EDMA configuration register
 *      @pp: port private data (NCQ flag is cached here)
 *      @hpriv: host private data (chip generation)
 *      @port_mmio: per-port register base
 *      @want_ncq: non-zero to configure the engine for NCQ operation
 *
 *      Builds the EDMA_CFG value from the chip generation's required
 *      bits plus the NCQ enable, updates pp->pp_flags to match, and
 *      writes the register.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq)
{
	u32 cfg;

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1105
/* Release the port's DMA-pool buffers (request queue, response queue,
 * SG table).  Each pointer is NULLed after freeing so the function is
 * idempotent and safe to call from partial-allocation error paths in
 * mv_port_start().
 */
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	if (pp->sg_tbl) {
		dma_pool_free(hpriv->sg_tbl_pool, pp->sg_tbl, pp->sg_tbl_dma);
		pp->sg_tbl = NULL;
	}
}
1124
/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory (devm-managed private struct,
 *      plus CRQB/CRPB/SG-table buffers from the host's DMA pools),
 *      init port private memory, zero indices.  On partial allocation
 *      failure, mv_port_free_dma_mem() releases whatever succeeded.
 *
 *      Returns 0 on success, -ENOMEM or ata_pad_alloc()'s error on
 *      failure.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	pp->sg_tbl = dma_pool_alloc(hpriv->sg_tbl_pool, GFP_KERNEL,
				    &pp->sg_tbl_dma);
	if (!pp->sg_tbl)
		goto out_port_free_dma_mem;

	/* host lock: EDMA config/pointer registers are shared with the
	 * interrupt path
	 */
	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(pp, hpriv, port_mmio, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
1185
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory (the devm-allocated private
 *      struct is released automatically by the driver core).
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
	mv_port_free_dma_mem(ap);
}
1200
/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.  Each hardware
 *      ePRD segment may not cross a 64KB boundary, so any DMA-mapped
 *      segment that would is split into multiple ePRD entries.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			/* clip this piece at the next 64KB boundary */
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	/* hardware scans the table until it sees the end-of-table flag */
	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
1244
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001245static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001246{
Mark Lord559eeda2006-05-19 16:40:15 -04001247 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001248 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001249 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001250}
1251
/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* only DMA protocol commands go through the CRQB path */
	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* CRQB points at this command's SG (ePRD) table */
	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accomodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	/* remaining shadow registers, in the order the hardware expects */
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1342
/**
 *      mv_qc_prep_iie - Host specific command preparation (Gen IIE).
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.  Unlike mv_qc_prep(), the Gen IIE CRQB
 *      format carries the whole taskfile packed into four 32-bit
 *      words rather than a register/value command list.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* only DMA protocol commands go through the CRQB path */
	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1410
Brett Russ05b308e2005-10-05 17:08:53 -04001411/**
1412 * mv_qc_issue - Initiate a command to the host
1413 * @qc: queued command to start
1414 *
1415 * This routine simply redirects to the general purpose routine
1416 * if command is not DMA. Else, it sanity checks our local
1417 * caches of the request producer/consumer indices then enables
1418 * DMA and bumps the request producer index.
1419 *
1420 * LOCKING:
1421 * Inherited from caller.
1422 */
Tejun Heo9a3d9eb2006-01-23 13:09:36 +09001423static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
Brett Russ31961942005-09-30 01:36:00 -04001424{
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001425 struct ata_port *ap = qc->ap;
1426 void __iomem *port_mmio = mv_ap_base(ap);
1427 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001428 u32 in_index;
Brett Russ31961942005-09-30 01:36:00 -04001429
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001430 if (qc->tf.protocol != ATA_PROT_DMA) {
Brett Russ31961942005-09-30 01:36:00 -04001431 /* We're about to send a non-EDMA capable command to the
1432 * port. Turn off EDMA so there won't be problems accessing
1433 * shadow block, etc registers.
1434 */
Jeff Garzik0ea9e172007-07-13 17:06:45 -04001435 __mv_stop_dma(ap);
Brett Russ31961942005-09-30 01:36:00 -04001436 return ata_qc_issue_prot(qc);
1437 }
1438
Mark Lord72109162008-01-26 18:31:33 -05001439 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001440
1441 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
Brett Russ31961942005-09-30 01:36:00 -04001442
Brett Russ31961942005-09-30 01:36:00 -04001443 /* until we do queuing, the queue should be empty at this point */
Mark Lorda6432432006-05-19 16:36:36 -04001444 WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
1445 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
Brett Russ31961942005-09-30 01:36:00 -04001446
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001447 pp->req_idx++;
Brett Russ31961942005-09-30 01:36:00 -04001448
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001449 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
Brett Russ31961942005-09-30 01:36:00 -04001450
1451 /* and write the request in pointer to kick the EDMA to life */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001452 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1453 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
Brett Russ31961942005-09-30 01:36:00 -04001454
1455 return 0;
1456}
1457
Brett Russ05b308e2005-10-05 17:08:53 -04001458/**
Brett Russ05b308e2005-10-05 17:08:53 -04001459 * mv_err_intr - Handle error interrupts on the port
1460 * @ap: ATA channel to manipulate
Mark Lord9b358e32006-05-19 16:21:03 -04001461 * @reset_allowed: bool: 0 == don't trigger from reset here
Brett Russ05b308e2005-10-05 17:08:53 -04001462 *
1463 * In most cases, just clear the interrupt and move on. However,
1464 * some cases require an eDMA reset, which is done right before
1465 * the COMRESET in mv_phy_reset(). The SERR case requires a
1466 * clear of pending errors in the SATA SERROR register. Finally,
1467 * if the port disabled DMA, update our cached copy to match.
1468 *
1469 * LOCKING:
1470 * Inherited from caller.
1471 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001472static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
Brett Russ20f733e2005-09-01 18:26:17 -04001473{
Brett Russ31961942005-09-30 01:36:00 -04001474 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001475 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1476 struct mv_port_priv *pp = ap->private_data;
1477 struct mv_host_priv *hpriv = ap->host->private_data;
1478 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1479 unsigned int action = 0, err_mask = 0;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001480 struct ata_eh_info *ehi = &ap->link.eh_info;
Brett Russ20f733e2005-09-01 18:26:17 -04001481
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001482 ata_ehi_clear_desc(ehi);
Brett Russ20f733e2005-09-01 18:26:17 -04001483
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001484 if (!edma_enabled) {
1485 /* just a guess: do we need to do this? should we
1486 * expand this, and do it in all cases?
1487 */
Tejun Heo936fd732007-08-06 18:36:23 +09001488 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1489 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Brett Russ20f733e2005-09-01 18:26:17 -04001490 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001491
1492 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1493
1494 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1495
1496 /*
1497 * all generations share these EDMA error cause bits
1498 */
1499
1500 if (edma_err_cause & EDMA_ERR_DEV)
1501 err_mask |= AC_ERR_DEV;
1502 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
Jeff Garzik6c1153e2007-07-13 15:20:15 -04001503 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001504 EDMA_ERR_INTRL_PAR)) {
1505 err_mask |= AC_ERR_ATA_BUS;
1506 action |= ATA_EH_HARDRESET;
Tejun Heob64bbc32007-07-16 14:29:39 +09001507 ata_ehi_push_desc(ehi, "parity error");
Brett Russafb0edd2005-10-05 17:08:42 -04001508 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001509 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1510 ata_ehi_hotplugged(ehi);
1511 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
Tejun Heob64bbc32007-07-16 14:29:39 +09001512 "dev disconnect" : "dev connect");
Mark Lord3606a382008-01-26 18:28:23 -05001513 action |= ATA_EH_HARDRESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001514 }
1515
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04001516 if (IS_GEN_I(hpriv)) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001517 eh_freeze_mask = EDMA_EH_FREEZE_5;
1518
1519 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1520 struct mv_port_priv *pp = ap->private_data;
1521 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001522 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001523 }
1524 } else {
1525 eh_freeze_mask = EDMA_EH_FREEZE;
1526
1527 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1528 struct mv_port_priv *pp = ap->private_data;
1529 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001530 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001531 }
1532
1533 if (edma_err_cause & EDMA_ERR_SERR) {
Tejun Heo936fd732007-08-06 18:36:23 +09001534 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1535 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001536 err_mask = AC_ERR_ATA_BUS;
1537 action |= ATA_EH_HARDRESET;
1538 }
1539 }
Brett Russ20f733e2005-09-01 18:26:17 -04001540
1541 /* Clear EDMA now that SERR cleanup done */
Mark Lord3606a382008-01-26 18:28:23 -05001542 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001543
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001544 if (!err_mask) {
1545 err_mask = AC_ERR_OTHER;
1546 action |= ATA_EH_HARDRESET;
1547 }
1548
1549 ehi->serror |= serr;
1550 ehi->action |= action;
1551
1552 if (qc)
1553 qc->err_mask |= err_mask;
1554 else
1555 ehi->err_mask |= err_mask;
1556
1557 if (edma_err_cause & eh_freeze_mask)
1558 ata_port_freeze(ap);
1559 else
1560 ata_port_abort(ap);
1561}
1562
1563static void mv_intr_pio(struct ata_port *ap)
1564{
1565 struct ata_queued_cmd *qc;
1566 u8 ata_status;
1567
1568 /* ignore spurious intr if drive still BUSY */
1569 ata_status = readb(ap->ioaddr.status_addr);
1570 if (unlikely(ata_status & ATA_BUSY))
1571 return;
1572
1573 /* get active ATA command */
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001574 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001575 if (unlikely(!qc)) /* no active tag */
1576 return;
1577 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1578 return;
1579
1580 /* and finally, complete the ATA command */
1581 qc->err_mask |= ac_err_mask(ata_status);
1582 ata_qc_complete(qc);
1583}
1584
/* mv_intr_edma - drain the EDMA response (CRPB) queue for one port.
 *
 * Walks the response ring from the software consumer index up to the
 * hardware producer index, completing each finished command (or handing
 * off to mv_err_intr() on error), then writes the consumer pointer back
 * to hardware once.
 *
 * LOCKING: inherited from caller (interrupt context).
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			/* error path aborts/freezes the port; bail out
			 * without advancing the consumer pointer */
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1650
Brett Russ05b308e2005-10-05 17:08:53 -04001651/**
1652 * mv_host_intr - Handle all interrupts on the given host controller
Jeff Garzikcca39742006-08-24 03:19:22 -04001653 * @host: host specific structure
Brett Russ05b308e2005-10-05 17:08:53 -04001654 * @relevant: port error bits relevant to this host controller
1655 * @hc: which host controller we're to look at
1656 *
1657 * Read then write clear the HC interrupt status then walk each
1658 * port connected to the HC and see if it needs servicing. Port
1659 * success ints are reported in the HC interrupt status reg, the
1660 * port error ints are reported in the higher level main
1661 * interrupt status register and thus are passed in via the
1662 * 'relevant' argument.
1663 *
1664 * LOCKING:
1665 * Inherited from caller.
1666 */
Jeff Garzikcca39742006-08-24 03:19:22 -04001667static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
Brett Russ20f733e2005-09-01 18:26:17 -04001668{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001669 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
Brett Russ20f733e2005-09-01 18:26:17 -04001670 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
Brett Russ20f733e2005-09-01 18:26:17 -04001671 u32 hc_irq_cause;
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001672 int port, port0;
Brett Russ20f733e2005-09-01 18:26:17 -04001673
Jeff Garzik35177262007-02-24 21:26:42 -05001674 if (hc == 0)
Brett Russ20f733e2005-09-01 18:26:17 -04001675 port0 = 0;
Jeff Garzik35177262007-02-24 21:26:42 -05001676 else
Brett Russ20f733e2005-09-01 18:26:17 -04001677 port0 = MV_PORTS_PER_HC;
Brett Russ20f733e2005-09-01 18:26:17 -04001678
1679 /* we'll need the HC success int register in most cases */
1680 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001681 if (!hc_irq_cause)
1682 return;
1683
1684 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001685
1686 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001687 hc, relevant, hc_irq_cause);
Brett Russ20f733e2005-09-01 18:26:17 -04001688
1689 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
Jeff Garzikcca39742006-08-24 03:19:22 -04001690 struct ata_port *ap = host->ports[port];
Mark Lord63af2a52006-03-29 09:50:31 -05001691 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001692 int have_err_bits, hard_port, shift;
Jeff Garzik55d8ca42006-03-29 19:43:31 -05001693
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001694 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
Jeff Garzika2c91a82005-11-17 05:44:44 -05001695 continue;
1696
Brett Russ31961942005-09-30 01:36:00 -04001697 shift = port << 1; /* (port * 2) */
Brett Russ20f733e2005-09-01 18:26:17 -04001698 if (port >= MV_PORTS_PER_HC) {
1699 shift++; /* skip bit 8 in the HC Main IRQ reg */
1700 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001701 have_err_bits = ((PORT0_ERR << shift) & relevant);
1702
1703 if (unlikely(have_err_bits)) {
1704 struct ata_queued_cmd *qc;
1705
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001706 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001707 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1708 continue;
1709
1710 mv_err_intr(ap, qc);
1711 continue;
Brett Russ20f733e2005-09-01 18:26:17 -04001712 }
Jeff Garzik8b260242005-11-12 12:32:50 -05001713
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001714 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1715
1716 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1717 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1718 mv_intr_edma(ap);
1719 } else {
1720 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1721 mv_intr_pio(ap);
Brett Russ20f733e2005-09-01 18:26:17 -04001722 }
1723 }
1724 VPRINTK("EXIT\n");
1725}
1726
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001727static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1728{
Mark Lord02a121d2007-12-01 13:07:22 -05001729 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001730 struct ata_port *ap;
1731 struct ata_queued_cmd *qc;
1732 struct ata_eh_info *ehi;
1733 unsigned int i, err_mask, printed = 0;
1734 u32 err_cause;
1735
Mark Lord02a121d2007-12-01 13:07:22 -05001736 err_cause = readl(mmio + hpriv->irq_cause_ofs);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001737
1738 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1739 err_cause);
1740
1741 DPRINTK("All regs @ PCI error\n");
1742 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1743
Mark Lord02a121d2007-12-01 13:07:22 -05001744 writelfl(0, mmio + hpriv->irq_cause_ofs);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001745
1746 for (i = 0; i < host->n_ports; i++) {
1747 ap = host->ports[i];
Tejun Heo936fd732007-08-06 18:36:23 +09001748 if (!ata_link_offline(&ap->link)) {
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001749 ehi = &ap->link.eh_info;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001750 ata_ehi_clear_desc(ehi);
1751 if (!printed++)
1752 ata_ehi_push_desc(ehi,
1753 "PCI err cause 0x%08x", err_cause);
1754 err_mask = AC_ERR_HOST_BUS;
1755 ehi->action = ATA_EH_HARDRESET;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001756 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001757 if (qc)
1758 qc->err_mask |= err_mask;
1759 else
1760 ehi->err_mask |= err_mask;
1761
1762 ata_port_freeze(ap);
1763 }
1764 }
1765}
1766
Brett Russ05b308e2005-10-05 17:08:53 -04001767/**
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001768 * mv_interrupt - Main interrupt event handler
Brett Russ05b308e2005-10-05 17:08:53 -04001769 * @irq: unused
1770 * @dev_instance: private data; in this case the host structure
Brett Russ05b308e2005-10-05 17:08:53 -04001771 *
1772 * Read the read only register to determine if any host
1773 * controllers have pending interrupts. If so, call lower level
1774 * routine to handle. Also check for PCI errors which are only
1775 * reported here.
1776 *
Jeff Garzik8b260242005-11-12 12:32:50 -05001777 * LOCKING:
Jeff Garzikcca39742006-08-24 03:19:22 -04001778 * This routine holds the host lock while processing pending
Brett Russ05b308e2005-10-05 17:08:53 -04001779 * interrupts.
1780 */
David Howells7d12e782006-10-05 14:55:46 +01001781static irqreturn_t mv_interrupt(int irq, void *dev_instance)
Brett Russ20f733e2005-09-01 18:26:17 -04001782{
Jeff Garzikcca39742006-08-24 03:19:22 -04001783 struct ata_host *host = dev_instance;
Brett Russ20f733e2005-09-01 18:26:17 -04001784 unsigned int hc, handled = 0, n_hcs;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001785 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
Mark Lord646a4da2008-01-26 18:30:37 -05001786 u32 irq_stat, irq_mask;
Brett Russ20f733e2005-09-01 18:26:17 -04001787
Mark Lord646a4da2008-01-26 18:30:37 -05001788 spin_lock(&host->lock);
Brett Russ20f733e2005-09-01 18:26:17 -04001789 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
Mark Lord646a4da2008-01-26 18:30:37 -05001790 irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001791
1792 /* check the cases where we either have nothing pending or have read
1793 * a bogus register value which can indicate HW removal or PCI fault
1794 */
Mark Lord646a4da2008-01-26 18:30:37 -05001795 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1796 goto out_unlock;
Brett Russ20f733e2005-09-01 18:26:17 -04001797
Jeff Garzikcca39742006-08-24 03:19:22 -04001798 n_hcs = mv_get_hc_count(host->ports[0]->flags);
Brett Russ20f733e2005-09-01 18:26:17 -04001799
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001800 if (unlikely(irq_stat & PCI_ERR)) {
1801 mv_pci_error(host, mmio);
1802 handled = 1;
1803 goto out_unlock; /* skip all other HC irq handling */
1804 }
1805
Brett Russ20f733e2005-09-01 18:26:17 -04001806 for (hc = 0; hc < n_hcs; hc++) {
1807 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1808 if (relevant) {
Jeff Garzikcca39742006-08-24 03:19:22 -04001809 mv_host_intr(host, relevant, hc);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001810 handled = 1;
Brett Russ20f733e2005-09-01 18:26:17 -04001811 }
1812 }
Mark Lord615ab952006-05-19 16:24:56 -04001813
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001814out_unlock:
Jeff Garzikcca39742006-08-24 03:19:22 -04001815 spin_unlock(&host->lock);
Brett Russ20f733e2005-09-01 18:26:17 -04001816
1817 return IRQ_RETVAL(handled);
1818}
1819
Jeff Garzikc9d39132005-11-13 17:47:51 -05001820static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1821{
1822 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1823 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1824
1825 return hc_mmio + ofs;
1826}
1827
1828static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1829{
1830 unsigned int ofs;
1831
1832 switch (sc_reg_in) {
1833 case SCR_STATUS:
1834 case SCR_ERROR:
1835 case SCR_CONTROL:
1836 ofs = sc_reg_in * sizeof(u32);
1837 break;
1838 default:
1839 ofs = 0xffffffffU;
1840 break;
1841 }
1842 return ofs;
1843}
1844
Tejun Heoda3dbb12007-07-16 14:29:40 +09001845static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001846{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001847 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1848 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001849 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1850
Tejun Heoda3dbb12007-07-16 14:29:40 +09001851 if (ofs != 0xffffffffU) {
1852 *val = readl(addr + ofs);
1853 return 0;
1854 } else
1855 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001856}
1857
Tejun Heoda3dbb12007-07-16 14:29:40 +09001858static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001859{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001860 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1861 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001862 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1863
Tejun Heoda3dbb12007-07-16 14:29:40 +09001864 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001865 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001866 return 0;
1867 } else
1868 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001869}
1870
Jeff Garzik522479f2005-11-12 22:14:02 -05001871static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1872{
Jeff Garzik522479f2005-11-12 22:14:02 -05001873 int early_5080;
1874
Auke Kok44c10132007-06-08 15:46:36 -07001875 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
Jeff Garzik522479f2005-11-12 22:14:02 -05001876
1877 if (!early_5080) {
1878 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1879 tmp |= (1 << 0);
1880 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1881 }
1882
1883 mv_reset_pci_bus(pdev, mmio);
1884}
1885
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	/* magic constant from the Marvell vendor driver -- presumably the
	 * flash-interface control register's power-up value; confirm
	 * against chip documentation */
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1890
Jeff Garzik47c2b672005-11-12 21:13:17 -05001891static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001892 void __iomem *mmio)
1893{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001894 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1895 u32 tmp;
1896
1897 tmp = readl(phy_mmio + MV5_PHY_MODE);
1898
1899 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1900 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001901}
1902
Jeff Garzik47c2b672005-11-12 21:13:17 -05001903static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001904{
Jeff Garzik522479f2005-11-12 22:14:02 -05001905 u32 tmp;
1906
1907 writel(0, mmio + MV_GPIO_PORT_CTL);
1908
1909 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1910
1911 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1912 tmp |= ~(1 << 0);
1913 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001914}
1915
Jeff Garzik2a47ce02005-11-12 23:05:14 -05001916static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1917 unsigned int port)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05001918{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001919 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1920 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1921 u32 tmp;
1922 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1923
1924 if (fix_apm_sq) {
1925 tmp = readl(phy_mmio + MV5_LT_MODE);
1926 tmp |= (1 << 19);
1927 writel(tmp, phy_mmio + MV5_LT_MODE);
1928
1929 tmp = readl(phy_mmio + MV5_PHY_CTL);
1930 tmp &= ~0x3;
1931 tmp |= 0x1;
1932 writel(tmp, phy_mmio + MV5_PHY_CTL);
1933 }
1934
1935 tmp = readl(phy_mmio + MV5_PHY_MODE);
1936 tmp &= ~mask;
1937 tmp |= hpriv->signal[port].pre;
1938 tmp |= hpriv->signal[port].amps;
1939 writel(tmp, phy_mmio + MV5_PHY_MODE);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05001940}
1941
Jeff Garzikc9d39132005-11-13 17:47:51 -05001942
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Reset one 50xx port: disable EDMA, pulse the channel reset, then
 * reinitialize the per-port EDMA configuration/queue registers.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/* EDMA_DS: disable EDMA before touching the channel */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1969
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Reset one 50xx host controller's shared registers (after its ports
 * have been reset individually by mv5_reset_hc_port()).
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	/* magic read-modify-write at HC offset 0x20, per vendor driver --
	 * field semantics undocumented here */
	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
1988
1989static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1990 unsigned int n_hc)
1991{
1992 unsigned int hc, port;
1993
1994 for (hc = 0; hc < n_hc; hc++) {
1995 for (port = 0; port < MV_PORTS_PER_HC; port++)
1996 mv5_reset_hc_port(hpriv, mmio,
1997 (hc * MV_PORTS_PER_HC) + port);
1998
1999 mv5_reset_one_hc(hpriv, mmio, hc);
2000 }
2001
2002 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002003}
2004
Jeff Garzik101ffae2005-11-12 22:17:49 -05002005#undef ZERO
2006#define ZERO(reg) writel(0, mmio + (reg))
2007static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
2008{
Mark Lord02a121d2007-12-01 13:07:22 -05002009 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2010 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzik101ffae2005-11-12 22:17:49 -05002011 u32 tmp;
2012
2013 tmp = readl(mmio + MV_PCI_MODE);
2014 tmp &= 0xff00ffff;
2015 writel(tmp, mmio + MV_PCI_MODE);
2016
2017 ZERO(MV_PCI_DISC_TIMER);
2018 ZERO(MV_PCI_MSI_TRIGGER);
2019 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
2020 ZERO(HC_MAIN_IRQ_MASK_OFS);
2021 ZERO(MV_PCI_SERR_MASK);
Mark Lord02a121d2007-12-01 13:07:22 -05002022 ZERO(hpriv->irq_cause_ofs);
2023 ZERO(hpriv->irq_mask_ofs);
Jeff Garzik101ffae2005-11-12 22:17:49 -05002024 ZERO(MV_PCI_ERR_LOW_ADDRESS);
2025 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2026 ZERO(MV_PCI_ERR_ATTRIBUTE);
2027 ZERO(MV_PCI_ERR_COMMAND);
2028}
2029#undef ZERO
2030
2031static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2032{
2033 u32 tmp;
2034
2035 mv5_reset_flash(hpriv, mmio);
2036
2037 tmp = readl(mmio + MV_GPIO_PORT_CTL);
2038 tmp &= 0x3;
2039 tmp |= (1 << 5) | (1 << 6);
2040 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2041}
2042
2043/**
2044 * mv6_reset_hc - Perform the 6xxx global soft reset
2045 * @mmio: base address of the HBA
2046 *
2047 * This routine only applies to 6xxx parts.
2048 *
2049 * LOCKING:
2050 * Inherited from caller.
2051 */
Jeff Garzikc9d39132005-11-13 17:47:51 -05002052static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2053 unsigned int n_hc)
Jeff Garzik101ffae2005-11-12 22:17:49 -05002054{
2055 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2056 int i, rc = 0;
2057 u32 t;
2058
2059 /* Following procedure defined in PCI "main command and status
2060 * register" table.
2061 */
2062 t = readl(reg);
2063 writel(t | STOP_PCI_MASTER, reg);
2064
2065 for (i = 0; i < 1000; i++) {
2066 udelay(1);
2067 t = readl(reg);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04002068 if (PCI_MASTER_EMPTY & t)
Jeff Garzik101ffae2005-11-12 22:17:49 -05002069 break;
Jeff Garzik101ffae2005-11-12 22:17:49 -05002070 }
2071 if (!(PCI_MASTER_EMPTY & t)) {
2072 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2073 rc = 1;
2074 goto done;
2075 }
2076
2077 /* set reset */
2078 i = 5;
2079 do {
2080 writel(t | GLOB_SFT_RST, reg);
2081 t = readl(reg);
2082 udelay(1);
2083 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2084
2085 if (!(GLOB_SFT_RST & t)) {
2086 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2087 rc = 1;
2088 goto done;
2089 }
2090
2091 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2092 i = 5;
2093 do {
2094 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2095 t = readl(reg);
2096 udelay(1);
2097 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2098
2099 if (GLOB_SFT_RST & t) {
2100 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2101 rc = 1;
2102 }
2103done:
2104 return rc;
2105}
2106
Jeff Garzik47c2b672005-11-12 21:13:17 -05002107static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002108 void __iomem *mmio)
2109{
2110 void __iomem *port_mmio;
2111 u32 tmp;
2112
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002113 tmp = readl(mmio + MV_RESET_CFG);
2114 if ((tmp & (1 << 0)) == 0) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002115 hpriv->signal[idx].amps = 0x7 << 8;
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002116 hpriv->signal[idx].pre = 0x1 << 5;
2117 return;
2118 }
2119
2120 port_mmio = mv_port_base(mmio, idx);
2121 tmp = readl(port_mmio + PHY_MODE2);
2122
2123 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2124 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2125}
2126
Jeff Garzik47c2b672005-11-12 21:13:17 -05002127static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002128{
Jeff Garzik47c2b672005-11-12 21:13:17 -05002129 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002130}
2131
Jeff Garzikc9d39132005-11-13 17:47:51 -05002132static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002133 unsigned int port)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002134{
Jeff Garzikc9d39132005-11-13 17:47:51 -05002135 void __iomem *port_mmio = mv_port_base(mmio, port);
2136
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002137 u32 hp_flags = hpriv->hp_flags;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002138 int fix_phy_mode2 =
2139 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002140 int fix_phy_mode4 =
Jeff Garzik47c2b672005-11-12 21:13:17 -05002141 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2142 u32 m2, tmp;
2143
2144 if (fix_phy_mode2) {
2145 m2 = readl(port_mmio + PHY_MODE2);
2146 m2 &= ~(1 << 16);
2147 m2 |= (1 << 31);
2148 writel(m2, port_mmio + PHY_MODE2);
2149
2150 udelay(200);
2151
2152 m2 = readl(port_mmio + PHY_MODE2);
2153 m2 &= ~((1 << 16) | (1 << 31));
2154 writel(m2, port_mmio + PHY_MODE2);
2155
2156 udelay(200);
2157 }
2158
2159 /* who knows what this magic does */
2160 tmp = readl(port_mmio + PHY_MODE3);
2161 tmp &= ~0x7F800000;
2162 tmp |= 0x2A800000;
2163 writel(tmp, port_mmio + PHY_MODE3);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002164
2165 if (fix_phy_mode4) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002166 u32 m4;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002167
2168 m4 = readl(port_mmio + PHY_MODE4);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002169
2170 if (hp_flags & MV_HP_ERRATA_60X1B2)
2171 tmp = readl(port_mmio + 0x310);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002172
2173 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2174
2175 writel(m4, port_mmio + PHY_MODE4);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002176
2177 if (hp_flags & MV_HP_ERRATA_60X1B2)
2178 writel(tmp, port_mmio + 0x310);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002179 }
2180
2181 /* Revert values of pre-emphasis and signal amps to the saved ones */
2182 m2 = readl(port_mmio + PHY_MODE2);
2183
2184 m2 &= ~MV_M2_PREAMP_MASK;
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002185 m2 |= hpriv->signal[port].amps;
2186 m2 |= hpriv->signal[port].pre;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002187 m2 &= ~(1 << 16);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002188
Jeff Garzike4e7b892006-01-31 12:18:41 -05002189 /* according to mvSata 3.6.1, some IIE values are fixed */
2190 if (IS_GEN_IIE(hpriv)) {
2191 m2 &= ~0xC30FF01F;
2192 m2 |= 0x0000900F;
2193 }
2194
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002195 writel(m2, port_mmio + PHY_MODE2);
2196}
2197
/*
 * mv_channel_reset - assert/de-assert ATA_RST on one channel's EDMA engine
 * @hpriv: host private data (generation flags, phy_errata hook)
 * @mmio: base address of the chip's MMIO region
 * @port_no: host-wide port number to reset
 *
 * Holds ATA_RST in EDMA_CMD for 25us, fixing up the SATA interface
 * control register on Gen-II parts while reset is asserted, then clears
 * the reset bit and re-applies the per-chip PHY errata workarounds.
 */
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);	/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	/* extra settle time for first-generation chips */
	if (IS_GEN_I(hpriv))
		mdelay(1);
}
2224
/**
 *      mv_phy_reset - Perform eDMA reset followed by COMRESET
 *      @ap: ATA channel to manipulate
 *      @class: out-parameter; receives the classified device type
 *              (ATA_DEV_NONE if the link is offline)
 *      @deadline: jiffies value after which polling loops give up
 *
 *      Part of this is taken from __sata_phy_reset and modified to
 *      not sleep since this routine gets called from interrupt level.
 *
 *      LOCKING:
 *      Inherited from caller.  This is coded to be safe to call at
 *      interrupt level, i.e. it does not sleep.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			 unsigned long deadline)
{
	struct mv_port_priv *pp	= ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* Issue COMRESET via SControl */
comreset_retry:
	/* DET=1 starts COMRESET; DET=0 releases it and re-enables the PHY */
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
	msleep(1);

	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
	msleep(20);

	/* poll SStatus.DET until link is established (3) or absent (0) */
	do {
		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	if (ata_link_offline(&ap->link)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		msleep(500);
		if (retry-- <= 0)
			break;
		if (time_after(jiffies, deadline))
			break;
	}

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap->link.device, 1, NULL);

	/* reset cleared any latched EDMA errors; ack them all */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}
2328
Tejun Heocc0680a2007-08-06 18:36:23 +09002329static int mv_prereset(struct ata_link *link, unsigned long deadline)
Jeff Garzik22374672005-11-17 10:59:48 -05002330{
Tejun Heocc0680a2007-08-06 18:36:23 +09002331 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002332 struct mv_port_priv *pp = ap->private_data;
Tejun Heocc0680a2007-08-06 18:36:23 +09002333 struct ata_eh_context *ehc = &link->eh_context;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002334 int rc;
Jeff Garzik0ea9e172007-07-13 17:06:45 -04002335
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002336 rc = mv_stop_dma(ap);
2337 if (rc)
2338 ehc->i.action |= ATA_EH_HARDRESET;
2339
2340 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2341 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2342 ehc->i.action |= ATA_EH_HARDRESET;
2343 }
2344
2345 /* if we're about to do hardreset, nothing more to do */
2346 if (ehc->i.action & ATA_EH_HARDRESET)
2347 return 0;
2348
Tejun Heocc0680a2007-08-06 18:36:23 +09002349 if (ata_link_online(link))
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002350 rc = ata_wait_ready(ap, deadline);
2351 else
2352 rc = -ENODEV;
2353
2354 return rc;
Jeff Garzik22374672005-11-17 10:59:48 -05002355}
2356
Tejun Heocc0680a2007-08-06 18:36:23 +09002357static int mv_hardreset(struct ata_link *link, unsigned int *class,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002358 unsigned long deadline)
2359{
Tejun Heocc0680a2007-08-06 18:36:23 +09002360 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002361 struct mv_host_priv *hpriv = ap->host->private_data;
2362 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2363
2364 mv_stop_dma(ap);
2365
2366 mv_channel_reset(hpriv, mmio, ap->port_no);
2367
2368 mv_phy_reset(ap, class, deadline);
2369
2370 return 0;
2371}
2372
Tejun Heocc0680a2007-08-06 18:36:23 +09002373static void mv_postreset(struct ata_link *link, unsigned int *classes)
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002374{
Tejun Heocc0680a2007-08-06 18:36:23 +09002375 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002376 u32 serr;
2377
2378 /* print link status */
Tejun Heocc0680a2007-08-06 18:36:23 +09002379 sata_print_link_status(link);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002380
2381 /* clear SError */
Tejun Heocc0680a2007-08-06 18:36:23 +09002382 sata_scr_read(link, SCR_ERROR, &serr);
2383 sata_scr_write_flush(link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002384
2385 /* bail out if no device is present */
2386 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2387 DPRINTK("EXIT, no device\n");
2388 return;
2389 }
2390
2391 /* set up device control */
2392 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2393}
2394
2395static void mv_error_handler(struct ata_port *ap)
2396{
2397 ata_do_eh(ap, mv_prereset, ata_std_softreset,
2398 mv_hardreset, mv_postreset);
2399}
2400
2401static void mv_post_int_cmd(struct ata_queued_cmd *qc)
2402{
2403 mv_stop_dma(qc->ap);
2404}
2405
2406static void mv_eh_freeze(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04002407{
Tejun Heo0d5ff562007-02-01 15:06:36 +09002408 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002409 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2410 u32 tmp, mask;
2411 unsigned int shift;
Brett Russ31961942005-09-30 01:36:00 -04002412
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002413 /* FIXME: handle coalescing completion events properly */
Brett Russ31961942005-09-30 01:36:00 -04002414
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002415 shift = ap->port_no * 2;
2416 if (hc > 0)
2417 shift++;
Brett Russ31961942005-09-30 01:36:00 -04002418
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002419 mask = 0x3 << shift;
Brett Russ31961942005-09-30 01:36:00 -04002420
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002421 /* disable assertion of portN err, done events */
2422 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2423 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2424}
2425
2426static void mv_eh_thaw(struct ata_port *ap)
2427{
2428 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2429 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2430 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2431 void __iomem *port_mmio = mv_ap_base(ap);
2432 u32 tmp, mask, hc_irq_cause;
2433 unsigned int shift, hc_port_no = ap->port_no;
2434
2435 /* FIXME: handle coalescing completion events properly */
2436
2437 shift = ap->port_no * 2;
2438 if (hc > 0) {
2439 shift++;
2440 hc_port_no -= 4;
Mark Lord9b358e32006-05-19 16:21:03 -04002441 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002442
2443 mask = 0x3 << shift;
2444
2445 /* clear EDMA errors on this port */
2446 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2447
2448 /* clear pending irq events */
2449 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2450 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2451 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2452 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2453
2454 /* enable assertion of portN err, done events */
2455 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2456 writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
Brett Russ31961942005-09-30 01:36:00 -04002457}
2458
Brett Russ05b308e2005-10-05 17:08:53 -04002459/**
2460 * mv_port_init - Perform some early initialization on a single port.
2461 * @port: libata data structure storing shadow register addresses
2462 * @port_mmio: base address of the port
2463 *
2464 * Initialize shadow register mmio addresses, clear outstanding
2465 * interrupts on the port, and unmask interrupts for the future
2466 * start of the port.
2467 *
2468 * LOCKING:
2469 * Inherited from caller.
2470 */
Brett Russ31961942005-09-30 01:36:00 -04002471static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2472{
Tejun Heo0d5ff562007-02-01 15:06:36 +09002473 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
Brett Russ31961942005-09-30 01:36:00 -04002474 unsigned serr_ofs;
2475
Jeff Garzik8b260242005-11-12 12:32:50 -05002476 /* PIO related setup
Brett Russ31961942005-09-30 01:36:00 -04002477 */
2478 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
Jeff Garzik8b260242005-11-12 12:32:50 -05002479 port->error_addr =
Brett Russ31961942005-09-30 01:36:00 -04002480 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2481 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2482 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2483 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2484 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2485 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
Jeff Garzik8b260242005-11-12 12:32:50 -05002486 port->status_addr =
Brett Russ31961942005-09-30 01:36:00 -04002487 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2488 /* special case: control/altstatus doesn't have ATA_REG_ address */
2489 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2490
2491 /* unused: */
Randy Dunlap8d9db2d2007-02-16 01:40:06 -08002492 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
Brett Russ20f733e2005-09-01 18:26:17 -04002493
Brett Russ31961942005-09-30 01:36:00 -04002494 /* Clear any currently outstanding port interrupt conditions */
2495 serr_ofs = mv_scr_offset(SCR_ERROR);
2496 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2497 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2498
Mark Lord646a4da2008-01-26 18:30:37 -05002499 /* unmask all non-transient EDMA error interrupts */
2500 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04002501
Jeff Garzik8b260242005-11-12 12:32:50 -05002502 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
Brett Russ31961942005-09-30 01:36:00 -04002503 readl(port_mmio + EDMA_CFG_OFS),
2504 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2505 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
Brett Russ20f733e2005-09-01 18:26:17 -04002506}
2507
/*
 * mv_chip_id - identify the chip generation and select errata workarounds
 * @host: ATA host being probed
 * @board_idx: index into the driver's board table (chip_504x, chip_7042, ...)
 *
 * Sets hpriv->ops, the MV_HP_GEN_* generation flag, and the revision-
 * specific MV_HP_ERRATA_* flags based on the board index and PCI revision
 * ID, then selects the PCI vs PCI-Express interrupt register offsets.
 *
 * Returns 0 on success, 1 on an invalid board index.
 */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			           "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			           "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			           "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* intentional fall through: 7042 is a PCIe 6042 and shares
		 * the Gen-IIE setup below */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			           "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		dev_printk(KERN_ERR, &pdev->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	/* PCIe parts expose their interrupt cause/mask at different offsets */
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
2643
/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @host: ATA host to initialize
 *      @board_idx: controller index
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      Returns 0 on success, nonzero from mv_chip_id()/reset_hc() on
 *      failure.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask: keep the chip quiet during bring-up */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* save per-port PHY signal settings before the controller reset */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	/* per-port interface setup and PHY errata fixups */
	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	/* wire up libata shadow registers and describe each port */
	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);
		unsigned int offset = port_mmio - mmio;

		mv_port_init(&ap->ioaddr, port_mmio);

		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	/* and unmask interrupt generation for host regs */
	writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);

	/* Gen-I chips use a different main-IRQ mask set */
	if (IS_GEN_I(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + hpriv->irq_cause_ofs),
		readl(mmio + hpriv->irq_mask_ofs));

done:
	return rc;
}
2739
Brett Russ05b308e2005-10-05 17:08:53 -04002740/**
2741 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002742 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002743 *
2744 * FIXME: complete this.
2745 *
2746 * LOCKING:
2747 * Inherited from caller.
2748 */
Tejun Heo4447d352007-04-17 23:44:08 +09002749static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002750{
Tejun Heo4447d352007-04-17 23:44:08 +09002751 struct pci_dev *pdev = to_pci_dev(host->dev);
2752 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07002753 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002754 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002755
2756 /* Use this to determine the HW stepping of the chip so we know
2757 * what errata to workaround
2758 */
Brett Russ31961942005-09-30 01:36:00 -04002759 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2760 if (scc == 0)
2761 scc_s = "SCSI";
2762 else if (scc == 0x01)
2763 scc_s = "RAID";
2764 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002765 scc_s = "?";
2766
2767 if (IS_GEN_I(hpriv))
2768 gen = "I";
2769 else if (IS_GEN_II(hpriv))
2770 gen = "II";
2771 else if (IS_GEN_IIE(hpriv))
2772 gen = "IIE";
2773 else
2774 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002775
Jeff Garzika9524a72005-10-30 14:39:11 -05002776 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002777 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2778 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002779 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2780}
2781
Mark Lordda2fa9b2008-01-26 18:32:45 -05002782static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2783{
2784 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2785 MV_CRQB_Q_SZ, 0);
2786 if (!hpriv->crqb_pool)
2787 return -ENOMEM;
2788
2789 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2790 MV_CRPB_Q_SZ, 0);
2791 if (!hpriv->crpb_pool)
2792 return -ENOMEM;
2793
2794 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2795 MV_SG_TBL_SZ, 0);
2796 if (!hpriv->sg_tbl_pool)
2797 return -ENOMEM;
2798
2799 return 0;
2800}
2801
/**
 *      mv_init_one - handle a positive probe of a Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      Allocates the ATA host and driver-private data, maps BARs,
 *      creates DMA pools, initializes the adapter, and activates the
 *      host.  All resources are devres-managed (devm_/pcim_/dmam_), so
 *      error paths simply return and cleanup happens automatically.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	/* print the driver version banner once, on first probe */
	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);	/* BAR busy: keep device enabled */
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	/* set the DMA mask (64-bit where supported) */
	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts; fall back to legacy INTx if MSI setup fails */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
2868
2869static int __init mv_init(void)
2870{
Pavel Roskinb7887192006-08-10 18:13:18 +09002871 return pci_register_driver(&mv_pci_driver);
Brett Russ20f733e2005-09-01 18:26:17 -04002872}
2873
2874static void __exit mv_exit(void)
2875{
2876 pci_unregister_driver(&mv_pci_driver);
2877}
2878
/* Module metadata and parameters */
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

/* read-only module parameter: MSI vs legacy INTx, consumed in mv_init_one() */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);