blob: 817595cfc2f7ab7435bf9ded8ccd4a8912b300b4 [file] [log] [blame]
Brett Russ20f733e2005-09-01 18:26:17 -04001/*
2 * sata_mv.c - Marvell SATA support
3 *
Jeff Garzik8b260242005-11-12 12:32:50 -05004 * Copyright 2005: EMC Corporation, all rights reserved.
Jeff Garzike2b1be52005-11-18 14:04:23 -05005 * Copyright 2005 Red Hat, Inc. All rights reserved.
Brett Russ20f733e2005-09-01 18:26:17 -04006 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
Jeff Garzik4a05e202007-05-24 23:40:15 -040024/*
25 sata_mv TODO list:
26
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember a couple workarounds (one related to PCI-X)
30 are still needed.
31
Jeff Garzik4a05e202007-05-24 23:40:15 -040032 4) Add NCQ support (easy to intermediate, once new-EH support appears)
33
34 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
35
36 6) Add port multiplier support (intermediate)
37
Jeff Garzik4a05e202007-05-24 23:40:15 -040038 8) Develop a low-power-consumption strategy, and implement it.
39
40 9) [Experiment, low priority] See if ATAPI can be supported using
41 "unknown FIS" or "vendor-specific FIS" support, or something creative
42 like that.
43
44 10) [Experiment, low priority] Investigate interrupt coalescing.
45 Quite often, especially with PCI Message Signalled Interrupts (MSI),
46 the overhead reduced by interrupt mitigation is quite often not
47 worth the latency cost.
48
49 11) [Experiment, Marvell value added] Is it possible to use target
50 mode to cross-connect two Linux boxes with Marvell cards? If so,
51 creating LibATA target mode support would be very interesting.
52
53 Target mode, for those without docs, is the ability to directly
54 connect two SATA controllers.
55
56 13) Verify that 7042 is fully supported. I only have a 6042.
57
58*/
59
60
Brett Russ20f733e2005-09-01 18:26:17 -040061#include <linux/kernel.h>
62#include <linux/module.h>
63#include <linux/pci.h>
64#include <linux/init.h>
65#include <linux/blkdev.h>
66#include <linux/delay.h>
67#include <linux/interrupt.h>
Brett Russ20f733e2005-09-01 18:26:17 -040068#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050069#include <linux/device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040070#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050071#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040072#include <scsi/scsi_device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040073#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040074
75#define DRV_NAME "sata_mv"
Jeff Garzik6c087722007-10-12 00:16:23 -040076#define DRV_VERSION "1.01"
Brett Russ20f733e2005-09-01 18:26:17 -040077
78enum {
79 /* BAR's are enumerated in terms of pci_resource_start() terms */
80 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
81 MV_IO_BAR = 2, /* offset 0x18: IO space */
82 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
83
84 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
85 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
86
87 MV_PCI_REG_BASE = 0,
88 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
Mark Lord615ab952006-05-19 16:24:56 -040089 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
90 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
91 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
92 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
93 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
94
Brett Russ20f733e2005-09-01 18:26:17 -040095 MV_SATAHC0_REG_BASE = 0x20000,
Jeff Garzik522479f2005-11-12 22:14:02 -050096 MV_FLASH_CTL = 0x1046c,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -050097 MV_GPIO_PORT_CTL = 0x104f0,
98 MV_RESET_CFG = 0x180d8,
Brett Russ20f733e2005-09-01 18:26:17 -040099
100 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
101 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
102 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
103 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
104
Brett Russ31961942005-09-30 01:36:00 -0400105 MV_MAX_Q_DEPTH = 32,
106 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
107
108 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
109 * CRPB needs alignment on a 256B boundary. Size == 256B
Brett Russ31961942005-09-30 01:36:00 -0400110 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
111 */
112 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
113 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
Mark Lordda2fa9b2008-01-26 18:32:45 -0500114 MV_MAX_SG_CT = 256,
Brett Russ31961942005-09-30 01:36:00 -0400115 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
Brett Russ31961942005-09-30 01:36:00 -0400116
Brett Russ20f733e2005-09-01 18:26:17 -0400117 MV_PORTS_PER_HC = 4,
118 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
119 MV_PORT_HC_SHIFT = 2,
Brett Russ31961942005-09-30 01:36:00 -0400120 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
Brett Russ20f733e2005-09-01 18:26:17 -0400121 MV_PORT_MASK = 3,
122
123 /* Host Flags */
124 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
125 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400126 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400127 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
128 ATA_FLAG_PIO_POLLING,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500129 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
Brett Russ20f733e2005-09-01 18:26:17 -0400130
Brett Russ31961942005-09-30 01:36:00 -0400131 CRQB_FLAG_READ = (1 << 0),
132 CRQB_TAG_SHIFT = 1,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400133 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
134 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
Brett Russ31961942005-09-30 01:36:00 -0400135 CRQB_CMD_ADDR_SHIFT = 8,
136 CRQB_CMD_CS = (0x2 << 11),
137 CRQB_CMD_LAST = (1 << 15),
138
139 CRPB_FLAG_STATUS_SHIFT = 8,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400140 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
141 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
Brett Russ31961942005-09-30 01:36:00 -0400142
143 EPRD_FLAG_END_OF_TBL = (1 << 31),
144
Brett Russ20f733e2005-09-01 18:26:17 -0400145 /* PCI interface registers */
146
Brett Russ31961942005-09-30 01:36:00 -0400147 PCI_COMMAND_OFS = 0xc00,
148
Brett Russ20f733e2005-09-01 18:26:17 -0400149 PCI_MAIN_CMD_STS_OFS = 0xd30,
150 STOP_PCI_MASTER = (1 << 2),
151 PCI_MASTER_EMPTY = (1 << 3),
152 GLOB_SFT_RST = (1 << 4),
153
Jeff Garzik522479f2005-11-12 22:14:02 -0500154 MV_PCI_MODE = 0xd00,
155 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
156 MV_PCI_DISC_TIMER = 0xd04,
157 MV_PCI_MSI_TRIGGER = 0xc38,
158 MV_PCI_SERR_MASK = 0xc28,
159 MV_PCI_XBAR_TMOUT = 0x1d04,
160 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
161 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
162 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
163 MV_PCI_ERR_COMMAND = 0x1d50,
164
Mark Lord02a121d2007-12-01 13:07:22 -0500165 PCI_IRQ_CAUSE_OFS = 0x1d58,
166 PCI_IRQ_MASK_OFS = 0x1d5c,
Brett Russ20f733e2005-09-01 18:26:17 -0400167 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
168
Mark Lord02a121d2007-12-01 13:07:22 -0500169 PCIE_IRQ_CAUSE_OFS = 0x1900,
170 PCIE_IRQ_MASK_OFS = 0x1910,
Mark Lord646a4da2008-01-26 18:30:37 -0500171 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
Mark Lord02a121d2007-12-01 13:07:22 -0500172
Brett Russ20f733e2005-09-01 18:26:17 -0400173 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
174 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
175 PORT0_ERR = (1 << 0), /* shift by port # */
176 PORT0_DONE = (1 << 1), /* shift by port # */
177 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
178 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
179 PCI_ERR = (1 << 18),
180 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
181 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500182 PORTS_0_3_COAL_DONE = (1 << 8),
183 PORTS_4_7_COAL_DONE = (1 << 17),
Brett Russ20f733e2005-09-01 18:26:17 -0400184 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
185 GPIO_INT = (1 << 22),
186 SELF_INT = (1 << 23),
187 TWSI_INT = (1 << 24),
188 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500189 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
Jeff Garzik8b260242005-11-12 12:32:50 -0500190 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
Brett Russ20f733e2005-09-01 18:26:17 -0400191 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
192 HC_MAIN_RSVD),
Jeff Garzikfb621e22007-02-25 04:19:45 -0500193 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
194 HC_MAIN_RSVD_5),
Brett Russ20f733e2005-09-01 18:26:17 -0400195
196 /* SATAHC registers */
197 HC_CFG_OFS = 0,
198
199 HC_IRQ_CAUSE_OFS = 0x14,
Brett Russ31961942005-09-30 01:36:00 -0400200 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
Brett Russ20f733e2005-09-01 18:26:17 -0400201 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
202 DEV_IRQ = (1 << 8), /* shift by port # */
203
204 /* Shadow block registers */
Brett Russ31961942005-09-30 01:36:00 -0400205 SHD_BLK_OFS = 0x100,
206 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
Brett Russ20f733e2005-09-01 18:26:17 -0400207
208 /* SATA registers */
209 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
210 SATA_ACTIVE_OFS = 0x350,
Mark Lord0c589122008-01-26 18:31:16 -0500211 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500212 PHY_MODE3 = 0x310,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500213 PHY_MODE4 = 0x314,
214 PHY_MODE2 = 0x330,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500215 MV5_PHY_MODE = 0x74,
216 MV5_LT_MODE = 0x30,
217 MV5_PHY_CTL = 0x0C,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500218 SATA_INTERFACE_CTL = 0x050,
219
220 MV_M2_PREAMP_MASK = 0x7e0,
Brett Russ20f733e2005-09-01 18:26:17 -0400221
222 /* Port registers */
223 EDMA_CFG_OFS = 0,
Mark Lord0c589122008-01-26 18:31:16 -0500224 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
225 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
226 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
227 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
228 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
Brett Russ20f733e2005-09-01 18:26:17 -0400229
230 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
231 EDMA_ERR_IRQ_MASK_OFS = 0xc,
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400232 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
233 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
234 EDMA_ERR_DEV = (1 << 2), /* device error */
235 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
236 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
237 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400238 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
239 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400240 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400241 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400242 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
243 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
244 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
245 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
Mark Lord646a4da2008-01-26 18:30:37 -0500246
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400247 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500248 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
249 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
250 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
251 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
252
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400253 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500254
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400255 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500256 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
257 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
258 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
259 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
260 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
261
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400262 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500263
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400264 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400265 EDMA_ERR_OVERRUN_5 = (1 << 5),
266 EDMA_ERR_UNDERRUN_5 = (1 << 6),
Mark Lord646a4da2008-01-26 18:30:37 -0500267
268 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
269 EDMA_ERR_LNK_CTRL_RX_1 |
270 EDMA_ERR_LNK_CTRL_RX_3 |
271 EDMA_ERR_LNK_CTRL_TX,
272
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400273 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
274 EDMA_ERR_PRD_PAR |
275 EDMA_ERR_DEV_DCON |
276 EDMA_ERR_DEV_CON |
277 EDMA_ERR_SERR |
278 EDMA_ERR_SELF_DIS |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400279 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400280 EDMA_ERR_CRPB_PAR |
281 EDMA_ERR_INTRL_PAR |
282 EDMA_ERR_IORDY |
283 EDMA_ERR_LNK_CTRL_RX_2 |
284 EDMA_ERR_LNK_DATA_RX |
285 EDMA_ERR_LNK_DATA_TX |
286 EDMA_ERR_TRANS_PROTO,
287 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
288 EDMA_ERR_PRD_PAR |
289 EDMA_ERR_DEV_DCON |
290 EDMA_ERR_DEV_CON |
291 EDMA_ERR_OVERRUN_5 |
292 EDMA_ERR_UNDERRUN_5 |
293 EDMA_ERR_SELF_DIS_5 |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400294 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400295 EDMA_ERR_CRPB_PAR |
296 EDMA_ERR_INTRL_PAR |
297 EDMA_ERR_IORDY,
Brett Russ20f733e2005-09-01 18:26:17 -0400298
Brett Russ31961942005-09-30 01:36:00 -0400299 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
300 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400301
302 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
303 EDMA_REQ_Q_PTR_SHIFT = 5,
304
305 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
306 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
307 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400308 EDMA_RSP_Q_PTR_SHIFT = 3,
309
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400310 EDMA_CMD_OFS = 0x28, /* EDMA command register */
311 EDMA_EN = (1 << 0), /* enable EDMA */
312 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
313 ATA_RST = (1 << 2), /* reset trans/link/phy */
Brett Russ20f733e2005-09-01 18:26:17 -0400314
Jeff Garzikc9d39132005-11-13 17:47:51 -0500315 EDMA_IORDY_TMOUT = 0x34,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500316 EDMA_ARB_CFG = 0x38,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500317
Brett Russ31961942005-09-30 01:36:00 -0400318 /* Host private flags (hp_flags) */
319 MV_HP_FLAG_MSI = (1 << 0),
Jeff Garzik47c2b672005-11-12 21:13:17 -0500320 MV_HP_ERRATA_50XXB0 = (1 << 1),
321 MV_HP_ERRATA_50XXB2 = (1 << 2),
322 MV_HP_ERRATA_60X1B2 = (1 << 3),
323 MV_HP_ERRATA_60X1C0 = (1 << 4),
Jeff Garzike4e7b892006-01-31 12:18:41 -0500324 MV_HP_ERRATA_XX42A0 = (1 << 5),
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400325 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
326 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
327 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
Mark Lord02a121d2007-12-01 13:07:22 -0500328 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
Brett Russ20f733e2005-09-01 18:26:17 -0400329
Brett Russ31961942005-09-30 01:36:00 -0400330 /* Port private flags (pp_flags) */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400331 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
Mark Lord72109162008-01-26 18:31:33 -0500332 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400333 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
Brett Russ31961942005-09-30 01:36:00 -0400334};
335
Jeff Garzikee9ccdf2007-07-12 15:51:22 -0400336#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
337#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
Jeff Garzike4e7b892006-01-31 12:18:41 -0500338#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500339
Jeff Garzik095fec82005-11-12 09:50:49 -0500340enum {
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400341 /* DMA boundary 0xffff is required by the s/g splitting
342 * we need on /length/ in mv_fill-sg().
343 */
344 MV_DMA_BOUNDARY = 0xffffU,
Jeff Garzik095fec82005-11-12 09:50:49 -0500345
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400346 /* mask of register bits containing lower 32 bits
347 * of EDMA request queue DMA address
348 */
Jeff Garzik095fec82005-11-12 09:50:49 -0500349 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
350
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400351 /* ditto, for response queue */
Jeff Garzik095fec82005-11-12 09:50:49 -0500352 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
353};
354
Jeff Garzik522479f2005-11-12 22:14:02 -0500355enum chip_type {
356 chip_504x,
357 chip_508x,
358 chip_5080,
359 chip_604x,
360 chip_608x,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500361 chip_6042,
362 chip_7042,
Jeff Garzik522479f2005-11-12 22:14:02 -0500363};
364
Brett Russ31961942005-09-30 01:36:00 -0400365/* Command ReQuest Block: 32B */
366struct mv_crqb {
Mark Lorde1469872006-05-22 19:02:03 -0400367 __le32 sg_addr;
368 __le32 sg_addr_hi;
369 __le16 ctrl_flags;
370 __le16 ata_cmd[11];
Brett Russ31961942005-09-30 01:36:00 -0400371};
372
Jeff Garzike4e7b892006-01-31 12:18:41 -0500373struct mv_crqb_iie {
Mark Lorde1469872006-05-22 19:02:03 -0400374 __le32 addr;
375 __le32 addr_hi;
376 __le32 flags;
377 __le32 len;
378 __le32 ata_cmd[4];
Jeff Garzike4e7b892006-01-31 12:18:41 -0500379};
380
Brett Russ31961942005-09-30 01:36:00 -0400381/* Command ResPonse Block: 8B */
382struct mv_crpb {
Mark Lorde1469872006-05-22 19:02:03 -0400383 __le16 id;
384 __le16 flags;
385 __le32 tmstmp;
Brett Russ31961942005-09-30 01:36:00 -0400386};
387
388/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
389struct mv_sg {
Mark Lorde1469872006-05-22 19:02:03 -0400390 __le32 addr;
391 __le32 flags_size;
392 __le32 addr_hi;
393 __le32 reserved;
Brett Russ20f733e2005-09-01 18:26:17 -0400394};
395
396struct mv_port_priv {
Brett Russ31961942005-09-30 01:36:00 -0400397 struct mv_crqb *crqb;
398 dma_addr_t crqb_dma;
399 struct mv_crpb *crpb;
400 dma_addr_t crpb_dma;
Mark Lordeb73d552008-01-29 13:24:00 -0500401 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
402 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400403
404 unsigned int req_idx;
405 unsigned int resp_idx;
406
Brett Russ31961942005-09-30 01:36:00 -0400407 u32 pp_flags;
Brett Russ20f733e2005-09-01 18:26:17 -0400408};
409
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500410struct mv_port_signal {
411 u32 amps;
412 u32 pre;
413};
414
Mark Lord02a121d2007-12-01 13:07:22 -0500415struct mv_host_priv {
416 u32 hp_flags;
417 struct mv_port_signal signal[8];
418 const struct mv_hw_ops *ops;
419 u32 irq_cause_ofs;
420 u32 irq_mask_ofs;
421 u32 unmask_all_irqs;
Mark Lordda2fa9b2008-01-26 18:32:45 -0500422 /*
423 * These consistent DMA memory pools give us guaranteed
424 * alignment for hardware-accessed data structures,
425 * and less memory waste in accomplishing the alignment.
426 */
427 struct dma_pool *crqb_pool;
428 struct dma_pool *crpb_pool;
429 struct dma_pool *sg_tbl_pool;
Mark Lord02a121d2007-12-01 13:07:22 -0500430};
431
Jeff Garzik47c2b672005-11-12 21:13:17 -0500432struct mv_hw_ops {
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500433 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
434 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500435 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
436 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
437 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500438 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
439 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500440 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
441 void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500442};
443
Brett Russ20f733e2005-09-01 18:26:17 -0400444static void mv_irq_clear(struct ata_port *ap);
Tejun Heoda3dbb12007-07-16 14:29:40 +0900445static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
446static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
447static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
448static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Brett Russ31961942005-09-30 01:36:00 -0400449static int mv_port_start(struct ata_port *ap);
450static void mv_port_stop(struct ata_port *ap);
451static void mv_qc_prep(struct ata_queued_cmd *qc);
Jeff Garzike4e7b892006-01-31 12:18:41 -0500452static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
Tejun Heo9a3d9eb2006-01-23 13:09:36 +0900453static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400454static void mv_error_handler(struct ata_port *ap);
455static void mv_post_int_cmd(struct ata_queued_cmd *qc);
456static void mv_eh_freeze(struct ata_port *ap);
457static void mv_eh_thaw(struct ata_port *ap);
Mark Lordf2738272008-01-26 18:32:29 -0500458static void mv6_dev_config(struct ata_device *dev);
Brett Russ20f733e2005-09-01 18:26:17 -0400459static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
460
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500461static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
462 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500463static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
464static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
465 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500466static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
467 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500468static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
469static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500470
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500471static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
472 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500473static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
474static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
475 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500476static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
477 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500478static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
479static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500480static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
481 unsigned int port_no);
Mark Lord72109162008-01-26 18:31:33 -0500482static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
483 void __iomem *port_mmio, int want_ncq);
484static int __mv_stop_dma(struct ata_port *ap);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500485
Mark Lordeb73d552008-01-29 13:24:00 -0500486/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
487 * because we have to allow room for worst case splitting of
488 * PRDs for 64K boundaries in mv_fill_sg().
489 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400490static struct scsi_host_template mv5_sht = {
Brett Russ20f733e2005-09-01 18:26:17 -0400491 .module = THIS_MODULE,
492 .name = DRV_NAME,
493 .ioctl = ata_scsi_ioctl,
494 .queuecommand = ata_scsi_queuecmd,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400495 .can_queue = ATA_DEF_QUEUE,
496 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400497 .sg_tablesize = MV_MAX_SG_CT / 2,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400498 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
499 .emulated = ATA_SHT_EMULATED,
500 .use_clustering = 1,
501 .proc_name = DRV_NAME,
502 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzik3be6cbd2007-10-18 16:21:18 -0400503 .slave_configure = ata_scsi_slave_config,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400504 .slave_destroy = ata_scsi_slave_destroy,
505 .bios_param = ata_std_bios_param,
506};
507
508static struct scsi_host_template mv6_sht = {
509 .module = THIS_MODULE,
510 .name = DRV_NAME,
511 .ioctl = ata_scsi_ioctl,
512 .queuecommand = ata_scsi_queuecmd,
Mark Lord138bfdd2008-01-26 18:33:18 -0500513 .change_queue_depth = ata_scsi_change_queue_depth,
514 .can_queue = MV_MAX_Q_DEPTH - 1,
Brett Russ20f733e2005-09-01 18:26:17 -0400515 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400516 .sg_tablesize = MV_MAX_SG_CT / 2,
Brett Russ20f733e2005-09-01 18:26:17 -0400517 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
518 .emulated = ATA_SHT_EMULATED,
Jeff Garzikd88184f2007-02-26 01:26:06 -0500519 .use_clustering = 1,
Brett Russ20f733e2005-09-01 18:26:17 -0400520 .proc_name = DRV_NAME,
521 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzik3be6cbd2007-10-18 16:21:18 -0400522 .slave_configure = ata_scsi_slave_config,
Tejun Heoccf68c32006-05-31 18:28:09 +0900523 .slave_destroy = ata_scsi_slave_destroy,
Brett Russ20f733e2005-09-01 18:26:17 -0400524 .bios_param = ata_std_bios_param,
Brett Russ20f733e2005-09-01 18:26:17 -0400525};
526
Jeff Garzikc9d39132005-11-13 17:47:51 -0500527static const struct ata_port_operations mv5_ops = {
Jeff Garzikc9d39132005-11-13 17:47:51 -0500528 .tf_load = ata_tf_load,
529 .tf_read = ata_tf_read,
530 .check_status = ata_check_status,
531 .exec_command = ata_exec_command,
532 .dev_select = ata_std_dev_select,
533
Jeff Garzikcffacd82007-03-09 09:46:47 -0500534 .cable_detect = ata_cable_sata,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500535
536 .qc_prep = mv_qc_prep,
537 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900538 .data_xfer = ata_data_xfer,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500539
Jeff Garzikc9d39132005-11-13 17:47:51 -0500540 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900541 .irq_on = ata_irq_on,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500542
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400543 .error_handler = mv_error_handler,
544 .post_internal_cmd = mv_post_int_cmd,
545 .freeze = mv_eh_freeze,
546 .thaw = mv_eh_thaw,
547
Jeff Garzikc9d39132005-11-13 17:47:51 -0500548 .scr_read = mv5_scr_read,
549 .scr_write = mv5_scr_write,
550
551 .port_start = mv_port_start,
552 .port_stop = mv_port_stop,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500553};
554
555static const struct ata_port_operations mv6_ops = {
Mark Lordf2738272008-01-26 18:32:29 -0500556 .dev_config = mv6_dev_config,
Brett Russ20f733e2005-09-01 18:26:17 -0400557 .tf_load = ata_tf_load,
558 .tf_read = ata_tf_read,
559 .check_status = ata_check_status,
560 .exec_command = ata_exec_command,
561 .dev_select = ata_std_dev_select,
562
Jeff Garzikcffacd82007-03-09 09:46:47 -0500563 .cable_detect = ata_cable_sata,
Brett Russ20f733e2005-09-01 18:26:17 -0400564
Brett Russ31961942005-09-30 01:36:00 -0400565 .qc_prep = mv_qc_prep,
566 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900567 .data_xfer = ata_data_xfer,
Brett Russ20f733e2005-09-01 18:26:17 -0400568
Brett Russ20f733e2005-09-01 18:26:17 -0400569 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900570 .irq_on = ata_irq_on,
Brett Russ20f733e2005-09-01 18:26:17 -0400571
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400572 .error_handler = mv_error_handler,
573 .post_internal_cmd = mv_post_int_cmd,
574 .freeze = mv_eh_freeze,
575 .thaw = mv_eh_thaw,
Mark Lord138bfdd2008-01-26 18:33:18 -0500576 .qc_defer = ata_std_qc_defer,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400577
Brett Russ20f733e2005-09-01 18:26:17 -0400578 .scr_read = mv_scr_read,
579 .scr_write = mv_scr_write,
580
Brett Russ31961942005-09-30 01:36:00 -0400581 .port_start = mv_port_start,
582 .port_stop = mv_port_stop,
Brett Russ20f733e2005-09-01 18:26:17 -0400583};
584
Jeff Garzike4e7b892006-01-31 12:18:41 -0500585static const struct ata_port_operations mv_iie_ops = {
Jeff Garzike4e7b892006-01-31 12:18:41 -0500586 .tf_load = ata_tf_load,
587 .tf_read = ata_tf_read,
588 .check_status = ata_check_status,
589 .exec_command = ata_exec_command,
590 .dev_select = ata_std_dev_select,
591
Jeff Garzikcffacd82007-03-09 09:46:47 -0500592 .cable_detect = ata_cable_sata,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500593
594 .qc_prep = mv_qc_prep_iie,
595 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900596 .data_xfer = ata_data_xfer,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500597
Jeff Garzike4e7b892006-01-31 12:18:41 -0500598 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900599 .irq_on = ata_irq_on,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500600
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400601 .error_handler = mv_error_handler,
602 .post_internal_cmd = mv_post_int_cmd,
603 .freeze = mv_eh_freeze,
604 .thaw = mv_eh_thaw,
Mark Lord138bfdd2008-01-26 18:33:18 -0500605 .qc_defer = ata_std_qc_defer,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400606
Jeff Garzike4e7b892006-01-31 12:18:41 -0500607 .scr_read = mv_scr_read,
608 .scr_write = mv_scr_write,
609
610 .port_start = mv_port_start,
611 .port_stop = mv_port_stop,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500612};
613
/*
 * Per-variant port configuration, indexed by the chip_* board ids
 * used as driver_data in mv_pci_tbl below.  5xxx parts use mv5_ops,
 * 6xxx parts add NCQ and use mv6_ops, and the IIE parts (6042/7042)
 * use mv_iie_ops.  All variants support PIO0-4 and UDMA6.
 */
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
662
/*
 * PCI id table: maps each supported device to the chip_* index into
 * mv_port_info[] (carried in the entry's driver_data field).
 */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
692
/* PCI glue: probe routes through mv_init_one(); removal is generic libata. */
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
699
/* Low-level hardware op vector for the 5xxx-family chips. */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
708
/* Low-level hardware op vector for the 6xxx-family chips. */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
717
/*
 * module options
 */
static int msi;	/* Use PCI msi; either zero (off, default) or non-zero */
722
723
Jeff Garzikd88184f2007-02-26 01:26:06 -0500724/* move to PCI layer or libata core? */
725static int pci_go_64(struct pci_dev *pdev)
726{
727 int rc;
728
729 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
730 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
731 if (rc) {
732 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
733 if (rc) {
734 dev_printk(KERN_ERR, &pdev->dev,
735 "64-bit DMA enable failed\n");
736 return rc;
737 }
738 }
739 } else {
740 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
741 if (rc) {
742 dev_printk(KERN_ERR, &pdev->dev,
743 "32-bit DMA enable failed\n");
744 return rc;
745 }
746 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
747 if (rc) {
748 dev_printk(KERN_ERR, &pdev->dev,
749 "32-bit consistent DMA enable failed\n");
750 return rc;
751 }
752 }
753
754 return rc;
755}
756
Jeff Garzikddef9bb2006-02-02 16:17:06 -0500757/*
Brett Russ20f733e2005-09-01 18:26:17 -0400758 * Functions
759 */
760
761static inline void writelfl(unsigned long data, void __iomem *addr)
762{
763 writel(data, addr);
764 (void) readl(addr); /* flush to avoid PCI posted write */
765}
766
Brett Russ20f733e2005-09-01 18:26:17 -0400767static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
768{
769 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
770}
771
Jeff Garzikc9d39132005-11-13 17:47:51 -0500772static inline unsigned int mv_hc_from_port(unsigned int port)
773{
774 return port >> MV_PORT_HC_SHIFT;
775}
776
777static inline unsigned int mv_hardport_from_port(unsigned int port)
778{
779 return port & MV_PORT_MASK;
780}
781
782static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
783 unsigned int port)
784{
785 return mv_hc_base(base, mv_hc_from_port(port));
786}
787
Brett Russ20f733e2005-09-01 18:26:17 -0400788static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
789{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500790 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500791 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500792 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400793}
794
795static inline void __iomem *mv_ap_base(struct ata_port *ap)
796{
Tejun Heo0d5ff562007-02-01 15:06:36 +0900797 return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
Brett Russ20f733e2005-09-01 18:26:17 -0400798}
799
Jeff Garzikcca39742006-08-24 03:19:22 -0400800static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400801{
Jeff Garzikcca39742006-08-24 03:19:22 -0400802 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400803}
804
/* Intentionally empty irq_clear hook; this driver clears interrupt
 * cause registers elsewhere (see the IRQ-cause writes in mv_start_dma).
 */
static void mv_irq_clear(struct ata_port *ap)
{
}
808
/**
 *	mv_set_edma_ptrs - program EDMA queue base addresses and pointers
 *	@port_mmio: per-port register base
 *	@hpriv: host private data (consulted for the XX42A0 errata flag)
 *	@pp: port private data holding the queue DMA addresses and
 *	     software indices (req_idx/resp_idx)
 *
 *	Loads the request (CRQB) and response (CRPB) queue base
 *	addresses into the port registers and synchronizes the hardware
 *	in/out pointers with the software copies.  On chips with the
 *	MV_HP_ERRATA_XX42A0 workaround the full low 32 address bits are
 *	rewritten along with the pointer field.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* CRQB ring must be 1KB-aligned; low bits carry the pointer field */
	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	/* CRPB ring must be 256B-aligned */
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
848
/**
 *	mv_start_dma - Enable eDMA engine
 *	@ap: ATA channel to manipulate
 *	@port_mmio: per-port register base
 *	@pp: port private data
 *	@protocol: taskfile protocol of the command about to run
 *
 *	If the engine is already running but in the wrong NCQ/non-NCQ
 *	mode for @protocol, it is stopped first.  When (re)starting, the
 *	stale EDMA error/IRQ cause bits are cleared, the EDMA config is
 *	reprogrammed for the wanted mode, the queue pointers are synced,
 *	and only then is EDMA_EN written.
 *
 *	Verify the local cache of the eDMA state is accurate with a
 *	WARN_ON.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		/* mode change requires a full stop/reconfigure cycle */
		if (want_ncq != using_ncq)
			__mv_stop_dma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
				ap->host->iomap[MV_PRIMARY_BAR], hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			   (CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}
901
/**
 *	__mv_stop_dma - Disable eDMA engine
 *	@ap: ATA channel to manipulate
 *
 *	Writes the EDMA disable bit (self-clearing) if the engine is
 *	believed active, then polls up to 1000 x 100us for EDMA_EN to
 *	drop.  Returns 0 on success, -EIO if the engine never stopped.
 *
 *	Verify the local cache of the eDMA state is accurate with a
 *	WARN_ON.
 *
 *	LOCKING:
 *	Inherited from caller (see mv_stop_dma for the locked wrapper).
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.  The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		/* software says stopped: hardware had better agree */
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
944
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400945static int mv_stop_dma(struct ata_port *ap)
946{
947 unsigned long flags;
948 int rc;
949
950 spin_lock_irqsave(&ap->host->lock, flags);
951 rc = __mv_stop_dma(ap);
952 spin_unlock_irqrestore(&ap->host->lock, flags);
953
954 return rc;
955}
956
#ifdef ATA_DEBUG
/* Debug helper: hex-dump @bytes of MMIO space at @start, 4 words/row. */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int off = 0;

	while (off < bytes) {
		int col;

		DPRINTK("%p: ", start + off);
		for (col = 0; col < 4 && off < bytes; col++) {
			printk("%08x ", readl(start + off));
			off += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
971
/* Debug helper: hex-dump the first @bytes of @pdev's PCI config space. */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int off = 0;

	while (off < bytes) {
		int col;

		DPRINTK("%02x: ", off);
		for (col = 0; col < 4 && off < bytes; col++) {
			u32 dw;

			(void) pci_read_config_dword(pdev, off, &dw);
			printk("%08x ", dw);
			off += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
/*
 * Debug helper: dump PCI config space, global PCI-side registers, and
 * the HC + per-port (EDMA and SATA interface) register blocks.  A
 * negative @port means "all ports on all host controllers"; otherwise
 * only @port and its owning HC are dumped.  Compiled out unless
 * ATA_DEBUG is defined.
 */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	/* chip-global register blocks (offsets per Marvell datasheet) */
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
1032
Brett Russ20f733e2005-09-01 18:26:17 -04001033static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1034{
1035 unsigned int ofs;
1036
1037 switch (sc_reg_in) {
1038 case SCR_STATUS:
1039 case SCR_CONTROL:
1040 case SCR_ERROR:
1041 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1042 break;
1043 case SCR_ACTIVE:
1044 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1045 break;
1046 default:
1047 ofs = 0xffffffffU;
1048 break;
1049 }
1050 return ofs;
1051}
1052
Tejun Heoda3dbb12007-07-16 14:29:40 +09001053static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -04001054{
1055 unsigned int ofs = mv_scr_offset(sc_reg_in);
1056
Tejun Heoda3dbb12007-07-16 14:29:40 +09001057 if (ofs != 0xffffffffU) {
1058 *val = readl(mv_ap_base(ap) + ofs);
1059 return 0;
1060 } else
1061 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001062}
1063
Tejun Heoda3dbb12007-07-16 14:29:40 +09001064static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -04001065{
1066 unsigned int ofs = mv_scr_offset(sc_reg_in);
1067
Tejun Heoda3dbb12007-07-16 14:29:40 +09001068 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -04001069 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001070 return 0;
1071 } else
1072 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001073}
1074
Mark Lordf2738272008-01-26 18:32:29 -05001075static void mv6_dev_config(struct ata_device *adev)
1076{
1077 /*
1078 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1079 * See mv_qc_prep() for more info.
1080 */
1081 if (adev->flags & ATA_DFLAG_NCQ)
1082 if (adev->max_sectors > ATA_MAX_SECTORS)
1083 adev->max_sectors = ATA_MAX_SECTORS;
1084}
1085
/*
 * mv_edma_cfg - program the EDMA configuration register for one port.
 * @pp: port private data (NCQ-enabled flag is cached here)
 * @hpriv: host private data (chip generation checks)
 * @port_mmio: per-port register base
 * @want_ncq: non-zero to enable NCQ mode
 *
 * Builds the per-generation config word, records the NCQ state in
 * pp->pp_flags, and writes it with a flushing write.
 */
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq)
{
	u32 cfg;

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1115
Mark Lordda2fa9b2008-01-26 18:32:45 -05001116static void mv_port_free_dma_mem(struct ata_port *ap)
1117{
1118 struct mv_host_priv *hpriv = ap->host->private_data;
1119 struct mv_port_priv *pp = ap->private_data;
Mark Lordeb73d552008-01-29 13:24:00 -05001120 int tag;
Mark Lordda2fa9b2008-01-26 18:32:45 -05001121
1122 if (pp->crqb) {
1123 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1124 pp->crqb = NULL;
1125 }
1126 if (pp->crpb) {
1127 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1128 pp->crpb = NULL;
1129 }
Mark Lordeb73d552008-01-29 13:24:00 -05001130 /*
1131 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1132 * For later hardware, we have one unique sg_tbl per NCQ tag.
1133 */
1134 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1135 if (pp->sg_tbl[tag]) {
1136 if (tag == 0 || !IS_GEN_I(hpriv))
1137 dma_pool_free(hpriv->sg_tbl_pool,
1138 pp->sg_tbl[tag],
1139 pp->sg_tbl_dma[tag]);
1140 pp->sg_tbl[tag] = NULL;
1141 }
Mark Lordda2fa9b2008-01-26 18:32:45 -05001142 }
1143}
1144
/**
 *	mv_port_start - Port specific init/start routine.
 *	@ap: ATA channel to manipulate
 *
 *	Allocate and point to DMA memory, init port private memory,
 *	zero indices.  On any allocation failure after the first DMA
 *	buffer, everything already taken from the pools is released via
 *	mv_port_free_dma_mem().  Returns 0 or a negative errno.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;
	int tag, rc;

	/* devm allocation: freed automatically on detach */
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			/* Gen-I: every tag aliases the tag-0 table */
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	/* program the hardware under the host lock */
	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(pp, hpriv, port_mmio, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
1216
/**
 *	mv_port_stop - Port specific cleanup/stop routine.
 *	@ap: ATA channel to manipulate
 *
 *	Quiesce the eDMA engine, then release the port's DMA buffers.
 *
 *	LOCKING:
 *	This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
	mv_port_free_dma_mem(ap);
}
1231
/**
 *	mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *	@qc: queued command whose SG list to source from
 *
 *	Populate the SG list and mark the last entry.  Any DMA segment
 *	whose offset+length would cross a 64KB boundary is split into
 *	multiple ePRD entries (length field is only 16 bits; a length
 *	of 0x10000 is encoded as 0).
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	/* each tag has its own sg table (shared on Gen-I, see port_start) */
	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			/* clamp so this entry stops at the 64KB boundary */
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	/* flag the final ePRD entry as end-of-table */
	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
1275
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001276static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001277{
Mark Lord559eeda2006-05-19 16:40:15 -04001278 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001279 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001280 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001281}
1282
/**
 *	mv_qc_prep - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* only DMA and NCQ protocols go through the EDMA request queue */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* point the CRQB at this tag's SG table (split 64-bit address) */
	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accomodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	/* remaining registers, in the fixed order the CRQB expects */
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1372
1373/**
1374 * mv_qc_prep_iie - Host specific command preparation.
1375 * @qc: queued command to prepare
1376 *
1377 * This routine simply redirects to the general purpose routine
1378 * if command is not DMA. Else, it handles prep of the CRQB
1379 * (command request block), does some sanity checking, and calls
1380 * the SG load routine.
1381 *
1382 * LOCKING:
1383 * Inherited from caller.
1384 */
1385static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1386{
1387 struct ata_port *ap = qc->ap;
1388 struct mv_port_priv *pp = ap->private_data;
1389 struct mv_crqb_iie *crqb;
1390 struct ata_taskfile *tf;
Mark Lorda6432432006-05-19 16:36:36 -04001391 unsigned in_index;
Jeff Garzike4e7b892006-01-31 12:18:41 -05001392 u32 flags = 0;
1393
Mark Lord138bfdd2008-01-26 18:33:18 -05001394 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1395 (qc->tf.protocol != ATA_PROT_NCQ))
Jeff Garzike4e7b892006-01-31 12:18:41 -05001396 return;
1397
Jeff Garzike4e7b892006-01-31 12:18:41 -05001398 /* Fill in Gen IIE command request block
1399 */
1400 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1401 flags |= CRQB_FLAG_READ;
1402
Tejun Heobeec7db2006-02-11 19:11:13 +09001403 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
Jeff Garzike4e7b892006-01-31 12:18:41 -05001404 flags |= qc->tag << CRQB_TAG_SHIFT;
Mark Lord8c0aeb42008-01-26 18:31:48 -05001405 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
Jeff Garzike4e7b892006-01-31 12:18:41 -05001406
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001407 /* get current queue index from software */
1408 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
Mark Lorda6432432006-05-19 16:36:36 -04001409
1410 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
Mark Lordeb73d552008-01-29 13:24:00 -05001411 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1412 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
Jeff Garzike4e7b892006-01-31 12:18:41 -05001413 crqb->flags = cpu_to_le32(flags);
1414
1415 tf = &qc->tf;
1416 crqb->ata_cmd[0] = cpu_to_le32(
1417 (tf->command << 16) |
1418 (tf->feature << 24)
1419 );
1420 crqb->ata_cmd[1] = cpu_to_le32(
1421 (tf->lbal << 0) |
1422 (tf->lbam << 8) |
1423 (tf->lbah << 16) |
1424 (tf->device << 24)
1425 );
1426 crqb->ata_cmd[2] = cpu_to_le32(
1427 (tf->hob_lbal << 0) |
1428 (tf->hob_lbam << 8) |
1429 (tf->hob_lbah << 16) |
1430 (tf->hob_feature << 24)
1431 );
1432 crqb->ata_cmd[3] = cpu_to_le32(
1433 (tf->nsect << 0) |
1434 (tf->hob_nsect << 8)
1435 );
1436
1437 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1438 return;
Brett Russ31961942005-09-30 01:36:00 -04001439 mv_fill_sg(qc);
1440}
1441
Brett Russ05b308e2005-10-05 17:08:53 -04001442/**
1443 * mv_qc_issue - Initiate a command to the host
1444 * @qc: queued command to start
1445 *
1446 * This routine simply redirects to the general purpose routine
1447 * if command is not DMA. Else, it sanity checks our local
1448 * caches of the request producer/consumer indices then enables
1449 * DMA and bumps the request producer index.
1450 *
1451 * LOCKING:
1452 * Inherited from caller.
1453 */
Tejun Heo9a3d9eb2006-01-23 13:09:36 +09001454static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
Brett Russ31961942005-09-30 01:36:00 -04001455{
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001456 struct ata_port *ap = qc->ap;
1457 void __iomem *port_mmio = mv_ap_base(ap);
1458 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001459 u32 in_index;
Brett Russ31961942005-09-30 01:36:00 -04001460
Mark Lord138bfdd2008-01-26 18:33:18 -05001461 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1462 (qc->tf.protocol != ATA_PROT_NCQ)) {
Brett Russ31961942005-09-30 01:36:00 -04001463 /* We're about to send a non-EDMA capable command to the
1464 * port. Turn off EDMA so there won't be problems accessing
1465 * shadow block, etc registers.
1466 */
Jeff Garzik0ea9e172007-07-13 17:06:45 -04001467 __mv_stop_dma(ap);
Brett Russ31961942005-09-30 01:36:00 -04001468 return ata_qc_issue_prot(qc);
1469 }
1470
Mark Lord72109162008-01-26 18:31:33 -05001471 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001472
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001473 pp->req_idx++;
Brett Russ31961942005-09-30 01:36:00 -04001474
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001475 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
Brett Russ31961942005-09-30 01:36:00 -04001476
1477 /* and write the request in pointer to kick the EDMA to life */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001478 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1479 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
Brett Russ31961942005-09-30 01:36:00 -04001480
1481 return 0;
1482}
1483
Brett Russ05b308e2005-10-05 17:08:53 -04001484/**
Brett Russ05b308e2005-10-05 17:08:53 -04001485 * mv_err_intr - Handle error interrupts on the port
1486 * @ap: ATA channel to manipulate
Mark Lord9b358e32006-05-19 16:21:03 -04001487 * @reset_allowed: bool: 0 == don't trigger from reset here
Brett Russ05b308e2005-10-05 17:08:53 -04001488 *
1489 * In most cases, just clear the interrupt and move on. However,
1490 * some cases require an eDMA reset, which is done right before
1491 * the COMRESET in mv_phy_reset(). The SERR case requires a
1492 * clear of pending errors in the SATA SERROR register. Finally,
1493 * if the port disabled DMA, update our cached copy to match.
1494 *
1495 * LOCKING:
1496 * Inherited from caller.
1497 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001498static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
Brett Russ20f733e2005-09-01 18:26:17 -04001499{
Brett Russ31961942005-09-30 01:36:00 -04001500 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001501 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1502 struct mv_port_priv *pp = ap->private_data;
1503 struct mv_host_priv *hpriv = ap->host->private_data;
1504 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1505 unsigned int action = 0, err_mask = 0;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001506 struct ata_eh_info *ehi = &ap->link.eh_info;
Brett Russ20f733e2005-09-01 18:26:17 -04001507
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001508 ata_ehi_clear_desc(ehi);
Brett Russ20f733e2005-09-01 18:26:17 -04001509
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001510 if (!edma_enabled) {
1511 /* just a guess: do we need to do this? should we
1512 * expand this, and do it in all cases?
1513 */
Tejun Heo936fd732007-08-06 18:36:23 +09001514 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1515 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Brett Russ20f733e2005-09-01 18:26:17 -04001516 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001517
1518 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1519
1520 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1521
1522 /*
1523 * all generations share these EDMA error cause bits
1524 */
1525
1526 if (edma_err_cause & EDMA_ERR_DEV)
1527 err_mask |= AC_ERR_DEV;
1528 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
Jeff Garzik6c1153e2007-07-13 15:20:15 -04001529 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001530 EDMA_ERR_INTRL_PAR)) {
1531 err_mask |= AC_ERR_ATA_BUS;
1532 action |= ATA_EH_HARDRESET;
Tejun Heob64bbc32007-07-16 14:29:39 +09001533 ata_ehi_push_desc(ehi, "parity error");
Brett Russafb0edd2005-10-05 17:08:42 -04001534 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001535 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1536 ata_ehi_hotplugged(ehi);
1537 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
Tejun Heob64bbc32007-07-16 14:29:39 +09001538 "dev disconnect" : "dev connect");
Mark Lord3606a382008-01-26 18:28:23 -05001539 action |= ATA_EH_HARDRESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001540 }
1541
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04001542 if (IS_GEN_I(hpriv)) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001543 eh_freeze_mask = EDMA_EH_FREEZE_5;
1544
1545 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1546 struct mv_port_priv *pp = ap->private_data;
1547 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001548 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001549 }
1550 } else {
1551 eh_freeze_mask = EDMA_EH_FREEZE;
1552
1553 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1554 struct mv_port_priv *pp = ap->private_data;
1555 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001556 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001557 }
1558
1559 if (edma_err_cause & EDMA_ERR_SERR) {
Tejun Heo936fd732007-08-06 18:36:23 +09001560 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1561 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001562 err_mask = AC_ERR_ATA_BUS;
1563 action |= ATA_EH_HARDRESET;
1564 }
1565 }
Brett Russ20f733e2005-09-01 18:26:17 -04001566
1567 /* Clear EDMA now that SERR cleanup done */
Mark Lord3606a382008-01-26 18:28:23 -05001568 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001569
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001570 if (!err_mask) {
1571 err_mask = AC_ERR_OTHER;
1572 action |= ATA_EH_HARDRESET;
1573 }
1574
1575 ehi->serror |= serr;
1576 ehi->action |= action;
1577
1578 if (qc)
1579 qc->err_mask |= err_mask;
1580 else
1581 ehi->err_mask |= err_mask;
1582
1583 if (edma_err_cause & eh_freeze_mask)
1584 ata_port_freeze(ap);
1585 else
1586 ata_port_abort(ap);
1587}
1588
/* Handle a PIO-mode device interrupt: reading the ATA status register
 * also acks the interrupt; then complete the active command, if any.
 */
static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}
1610
/* Drain the EDMA response queue: complete every command for which the
 * hardware has produced a CRPB since our last pass, then publish the
 * updated software consumer index back to the hardware.
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			/* error path takes over; consumer index is
			 * deliberately NOT advanced or written back
			 */
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	/* write the new out-pointer only once, after draining */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1676
Brett Russ05b308e2005-10-05 17:08:53 -04001677/**
1678 * mv_host_intr - Handle all interrupts on the given host controller
Jeff Garzikcca39742006-08-24 03:19:22 -04001679 * @host: host specific structure
Brett Russ05b308e2005-10-05 17:08:53 -04001680 * @relevant: port error bits relevant to this host controller
1681 * @hc: which host controller we're to look at
1682 *
1683 * Read then write clear the HC interrupt status then walk each
1684 * port connected to the HC and see if it needs servicing. Port
1685 * success ints are reported in the HC interrupt status reg, the
1686 * port error ints are reported in the higher level main
1687 * interrupt status register and thus are passed in via the
1688 * 'relevant' argument.
1689 *
1690 * LOCKING:
1691 * Inherited from caller.
1692 */
Jeff Garzikcca39742006-08-24 03:19:22 -04001693static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
Brett Russ20f733e2005-09-01 18:26:17 -04001694{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001695 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
Brett Russ20f733e2005-09-01 18:26:17 -04001696 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
Brett Russ20f733e2005-09-01 18:26:17 -04001697 u32 hc_irq_cause;
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001698 int port, port0;
Brett Russ20f733e2005-09-01 18:26:17 -04001699
Jeff Garzik35177262007-02-24 21:26:42 -05001700 if (hc == 0)
Brett Russ20f733e2005-09-01 18:26:17 -04001701 port0 = 0;
Jeff Garzik35177262007-02-24 21:26:42 -05001702 else
Brett Russ20f733e2005-09-01 18:26:17 -04001703 port0 = MV_PORTS_PER_HC;
Brett Russ20f733e2005-09-01 18:26:17 -04001704
1705 /* we'll need the HC success int register in most cases */
1706 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001707 if (!hc_irq_cause)
1708 return;
1709
1710 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001711
1712 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001713 hc, relevant, hc_irq_cause);
Brett Russ20f733e2005-09-01 18:26:17 -04001714
1715 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
Jeff Garzikcca39742006-08-24 03:19:22 -04001716 struct ata_port *ap = host->ports[port];
Mark Lord63af2a52006-03-29 09:50:31 -05001717 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001718 int have_err_bits, hard_port, shift;
Jeff Garzik55d8ca42006-03-29 19:43:31 -05001719
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001720 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
Jeff Garzika2c91a82005-11-17 05:44:44 -05001721 continue;
1722
Brett Russ31961942005-09-30 01:36:00 -04001723 shift = port << 1; /* (port * 2) */
Brett Russ20f733e2005-09-01 18:26:17 -04001724 if (port >= MV_PORTS_PER_HC) {
1725 shift++; /* skip bit 8 in the HC Main IRQ reg */
1726 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001727 have_err_bits = ((PORT0_ERR << shift) & relevant);
1728
1729 if (unlikely(have_err_bits)) {
1730 struct ata_queued_cmd *qc;
1731
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001732 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001733 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1734 continue;
1735
1736 mv_err_intr(ap, qc);
1737 continue;
Brett Russ20f733e2005-09-01 18:26:17 -04001738 }
Jeff Garzik8b260242005-11-12 12:32:50 -05001739
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001740 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1741
1742 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1743 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1744 mv_intr_edma(ap);
1745 } else {
1746 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1747 mv_intr_pio(ap);
Brett Russ20f733e2005-09-01 18:26:17 -04001748 }
1749 }
1750 VPRINTK("EXIT\n");
1751}
1752
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001753static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1754{
Mark Lord02a121d2007-12-01 13:07:22 -05001755 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001756 struct ata_port *ap;
1757 struct ata_queued_cmd *qc;
1758 struct ata_eh_info *ehi;
1759 unsigned int i, err_mask, printed = 0;
1760 u32 err_cause;
1761
Mark Lord02a121d2007-12-01 13:07:22 -05001762 err_cause = readl(mmio + hpriv->irq_cause_ofs);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001763
1764 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1765 err_cause);
1766
1767 DPRINTK("All regs @ PCI error\n");
1768 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1769
Mark Lord02a121d2007-12-01 13:07:22 -05001770 writelfl(0, mmio + hpriv->irq_cause_ofs);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001771
1772 for (i = 0; i < host->n_ports; i++) {
1773 ap = host->ports[i];
Tejun Heo936fd732007-08-06 18:36:23 +09001774 if (!ata_link_offline(&ap->link)) {
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001775 ehi = &ap->link.eh_info;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001776 ata_ehi_clear_desc(ehi);
1777 if (!printed++)
1778 ata_ehi_push_desc(ehi,
1779 "PCI err cause 0x%08x", err_cause);
1780 err_mask = AC_ERR_HOST_BUS;
1781 ehi->action = ATA_EH_HARDRESET;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001782 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001783 if (qc)
1784 qc->err_mask |= err_mask;
1785 else
1786 ehi->err_mask |= err_mask;
1787
1788 ata_port_freeze(ap);
1789 }
1790 }
1791}
1792
Brett Russ05b308e2005-10-05 17:08:53 -04001793/**
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001794 * mv_interrupt - Main interrupt event handler
Brett Russ05b308e2005-10-05 17:08:53 -04001795 * @irq: unused
1796 * @dev_instance: private data; in this case the host structure
Brett Russ05b308e2005-10-05 17:08:53 -04001797 *
1798 * Read the read only register to determine if any host
1799 * controllers have pending interrupts. If so, call lower level
1800 * routine to handle. Also check for PCI errors which are only
1801 * reported here.
1802 *
Jeff Garzik8b260242005-11-12 12:32:50 -05001803 * LOCKING:
Jeff Garzikcca39742006-08-24 03:19:22 -04001804 * This routine holds the host lock while processing pending
Brett Russ05b308e2005-10-05 17:08:53 -04001805 * interrupts.
1806 */
David Howells7d12e782006-10-05 14:55:46 +01001807static irqreturn_t mv_interrupt(int irq, void *dev_instance)
Brett Russ20f733e2005-09-01 18:26:17 -04001808{
Jeff Garzikcca39742006-08-24 03:19:22 -04001809 struct ata_host *host = dev_instance;
Brett Russ20f733e2005-09-01 18:26:17 -04001810 unsigned int hc, handled = 0, n_hcs;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001811 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
Mark Lord646a4da2008-01-26 18:30:37 -05001812 u32 irq_stat, irq_mask;
Brett Russ20f733e2005-09-01 18:26:17 -04001813
Mark Lord646a4da2008-01-26 18:30:37 -05001814 spin_lock(&host->lock);
Brett Russ20f733e2005-09-01 18:26:17 -04001815 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
Mark Lord646a4da2008-01-26 18:30:37 -05001816 irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001817
1818 /* check the cases where we either have nothing pending or have read
1819 * a bogus register value which can indicate HW removal or PCI fault
1820 */
Mark Lord646a4da2008-01-26 18:30:37 -05001821 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1822 goto out_unlock;
Brett Russ20f733e2005-09-01 18:26:17 -04001823
Jeff Garzikcca39742006-08-24 03:19:22 -04001824 n_hcs = mv_get_hc_count(host->ports[0]->flags);
Brett Russ20f733e2005-09-01 18:26:17 -04001825
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001826 if (unlikely(irq_stat & PCI_ERR)) {
1827 mv_pci_error(host, mmio);
1828 handled = 1;
1829 goto out_unlock; /* skip all other HC irq handling */
1830 }
1831
Brett Russ20f733e2005-09-01 18:26:17 -04001832 for (hc = 0; hc < n_hcs; hc++) {
1833 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1834 if (relevant) {
Jeff Garzikcca39742006-08-24 03:19:22 -04001835 mv_host_intr(host, relevant, hc);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001836 handled = 1;
Brett Russ20f733e2005-09-01 18:26:17 -04001837 }
1838 }
Mark Lord615ab952006-05-19 16:24:56 -04001839
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001840out_unlock:
Jeff Garzikcca39742006-08-24 03:19:22 -04001841 spin_unlock(&host->lock);
Brett Russ20f733e2005-09-01 18:26:17 -04001842
1843 return IRQ_RETVAL(handled);
1844}
1845
Jeff Garzikc9d39132005-11-13 17:47:51 -05001846static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1847{
1848 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1849 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1850
1851 return hc_mmio + ofs;
1852}
1853
1854static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1855{
1856 unsigned int ofs;
1857
1858 switch (sc_reg_in) {
1859 case SCR_STATUS:
1860 case SCR_ERROR:
1861 case SCR_CONTROL:
1862 ofs = sc_reg_in * sizeof(u32);
1863 break;
1864 default:
1865 ofs = 0xffffffffU;
1866 break;
1867 }
1868 return ofs;
1869}
1870
Tejun Heoda3dbb12007-07-16 14:29:40 +09001871static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001872{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001873 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1874 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001875 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1876
Tejun Heoda3dbb12007-07-16 14:29:40 +09001877 if (ofs != 0xffffffffU) {
1878 *val = readl(addr + ofs);
1879 return 0;
1880 } else
1881 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001882}
1883
Tejun Heoda3dbb12007-07-16 14:29:40 +09001884static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001885{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001886 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1887 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001888 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1889
Tejun Heoda3dbb12007-07-16 14:29:40 +09001890 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001891 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001892 return 0;
1893 } else
1894 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001895}
1896
Jeff Garzik522479f2005-11-12 22:14:02 -05001897static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1898{
Jeff Garzik522479f2005-11-12 22:14:02 -05001899 int early_5080;
1900
Auke Kok44c10132007-06-08 15:46:36 -07001901 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
Jeff Garzik522479f2005-11-12 22:14:02 -05001902
1903 if (!early_5080) {
1904 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1905 tmp |= (1 << 0);
1906 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1907 }
1908
1909 mv_reset_pci_bus(pdev, mmio);
1910}
1911
/* Program the 50xx flash control register with the fixed value
 * 0x0fcfffff; the bit meanings are not documented here -- TODO
 * confirm against the Marvell datasheet.
 */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1916
Jeff Garzik47c2b672005-11-12 21:13:17 -05001917static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001918 void __iomem *mmio)
1919{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001920 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1921 u32 tmp;
1922
1923 tmp = readl(phy_mmio + MV5_PHY_MODE);
1924
1925 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1926 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001927}
1928
Jeff Garzik47c2b672005-11-12 21:13:17 -05001929static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001930{
Jeff Garzik522479f2005-11-12 22:14:02 -05001931 u32 tmp;
1932
1933 writel(0, mmio + MV_GPIO_PORT_CTL);
1934
1935 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1936
1937 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1938 tmp |= ~(1 << 0);
1939 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001940}
1941
Jeff Garzik2a47ce02005-11-12 23:05:14 -05001942static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1943 unsigned int port)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05001944{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001945 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1946 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1947 u32 tmp;
1948 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1949
1950 if (fix_apm_sq) {
1951 tmp = readl(phy_mmio + MV5_LT_MODE);
1952 tmp |= (1 << 19);
1953 writel(tmp, phy_mmio + MV5_LT_MODE);
1954
1955 tmp = readl(phy_mmio + MV5_PHY_CTL);
1956 tmp &= ~0x3;
1957 tmp |= 0x1;
1958 writel(tmp, phy_mmio + MV5_PHY_CTL);
1959 }
1960
1961 tmp = readl(phy_mmio + MV5_PHY_MODE);
1962 tmp &= ~mask;
1963 tmp |= hpriv->signal[port].pre;
1964 tmp |= hpriv->signal[port].amps;
1965 writel(tmp, phy_mmio + MV5_PHY_MODE);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05001966}
1967
Jeff Garzikc9d39132005-11-13 17:47:51 -05001968
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Quiesce and re-initialize one 50xx port: disable EDMA, pulse the
 * channel reset, then zero the EDMA queue/irq registers and program
 * fixed config and IORDY-timeout values.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/* disable EDMA before touching its registers */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1995
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Zero a 50xx host controller's per-HC registers (offsets 0x0c-0x18)
 * and adjust register 0x20: keep only the bits in 0x1c1c1c1c and set
 * 0x03030303 -- per-port byte fields, presumably; exact meaning not
 * documented here (TODO: confirm against datasheet).
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
2014
2015static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2016 unsigned int n_hc)
2017{
2018 unsigned int hc, port;
2019
2020 for (hc = 0; hc < n_hc; hc++) {
2021 for (port = 0; port < MV_PORTS_PER_HC; port++)
2022 mv5_reset_hc_port(hpriv, mmio,
2023 (hc * MV_PORTS_PER_HC) + port);
2024
2025 mv5_reset_one_hc(hpriv, mmio, hc);
2026 }
2027
2028 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002029}
2030
Jeff Garzik101ffae2005-11-12 22:17:49 -05002031#undef ZERO
2032#define ZERO(reg) writel(0, mmio + (reg))
2033static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
2034{
Mark Lord02a121d2007-12-01 13:07:22 -05002035 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2036 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzik101ffae2005-11-12 22:17:49 -05002037 u32 tmp;
2038
2039 tmp = readl(mmio + MV_PCI_MODE);
2040 tmp &= 0xff00ffff;
2041 writel(tmp, mmio + MV_PCI_MODE);
2042
2043 ZERO(MV_PCI_DISC_TIMER);
2044 ZERO(MV_PCI_MSI_TRIGGER);
2045 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
2046 ZERO(HC_MAIN_IRQ_MASK_OFS);
2047 ZERO(MV_PCI_SERR_MASK);
Mark Lord02a121d2007-12-01 13:07:22 -05002048 ZERO(hpriv->irq_cause_ofs);
2049 ZERO(hpriv->irq_mask_ofs);
Jeff Garzik101ffae2005-11-12 22:17:49 -05002050 ZERO(MV_PCI_ERR_LOW_ADDRESS);
2051 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2052 ZERO(MV_PCI_ERR_ATTRIBUTE);
2053 ZERO(MV_PCI_ERR_COMMAND);
2054}
2055#undef ZERO
2056
/* Gen II flash reset: perform the 50xx flash init, then set GPIO port
 * control bits 5 and 6 while keeping only bits 1:0 of the old value.
 */
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
2068
2069/**
2070 * mv6_reset_hc - Perform the 6xxx global soft reset
2071 * @mmio: base address of the HBA
2072 *
2073 * This routine only applies to 6xxx parts.
2074 *
2075 * LOCKING:
2076 * Inherited from caller.
2077 */
Jeff Garzikc9d39132005-11-13 17:47:51 -05002078static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2079 unsigned int n_hc)
Jeff Garzik101ffae2005-11-12 22:17:49 -05002080{
2081 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2082 int i, rc = 0;
2083 u32 t;
2084
2085 /* Following procedure defined in PCI "main command and status
2086 * register" table.
2087 */
2088 t = readl(reg);
2089 writel(t | STOP_PCI_MASTER, reg);
2090
2091 for (i = 0; i < 1000; i++) {
2092 udelay(1);
2093 t = readl(reg);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04002094 if (PCI_MASTER_EMPTY & t)
Jeff Garzik101ffae2005-11-12 22:17:49 -05002095 break;
Jeff Garzik101ffae2005-11-12 22:17:49 -05002096 }
2097 if (!(PCI_MASTER_EMPTY & t)) {
2098 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2099 rc = 1;
2100 goto done;
2101 }
2102
2103 /* set reset */
2104 i = 5;
2105 do {
2106 writel(t | GLOB_SFT_RST, reg);
2107 t = readl(reg);
2108 udelay(1);
2109 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2110
2111 if (!(GLOB_SFT_RST & t)) {
2112 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2113 rc = 1;
2114 goto done;
2115 }
2116
2117 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2118 i = 5;
2119 do {
2120 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2121 t = readl(reg);
2122 udelay(1);
2123 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2124
2125 if (GLOB_SFT_RST & t) {
2126 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2127 rc = 1;
2128 }
2129done:
2130 return rc;
2131}
2132
Jeff Garzik47c2b672005-11-12 21:13:17 -05002133static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002134 void __iomem *mmio)
2135{
2136 void __iomem *port_mmio;
2137 u32 tmp;
2138
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002139 tmp = readl(mmio + MV_RESET_CFG);
2140 if ((tmp & (1 << 0)) == 0) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002141 hpriv->signal[idx].amps = 0x7 << 8;
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002142 hpriv->signal[idx].pre = 0x1 << 5;
2143 return;
2144 }
2145
2146 port_mmio = mv_port_base(mmio, idx);
2147 tmp = readl(port_mmio + PHY_MODE2);
2148
2149 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2150 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2151}
2152
Jeff Garzik47c2b672005-11-12 21:13:17 -05002153static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002154{
Jeff Garzik47c2b672005-11-12 21:13:17 -05002155 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002156}
2157
/* Apply Gen-II (60x1) PHY errata workarounds for one port, then restore
 * the amplitude/pre-emphasis values previously saved by mv6_read_preamp().
 * The read/modify/write order and the udelay() pauses follow the vendor
 * errata procedure and must not be reordered.
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	/* both fixups apply to the same errata revisions (B2 / C0) */
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		/* pulse bit 31 with bit 16 cleared, then clear both */
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* B2 parts: register 0x310 must be preserved across the
		 * PHY_MODE4 update (save here, restore below).
		 */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
2223
/* Assert, then deassert, ATA_RST on one channel's eDMA engine, applying
 * the per-generation PHY errata fixups afterwards.  Delays are mandated
 * by the reset sequence; do not reorder or remove.
 */
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		/* Gen-II: program the SATA interface control register
		 * while the channel is held in reset.
		 */
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);	/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	/* Gen-I parts want extra settle time after reset */
	if (IS_GEN_I(hpriv))
		mdelay(1);
}
2250
Jeff Garzikc9d39132005-11-13 17:47:51 -05002251/**
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002252 * mv_phy_reset - Perform eDMA reset followed by COMRESET
Jeff Garzikc9d39132005-11-13 17:47:51 -05002253 * @ap: ATA channel to manipulate
2254 *
 * Part of this is taken from __sata_phy_reset; unlike that routine,
 * this one sleeps (msleep) while polling the link.
 *
 * LOCKING:
 * Inherited from caller.  NOTE: this routine calls msleep() and so
 * must not be invoked from interrupt context.
2261 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			unsigned long deadline)
{
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;		/* COMRESET retries for the Gen-II errata below */
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* Issue COMRESET via SControl */
comreset_retry:
	/* DET=1: start the COMRESET sequence ... */
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
	msleep(1);

	/* ... DET=0: release it and let the link renegotiate */
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
	msleep(20);

	/* poll SStatus.DET until the link settles (0 = no device,
	 * 3 = device + established comms) or the EH deadline passes
	 */
	do {
		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	if (ata_link_offline(&ap->link)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		msleep(500);
		if (retry-- <= 0)
			break;
		if (time_after(jiffies, deadline))
			break;
	}

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap->link.device, 1, NULL);

	/* clear any EDMA error state latched during the reset */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* eDMA must have been stopped before a PHY reset */
	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}
2354
Tejun Heocc0680a2007-08-06 18:36:23 +09002355static int mv_prereset(struct ata_link *link, unsigned long deadline)
Jeff Garzik22374672005-11-17 10:59:48 -05002356{
Tejun Heocc0680a2007-08-06 18:36:23 +09002357 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002358 struct mv_port_priv *pp = ap->private_data;
Tejun Heocc0680a2007-08-06 18:36:23 +09002359 struct ata_eh_context *ehc = &link->eh_context;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002360 int rc;
Jeff Garzik0ea9e172007-07-13 17:06:45 -04002361
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002362 rc = mv_stop_dma(ap);
2363 if (rc)
2364 ehc->i.action |= ATA_EH_HARDRESET;
2365
2366 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2367 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2368 ehc->i.action |= ATA_EH_HARDRESET;
2369 }
2370
2371 /* if we're about to do hardreset, nothing more to do */
2372 if (ehc->i.action & ATA_EH_HARDRESET)
2373 return 0;
2374
Tejun Heocc0680a2007-08-06 18:36:23 +09002375 if (ata_link_online(link))
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002376 rc = ata_wait_ready(ap, deadline);
2377 else
2378 rc = -ENODEV;
2379
2380 return rc;
Jeff Garzik22374672005-11-17 10:59:48 -05002381}
2382
Tejun Heocc0680a2007-08-06 18:36:23 +09002383static int mv_hardreset(struct ata_link *link, unsigned int *class,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002384 unsigned long deadline)
2385{
Tejun Heocc0680a2007-08-06 18:36:23 +09002386 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002387 struct mv_host_priv *hpriv = ap->host->private_data;
2388 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2389
2390 mv_stop_dma(ap);
2391
2392 mv_channel_reset(hpriv, mmio, ap->port_no);
2393
2394 mv_phy_reset(ap, class, deadline);
2395
2396 return 0;
2397}
2398
Tejun Heocc0680a2007-08-06 18:36:23 +09002399static void mv_postreset(struct ata_link *link, unsigned int *classes)
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002400{
Tejun Heocc0680a2007-08-06 18:36:23 +09002401 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002402 u32 serr;
2403
2404 /* print link status */
Tejun Heocc0680a2007-08-06 18:36:23 +09002405 sata_print_link_status(link);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002406
2407 /* clear SError */
Tejun Heocc0680a2007-08-06 18:36:23 +09002408 sata_scr_read(link, SCR_ERROR, &serr);
2409 sata_scr_write_flush(link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002410
2411 /* bail out if no device is present */
2412 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2413 DPRINTK("EXIT, no device\n");
2414 return;
2415 }
2416
2417 /* set up device control */
2418 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2419}
2420
/* libata error_handler hook: run the standard EH state machine using
 * this driver's prereset/hardreset/postreset methods and the generic
 * softreset.
 */
static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}
2426
/* Hook run after an EH-issued internal command: stop the port's eDMA
 * engine so subsequent PIO/polled register access works.
 */
static void mv_post_int_cmd(struct ata_queued_cmd *qc)
{
	mv_stop_dma(qc->ap);
}
2431
2432static void mv_eh_freeze(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04002433{
Tejun Heo0d5ff562007-02-01 15:06:36 +09002434 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002435 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2436 u32 tmp, mask;
2437 unsigned int shift;
Brett Russ31961942005-09-30 01:36:00 -04002438
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002439 /* FIXME: handle coalescing completion events properly */
Brett Russ31961942005-09-30 01:36:00 -04002440
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002441 shift = ap->port_no * 2;
2442 if (hc > 0)
2443 shift++;
Brett Russ31961942005-09-30 01:36:00 -04002444
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002445 mask = 0x3 << shift;
Brett Russ31961942005-09-30 01:36:00 -04002446
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002447 /* disable assertion of portN err, done events */
2448 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2449 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2450}
2451
/* EH thaw hook: clear any stale EDMA/HC interrupt causes for this port,
 * then re-enable its err/done bits in the main IRQ mask register.
 */
static void mv_eh_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;	/* which host controller */
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	/* each port owns a 2-bit field in the main mask register;
	 * ports on the second HC are shifted one extra position and
	 * use a 0-3 index local to that HC.
	 */
	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);		/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8));	/* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
2484
Brett Russ05b308e2005-10-05 17:08:53 -04002485/**
2486 * mv_port_init - Perform some early initialization on a single port.
2487 * @port: libata data structure storing shadow register addresses
2488 * @port_mmio: base address of the port
2489 *
2490 * Initialize shadow register mmio addresses, clear outstanding
2491 * interrupts on the port, and unmask interrupts for the future
2492 * start of the port.
2493 *
2494 * LOCKING:
2495 * Inherited from caller.
2496 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup: the taskfile shadow registers live in a
	 * block of 32-bit slots starting at SHD_BLK_OFS, one slot per
	 * ATA_REG_* index.
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions:
	 * write SError's current value back to itself to clear its
	 * latched bits, then zero the EDMA error cause register.
	 */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
2533
/* Identify the chip variant from @board_idx and the PCI revision, select
 * the matching hw-ops vtable, record generation and errata flags, and set
 * the PCI vs PCI-Express interrupt register offsets.  Returns 0 on
 * success, 1 on an invalid board index.
 */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* FALL THRU: chip_7042 is the PCIe flavor of chip_6042 and
		 * shares its ops, generation flag, and errata handling.
		 */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		dev_printk(KERN_ERR, &pdev->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	/* PCIe chips expose the interrupt cause/mask registers at
	 * different offsets than conventional-PCI chips.
	 */
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
2669
Brett Russ05b308e2005-10-05 17:08:53 -04002670/**
Jeff Garzik47c2b672005-11-12 21:13:17 -05002671 * mv_init_host - Perform some early initialization of the host.
Tejun Heo4447d352007-04-17 23:44:08 +09002672 * @host: ATA host to initialize
2673 * @board_idx: controller index
Brett Russ05b308e2005-10-05 17:08:53 -04002674 *
2675 * If possible, do an early global reset of the host. Then do
2676 * our port init and clear/unmask all/relevant host interrupts.
2677 *
2678 * LOCKING:
2679 * Inherited from caller.
2680 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask: silence everything until setup is done */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	/* identify chip variant and populate hpriv->ops / errata flags */
	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* save per-port PHY amplitude/pre-emphasis before resetting */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	/* per-port interface setup and PHY errata fixups */
	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);	/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	/* set up each port's shadow registers and describe its resources */
	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);
		unsigned int offset = port_mmio - mmio;

		mv_port_init(&ap->ioaddr, port_mmio);

		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	/* and unmask interrupt generation for host regs */
	writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);

	/* Gen-I chips use a different main-IRQ mask set than later gens */
	if (IS_GEN_I(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + hpriv->irq_cause_ofs),
		readl(mmio + hpriv->irq_mask_ofs));

done:
	return rc;
}
2765
Brett Russ05b308e2005-10-05 17:08:53 -04002766/**
2767 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002768 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002769 *
2770 * FIXME: complete this.
2771 *
2772 * LOCKING:
2773 * Inherited from caller.
2774 */
Tejun Heo4447d352007-04-17 23:44:08 +09002775static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002776{
Tejun Heo4447d352007-04-17 23:44:08 +09002777 struct pci_dev *pdev = to_pci_dev(host->dev);
2778 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07002779 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002780 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002781
2782 /* Use this to determine the HW stepping of the chip so we know
2783 * what errata to workaround
2784 */
Brett Russ31961942005-09-30 01:36:00 -04002785 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2786 if (scc == 0)
2787 scc_s = "SCSI";
2788 else if (scc == 0x01)
2789 scc_s = "RAID";
2790 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002791 scc_s = "?";
2792
2793 if (IS_GEN_I(hpriv))
2794 gen = "I";
2795 else if (IS_GEN_II(hpriv))
2796 gen = "II";
2797 else if (IS_GEN_IIE(hpriv))
2798 gen = "IIE";
2799 else
2800 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002801
Jeff Garzika9524a72005-10-30 14:39:11 -05002802 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002803 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2804 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002805 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2806}
2807
Mark Lordda2fa9b2008-01-26 18:32:45 -05002808static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2809{
2810 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2811 MV_CRQB_Q_SZ, 0);
2812 if (!hpriv->crqb_pool)
2813 return -ENOMEM;
2814
2815 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2816 MV_CRPB_Q_SZ, 0);
2817 if (!hpriv->crpb_pool)
2818 return -ENOMEM;
2819
2820 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2821 MV_SG_TBL_SZ, 0);
2822 if (!hpriv->sg_tbl_pool)
2823 return -ENOMEM;
2824
2825 return 0;
2826}
2827
Brett Russ05b308e2005-10-05 17:08:53 -04002828/**
2829 * mv_init_one - handle a positive probe of a Marvell host
2830 * @pdev: PCI device found
2831 * @ent: PCI device ID entry for the matched host
2832 *
2833 * LOCKING:
2834 * Inherited from caller.
2835 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;	/* print the banner only once */
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	/* devm/pcim resources below are auto-released on probe failure,
	 * which is why the error paths can simply return.
	 */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	/* configure 64-bit (or fall back to 32-bit) DMA masks */
	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts: prefer MSI; fall back to legacy INTx */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
2894
2895static int __init mv_init(void)
2896{
Pavel Roskinb7887192006-08-10 18:13:18 +09002897 return pci_register_driver(&mv_pci_driver);
Brett Russ20f733e2005-09-01 18:26:17 -04002898}
2899
/*
 * mv_exit - module unload hook
 *
 * Unregisters the PCI driver; managed (devm/pcim) resources of each
 * bound device are released as the devices are detached.
 */
static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}
2904
/* Module identification and hotplug/autoload metadata. */
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);	/* enables autoload on PCI id match */
MODULE_VERSION(DRV_VERSION);

/* "msi" is a file-scope int defined earlier in the file (not visible
 * in this chunk); 0444 exposes it read-only via sysfs. */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);