blob: f341a82d27bf26366e9a4703645da6ab1c70a071 [file] [log] [blame]
/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
23
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  2) Improve/fix IRQ and error handling sequences.

  3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).

  4) Think about TCQ support here, and for libata in general
  with controllers that support it via host-queuing hardware
  (a software-only implementation could be a nightmare).

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is quite often not
  worth the latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

*/
63
64
Brett Russ20f733e2005-09-01 18:26:17 -040065#include <linux/kernel.h>
66#include <linux/module.h>
67#include <linux/pci.h>
68#include <linux/init.h>
69#include <linux/blkdev.h>
70#include <linux/delay.h>
71#include <linux/interrupt.h>
Andrew Morton8d8b6002008-02-04 23:43:44 -080072#include <linux/dmapool.h>
Brett Russ20f733e2005-09-01 18:26:17 -040073#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050074#include <linux/device.h>
Saeed Bisharaf351b2d2008-02-01 18:08:03 -050075#include <linux/platform_device.h>
76#include <linux/ata_platform.h>
Brett Russ20f733e2005-09-01 18:26:17 -040077#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050078#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040079#include <scsi/scsi_device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040080#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040081
/* Driver identity, reported through the libata/SCSI layers. */
#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.20"
Brett Russ20f733e2005-09-01 18:26:17 -040084
85enum {
86 /* BAR's are enumerated in terms of pci_resource_start() terms */
87 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
88 MV_IO_BAR = 2, /* offset 0x18: IO space */
89 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
90
91 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
92 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
93
94 MV_PCI_REG_BASE = 0,
95 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
Mark Lord615ab952006-05-19 16:24:56 -040096 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
97 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
98 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
99 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
100 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
101
Brett Russ20f733e2005-09-01 18:26:17 -0400102 MV_SATAHC0_REG_BASE = 0x20000,
Jeff Garzik522479f2005-11-12 22:14:02 -0500103 MV_FLASH_CTL = 0x1046c,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500104 MV_GPIO_PORT_CTL = 0x104f0,
105 MV_RESET_CFG = 0x180d8,
Brett Russ20f733e2005-09-01 18:26:17 -0400106
107 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
108 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
109 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
110 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
111
Brett Russ31961942005-09-30 01:36:00 -0400112 MV_MAX_Q_DEPTH = 32,
113 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
114
115 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
116 * CRPB needs alignment on a 256B boundary. Size == 256B
Brett Russ31961942005-09-30 01:36:00 -0400117 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
118 */
119 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
120 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
Mark Lordda2fa9b2008-01-26 18:32:45 -0500121 MV_MAX_SG_CT = 256,
Brett Russ31961942005-09-30 01:36:00 -0400122 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
Brett Russ31961942005-09-30 01:36:00 -0400123
Brett Russ20f733e2005-09-01 18:26:17 -0400124 MV_PORTS_PER_HC = 4,
125 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
126 MV_PORT_HC_SHIFT = 2,
Brett Russ31961942005-09-30 01:36:00 -0400127 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
Brett Russ20f733e2005-09-01 18:26:17 -0400128 MV_PORT_MASK = 3,
129
130 /* Host Flags */
131 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
132 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100133 /* SoC integrated controllers, no PCI interface */
134 MV_FLAG_SOC = (1 << 28),
135
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400136 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400137 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
138 ATA_FLAG_PIO_POLLING,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500139 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
Brett Russ20f733e2005-09-01 18:26:17 -0400140
Brett Russ31961942005-09-30 01:36:00 -0400141 CRQB_FLAG_READ = (1 << 0),
142 CRQB_TAG_SHIFT = 1,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400143 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
144 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
Brett Russ31961942005-09-30 01:36:00 -0400145 CRQB_CMD_ADDR_SHIFT = 8,
146 CRQB_CMD_CS = (0x2 << 11),
147 CRQB_CMD_LAST = (1 << 15),
148
149 CRPB_FLAG_STATUS_SHIFT = 8,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400150 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
151 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
Brett Russ31961942005-09-30 01:36:00 -0400152
153 EPRD_FLAG_END_OF_TBL = (1 << 31),
154
Brett Russ20f733e2005-09-01 18:26:17 -0400155 /* PCI interface registers */
156
Brett Russ31961942005-09-30 01:36:00 -0400157 PCI_COMMAND_OFS = 0xc00,
158
Brett Russ20f733e2005-09-01 18:26:17 -0400159 PCI_MAIN_CMD_STS_OFS = 0xd30,
160 STOP_PCI_MASTER = (1 << 2),
161 PCI_MASTER_EMPTY = (1 << 3),
162 GLOB_SFT_RST = (1 << 4),
163
Jeff Garzik522479f2005-11-12 22:14:02 -0500164 MV_PCI_MODE = 0xd00,
165 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
166 MV_PCI_DISC_TIMER = 0xd04,
167 MV_PCI_MSI_TRIGGER = 0xc38,
168 MV_PCI_SERR_MASK = 0xc28,
169 MV_PCI_XBAR_TMOUT = 0x1d04,
170 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
171 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
172 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
173 MV_PCI_ERR_COMMAND = 0x1d50,
174
Mark Lord02a121d2007-12-01 13:07:22 -0500175 PCI_IRQ_CAUSE_OFS = 0x1d58,
176 PCI_IRQ_MASK_OFS = 0x1d5c,
Brett Russ20f733e2005-09-01 18:26:17 -0400177 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
178
Mark Lord02a121d2007-12-01 13:07:22 -0500179 PCIE_IRQ_CAUSE_OFS = 0x1900,
180 PCIE_IRQ_MASK_OFS = 0x1910,
Mark Lord646a4da2008-01-26 18:30:37 -0500181 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
Mark Lord02a121d2007-12-01 13:07:22 -0500182
Brett Russ20f733e2005-09-01 18:26:17 -0400183 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
184 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500185 HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
186 HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
Brett Russ20f733e2005-09-01 18:26:17 -0400187 PORT0_ERR = (1 << 0), /* shift by port # */
188 PORT0_DONE = (1 << 1), /* shift by port # */
189 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
190 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
191 PCI_ERR = (1 << 18),
192 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
193 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500194 PORTS_0_3_COAL_DONE = (1 << 8),
195 PORTS_4_7_COAL_DONE = (1 << 17),
Brett Russ20f733e2005-09-01 18:26:17 -0400196 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
197 GPIO_INT = (1 << 22),
198 SELF_INT = (1 << 23),
199 TWSI_INT = (1 << 24),
200 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500201 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500202 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
Jeff Garzik8b260242005-11-12 12:32:50 -0500203 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
Brett Russ20f733e2005-09-01 18:26:17 -0400204 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
205 HC_MAIN_RSVD),
Jeff Garzikfb621e22007-02-25 04:19:45 -0500206 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
207 HC_MAIN_RSVD_5),
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500208 HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
Brett Russ20f733e2005-09-01 18:26:17 -0400209
210 /* SATAHC registers */
211 HC_CFG_OFS = 0,
212
213 HC_IRQ_CAUSE_OFS = 0x14,
Brett Russ31961942005-09-30 01:36:00 -0400214 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
Brett Russ20f733e2005-09-01 18:26:17 -0400215 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
216 DEV_IRQ = (1 << 8), /* shift by port # */
217
218 /* Shadow block registers */
Brett Russ31961942005-09-30 01:36:00 -0400219 SHD_BLK_OFS = 0x100,
220 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
Brett Russ20f733e2005-09-01 18:26:17 -0400221
222 /* SATA registers */
223 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
224 SATA_ACTIVE_OFS = 0x350,
Mark Lord0c589122008-01-26 18:31:16 -0500225 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500226 PHY_MODE3 = 0x310,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500227 PHY_MODE4 = 0x314,
228 PHY_MODE2 = 0x330,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500229 MV5_PHY_MODE = 0x74,
230 MV5_LT_MODE = 0x30,
231 MV5_PHY_CTL = 0x0C,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500232 SATA_INTERFACE_CTL = 0x050,
233
234 MV_M2_PREAMP_MASK = 0x7e0,
Brett Russ20f733e2005-09-01 18:26:17 -0400235
236 /* Port registers */
237 EDMA_CFG_OFS = 0,
Mark Lord0c589122008-01-26 18:31:16 -0500238 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
239 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
240 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
241 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
242 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
Brett Russ20f733e2005-09-01 18:26:17 -0400243
244 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
245 EDMA_ERR_IRQ_MASK_OFS = 0xc,
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400246 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
247 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
248 EDMA_ERR_DEV = (1 << 2), /* device error */
249 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
250 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
251 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400252 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
253 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400254 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400255 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400256 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
257 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
258 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
259 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
Mark Lord646a4da2008-01-26 18:30:37 -0500260
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400261 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500262 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
263 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
264 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
265 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
266
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400267 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500268
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400269 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500270 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
271 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
272 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
273 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
274 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
275
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400276 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500277
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400278 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400279 EDMA_ERR_OVERRUN_5 = (1 << 5),
280 EDMA_ERR_UNDERRUN_5 = (1 << 6),
Mark Lord646a4da2008-01-26 18:30:37 -0500281
282 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
283 EDMA_ERR_LNK_CTRL_RX_1 |
284 EDMA_ERR_LNK_CTRL_RX_3 |
285 EDMA_ERR_LNK_CTRL_TX,
286
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400287 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
288 EDMA_ERR_PRD_PAR |
289 EDMA_ERR_DEV_DCON |
290 EDMA_ERR_DEV_CON |
291 EDMA_ERR_SERR |
292 EDMA_ERR_SELF_DIS |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400293 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400294 EDMA_ERR_CRPB_PAR |
295 EDMA_ERR_INTRL_PAR |
296 EDMA_ERR_IORDY |
297 EDMA_ERR_LNK_CTRL_RX_2 |
298 EDMA_ERR_LNK_DATA_RX |
299 EDMA_ERR_LNK_DATA_TX |
300 EDMA_ERR_TRANS_PROTO,
301 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
302 EDMA_ERR_PRD_PAR |
303 EDMA_ERR_DEV_DCON |
304 EDMA_ERR_DEV_CON |
305 EDMA_ERR_OVERRUN_5 |
306 EDMA_ERR_UNDERRUN_5 |
307 EDMA_ERR_SELF_DIS_5 |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400308 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400309 EDMA_ERR_CRPB_PAR |
310 EDMA_ERR_INTRL_PAR |
311 EDMA_ERR_IORDY,
Brett Russ20f733e2005-09-01 18:26:17 -0400312
Brett Russ31961942005-09-30 01:36:00 -0400313 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
314 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400315
316 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
317 EDMA_REQ_Q_PTR_SHIFT = 5,
318
319 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
320 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
321 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400322 EDMA_RSP_Q_PTR_SHIFT = 3,
323
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400324 EDMA_CMD_OFS = 0x28, /* EDMA command register */
325 EDMA_EN = (1 << 0), /* enable EDMA */
326 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
327 ATA_RST = (1 << 2), /* reset trans/link/phy */
Brett Russ20f733e2005-09-01 18:26:17 -0400328
Jeff Garzikc9d39132005-11-13 17:47:51 -0500329 EDMA_IORDY_TMOUT = 0x34,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500330 EDMA_ARB_CFG = 0x38,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500331
Brett Russ31961942005-09-30 01:36:00 -0400332 /* Host private flags (hp_flags) */
333 MV_HP_FLAG_MSI = (1 << 0),
Jeff Garzik47c2b672005-11-12 21:13:17 -0500334 MV_HP_ERRATA_50XXB0 = (1 << 1),
335 MV_HP_ERRATA_50XXB2 = (1 << 2),
336 MV_HP_ERRATA_60X1B2 = (1 << 3),
337 MV_HP_ERRATA_60X1C0 = (1 << 4),
Jeff Garzike4e7b892006-01-31 12:18:41 -0500338 MV_HP_ERRATA_XX42A0 = (1 << 5),
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400339 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
340 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
341 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
Mark Lord02a121d2007-12-01 13:07:22 -0500342 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
Brett Russ20f733e2005-09-01 18:26:17 -0400343
Brett Russ31961942005-09-30 01:36:00 -0400344 /* Port private flags (pp_flags) */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400345 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
Mark Lord72109162008-01-26 18:31:33 -0500346 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400347 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
Brett Russ31961942005-09-30 01:36:00 -0400348};
349
/* Chip-generation tests on host private flags. */
#define IS_GEN_I(hpriv)		((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)
/* True when the controller sits on a PCI bus (i.e. not a SoC part). */
#define HAS_PCI(host)		(!((host)->ports[0]->flags & MV_FLAG_SOC))
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500354
enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill-sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
369
/* Index into mv_port_info[]; also used as pci_device_id driver_data. */
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};
380
Brett Russ31961942005-09-30 01:36:00 -0400381/* Command ReQuest Block: 32B */
382struct mv_crqb {
Mark Lorde1469872006-05-22 19:02:03 -0400383 __le32 sg_addr;
384 __le32 sg_addr_hi;
385 __le16 ctrl_flags;
386 __le16 ata_cmd[11];
Brett Russ31961942005-09-30 01:36:00 -0400387};
388
Jeff Garzike4e7b892006-01-31 12:18:41 -0500389struct mv_crqb_iie {
Mark Lorde1469872006-05-22 19:02:03 -0400390 __le32 addr;
391 __le32 addr_hi;
392 __le32 flags;
393 __le32 len;
394 __le32 ata_cmd[4];
Jeff Garzike4e7b892006-01-31 12:18:41 -0500395};
396
Brett Russ31961942005-09-30 01:36:00 -0400397/* Command ResPonse Block: 8B */
398struct mv_crpb {
Mark Lorde1469872006-05-22 19:02:03 -0400399 __le16 id;
400 __le16 flags;
401 __le32 tmstmp;
Brett Russ31961942005-09-30 01:36:00 -0400402};
403
404/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
405struct mv_sg {
Mark Lorde1469872006-05-22 19:02:03 -0400406 __le32 addr;
407 __le32 flags_size;
408 __le32 addr_hi;
409 __le32 reserved;
Brett Russ20f733e2005-09-01 18:26:17 -0400410};
411
412struct mv_port_priv {
Brett Russ31961942005-09-30 01:36:00 -0400413 struct mv_crqb *crqb;
414 dma_addr_t crqb_dma;
415 struct mv_crpb *crpb;
416 dma_addr_t crpb_dma;
Mark Lordeb73d552008-01-29 13:24:00 -0500417 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
418 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400419
420 unsigned int req_idx;
421 unsigned int resp_idx;
422
Brett Russ31961942005-09-30 01:36:00 -0400423 u32 pp_flags;
Brett Russ20f733e2005-09-01 18:26:17 -0400424};
425
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500426struct mv_port_signal {
427 u32 amps;
428 u32 pre;
429};
430
Mark Lord02a121d2007-12-01 13:07:22 -0500431struct mv_host_priv {
432 u32 hp_flags;
433 struct mv_port_signal signal[8];
434 const struct mv_hw_ops *ops;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500435 int n_ports;
436 void __iomem *base;
437 void __iomem *main_cause_reg_addr;
438 void __iomem *main_mask_reg_addr;
Mark Lord02a121d2007-12-01 13:07:22 -0500439 u32 irq_cause_ofs;
440 u32 irq_mask_ofs;
441 u32 unmask_all_irqs;
Mark Lordda2fa9b2008-01-26 18:32:45 -0500442 /*
443 * These consistent DMA memory pools give us guaranteed
444 * alignment for hardware-accessed data structures,
445 * and less memory waste in accomplishing the alignment.
446 */
447 struct dma_pool *crqb_pool;
448 struct dma_pool *crpb_pool;
449 struct dma_pool *sg_tbl_pool;
Mark Lord02a121d2007-12-01 13:07:22 -0500450};
451
Jeff Garzik47c2b672005-11-12 21:13:17 -0500452struct mv_hw_ops {
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500453 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
454 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500455 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
456 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
457 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500458 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
459 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500460 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100461 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500462};
463
Tejun Heoda3dbb12007-07-16 14:29:40 +0900464static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
465static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
466static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
467static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Brett Russ31961942005-09-30 01:36:00 -0400468static int mv_port_start(struct ata_port *ap);
469static void mv_port_stop(struct ata_port *ap);
470static void mv_qc_prep(struct ata_queued_cmd *qc);
Jeff Garzike4e7b892006-01-31 12:18:41 -0500471static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
Tejun Heo9a3d9eb2006-01-23 13:09:36 +0900472static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400473static void mv_error_handler(struct ata_port *ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400474static void mv_eh_freeze(struct ata_port *ap);
475static void mv_eh_thaw(struct ata_port *ap);
Mark Lordf2738272008-01-26 18:32:29 -0500476static void mv6_dev_config(struct ata_device *dev);
Brett Russ20f733e2005-09-01 18:26:17 -0400477
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500478static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
479 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500480static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
481static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
482 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500483static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
484 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500485static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100486static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500487
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500488static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
489 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500490static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
491static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
492 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500493static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
494 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500495static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500496static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
497 void __iomem *mmio);
498static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
499 void __iomem *mmio);
500static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
501 void __iomem *mmio, unsigned int n_hc);
502static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
503 void __iomem *mmio);
504static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100505static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500506static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
507 unsigned int port_no);
Mark Lord72109162008-01-26 18:31:33 -0500508static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
509 void __iomem *port_mmio, int want_ncq);
510static int __mv_stop_dma(struct ata_port *ap);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500511
Mark Lordeb73d552008-01-29 13:24:00 -0500512/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
513 * because we have to allow room for worst case splitting of
514 * PRDs for 64K boundaries in mv_fill_sg().
515 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400516static struct scsi_host_template mv5_sht = {
Tejun Heo68d1d072008-03-25 12:22:49 +0900517 ATA_BASE_SHT(DRV_NAME),
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400518 .sg_tablesize = MV_MAX_SG_CT / 2,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400519 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400520};
521
522static struct scsi_host_template mv6_sht = {
Tejun Heo68d1d072008-03-25 12:22:49 +0900523 ATA_NCQ_SHT(DRV_NAME),
Mark Lord138bfdd2008-01-26 18:33:18 -0500524 .can_queue = MV_MAX_Q_DEPTH - 1,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400525 .sg_tablesize = MV_MAX_SG_CT / 2,
Brett Russ20f733e2005-09-01 18:26:17 -0400526 .dma_boundary = MV_DMA_BOUNDARY,
Brett Russ20f733e2005-09-01 18:26:17 -0400527};
528
Tejun Heo029cfd62008-03-25 12:22:49 +0900529static struct ata_port_operations mv5_ops = {
530 .inherits = &ata_sff_port_ops,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500531
Jeff Garzikc9d39132005-11-13 17:47:51 -0500532 .qc_prep = mv_qc_prep,
533 .qc_issue = mv_qc_issue,
534
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400535 .freeze = mv_eh_freeze,
536 .thaw = mv_eh_thaw,
Tejun Heo029cfd62008-03-25 12:22:49 +0900537 .error_handler = mv_error_handler,
538 .post_internal_cmd = ATA_OP_NULL,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400539
Jeff Garzikc9d39132005-11-13 17:47:51 -0500540 .scr_read = mv5_scr_read,
541 .scr_write = mv5_scr_write,
542
543 .port_start = mv_port_start,
544 .port_stop = mv_port_stop,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500545};
546
Tejun Heo029cfd62008-03-25 12:22:49 +0900547static struct ata_port_operations mv6_ops = {
548 .inherits = &mv5_ops,
Mark Lord138bfdd2008-01-26 18:33:18 -0500549 .qc_defer = ata_std_qc_defer,
Tejun Heo029cfd62008-03-25 12:22:49 +0900550 .dev_config = mv6_dev_config,
Brett Russ20f733e2005-09-01 18:26:17 -0400551 .scr_read = mv_scr_read,
552 .scr_write = mv_scr_write,
Brett Russ20f733e2005-09-01 18:26:17 -0400553};
554
Tejun Heo029cfd62008-03-25 12:22:49 +0900555static struct ata_port_operations mv_iie_ops = {
556 .inherits = &mv6_ops,
557 .dev_config = ATA_OP_NULL,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500558 .qc_prep = mv_qc_prep_iie,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500559};
560
Arjan van de Ven98ac62d2005-11-28 10:06:23 +0100561static const struct ata_port_info mv_port_info[] = {
Brett Russ20f733e2005-09-01 18:26:17 -0400562 { /* chip_504x */
Jeff Garzikcca39742006-08-24 03:19:22 -0400563 .flags = MV_COMMON_FLAGS,
Brett Russ31961942005-09-30 01:36:00 -0400564 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400565 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500566 .port_ops = &mv5_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400567 },
568 { /* chip_508x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400569 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
Brett Russ31961942005-09-30 01:36:00 -0400570 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400571 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500572 .port_ops = &mv5_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400573 },
Jeff Garzik47c2b672005-11-12 21:13:17 -0500574 { /* chip_5080 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400575 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500576 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400577 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500578 .port_ops = &mv5_ops,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500579 },
Brett Russ20f733e2005-09-01 18:26:17 -0400580 { /* chip_604x */
Mark Lord138bfdd2008-01-26 18:33:18 -0500581 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
582 ATA_FLAG_NCQ,
Brett Russ31961942005-09-30 01:36:00 -0400583 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400584 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500585 .port_ops = &mv6_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400586 },
587 { /* chip_608x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400588 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
Mark Lord138bfdd2008-01-26 18:33:18 -0500589 ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
Brett Russ31961942005-09-30 01:36:00 -0400590 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400591 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500592 .port_ops = &mv6_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400593 },
Jeff Garzike4e7b892006-01-31 12:18:41 -0500594 { /* chip_6042 */
Mark Lord138bfdd2008-01-26 18:33:18 -0500595 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
596 ATA_FLAG_NCQ,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500597 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400598 .udma_mask = ATA_UDMA6,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500599 .port_ops = &mv_iie_ops,
600 },
601 { /* chip_7042 */
Mark Lord138bfdd2008-01-26 18:33:18 -0500602 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
603 ATA_FLAG_NCQ,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500604 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400605 .udma_mask = ATA_UDMA6,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500606 .port_ops = &mv_iie_ops,
607 },
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500608 { /* chip_soc */
609 .flags = MV_COMMON_FLAGS | MV_FLAG_SOC,
610 .pio_mask = 0x1f, /* pio0-4 */
611 .udma_mask = ATA_UDMA6,
612 .port_ops = &mv_iie_ops,
613 },
Brett Russ20f733e2005-09-01 18:26:17 -0400614};
615
/*
 * PCI IDs claimed by this driver.  The driver_data field is the chip_*
 * board index selecting the per-variant flags and port_ops.
 */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
645
/* Low-level hardware hooks for the 50xx family (mv5_* helpers). */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata	= mv5_phy_errata,
	.enable_leds	= mv5_enable_leds,
	.read_preamp	= mv5_read_preamp,
	.reset_hc	= mv5_reset_hc,
	.reset_flash	= mv5_reset_flash,
	.reset_bus	= mv5_reset_bus,
};
654
/* Low-level hardware hooks for the 60xx/70xx families (mv6_* helpers). */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata	= mv6_phy_errata,
	.enable_leds	= mv6_enable_leds,
	.read_preamp	= mv6_read_preamp,
	.reset_hc	= mv6_reset_hc,
	.reset_flash	= mv6_reset_flash,
	.reset_bus	= mv_reset_pci_bus,	/* PCI(-X/e) bus reset */
};
663
/*
 * Low-level hardware hooks for system-on-chip (non-PCI) variants.
 * PHY errata handling is shared with the Gen-II PCI chips.
 */
static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata	= mv6_phy_errata,
	.enable_leds	= mv_soc_enable_leds,
	.read_preamp	= mv_soc_read_preamp,
	.reset_hc	= mv_soc_reset_hc,
	.reset_flash	= mv_soc_reset_flash,
	.reset_bus	= mv_soc_reset_bus,
};
672
Brett Russ20f733e2005-09-01 18:26:17 -0400673/*
674 * Functions
675 */
676
/*
 * writelfl - write a register, then read it back to force the write
 * out to the device (defeats PCI posted-write buffering).
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
682
Brett Russ20f733e2005-09-01 18:26:17 -0400683static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
684{
685 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
686}
687
/* Index of the host controller that owns chip-wide port number @port. */
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}
692
/* Port index relative to its own host controller ("hard port"). */
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
697
698static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
699 unsigned int port)
700{
701 return mv_hc_base(base, mv_hc_from_port(port));
702}
703
Brett Russ20f733e2005-09-01 18:26:17 -0400704static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
705{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500706 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500707 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500708 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400709}
710
/* Chip MMIO base, as cached in the host private data at init time. */
static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}
716
/* Per-port MMIO base for the port behind @ap. */
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}
721
Jeff Garzikcca39742006-08-24 03:19:22 -0400722static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400723{
Jeff Garzikcca39742006-08-24 03:19:22 -0400724 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400725}
726
/**
 *	mv_set_edma_ptrs - program EDMA queue base addresses and pointers
 *	@port_mmio: per-port MMIO base
 *	@hpriv: host private data (consulted for errata flags)
 *	@pp: port private data holding queue DMA addresses and soft indices
 *
 *	Loads the hardware request/response queue registers from the
 *	software state in @pp.  On chips with the XX42A0 errata the
 *	affected pointer registers take the low 32 bits of the queue DMA
 *	address ORed with the index, rather than the bare index field.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* request queue base must be 1KB aligned */
	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	/* response queue base must be 256-byte aligned */
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
766
/**
 *	mv_start_dma - Enable eDMA engine
 *	@ap: ATA channel to manipulate
 *	@port_mmio: per-port MMIO base
 *	@pp: port private data
 *	@protocol: taskfile protocol of the command about to be issued
 *
 *	If the engine is already running but in the wrong NCQ/non-NCQ
 *	mode for @protocol, stop it first.  When (re)starting, clear any
 *	stale EDMA/HC/FIS interrupt-cause bits, reconfigure via
 *	mv_edma_cfg(), reload the queue pointers, and set EDMA_EN.
 *
 *	Verify the local cache of the eDMA state is accurate with a
 *	WARN_ON.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		/* mode switch requires a full stop/reconfigure/restart */
		if (want_ncq != using_ncq)
			__mv_stop_dma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
					mv_host_base(ap->host), hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			   (CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}
819
/**
 *	__mv_stop_dma - Disable eDMA engine
 *	@ap: ATA channel to manipulate
 *
 *	Request the engine to disable itself (the disable bit is
 *	self-clearing) and poll until the EDMA_EN status bit drops.
 *	Verify the local cache of the eDMA state is accurate with a
 *	WARN_ON.
 *
 *	RETURNS:
 *	0 on success, -EIO if the engine fails to stop within ~100ms
 *	(1000 polls of 100us each).
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.  The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		/* soft state says "already stopped"; hardware must agree */
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
862
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400863static int mv_stop_dma(struct ata_port *ap)
864{
865 unsigned long flags;
866 int rc;
867
868 spin_lock_irqsave(&ap->host->lock, flags);
869 rc = __mv_stop_dma(ap);
870 spin_unlock_irqrestore(&ap->host->lock, flags);
871
872 return rc;
873}
874
#ifdef ATA_DEBUG
/*
 * mv_dump_mem - hex-dump @bytes bytes of MMIO space at @start,
 * four 32-bit words per output line.  Debug builds only.
 */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
889
/*
 * mv_dump_pci_cfg - hex-dump the first @bytes bytes of @pdev's PCI
 * config space, four dwords per line.  Compiles to a no-op unless
 * ATA_DEBUG is defined.
 */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
/*
 * mv_dump_all_regs - debug dump of PCI config, chip-global, HC and
 * per-port register blocks.
 * @mmio_base: chip MMIO base
 * @port: port to dump, or negative to dump all ports/HCs
 * @pdev: PCI device for config-space dump, or NULL to skip it
 *
 * No-op unless ATA_DEBUG is defined.
 */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		/* negative port: dump everything */
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	/* chip-global register windows (offsets per register map) */
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
950
Brett Russ20f733e2005-09-01 18:26:17 -0400951static unsigned int mv_scr_offset(unsigned int sc_reg_in)
952{
953 unsigned int ofs;
954
955 switch (sc_reg_in) {
956 case SCR_STATUS:
957 case SCR_CONTROL:
958 case SCR_ERROR:
959 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
960 break;
961 case SCR_ACTIVE:
962 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
963 break;
964 default:
965 ofs = 0xffffffffU;
966 break;
967 }
968 return ofs;
969}
970
Tejun Heoda3dbb12007-07-16 14:29:40 +0900971static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -0400972{
973 unsigned int ofs = mv_scr_offset(sc_reg_in);
974
Tejun Heoda3dbb12007-07-16 14:29:40 +0900975 if (ofs != 0xffffffffU) {
976 *val = readl(mv_ap_base(ap) + ofs);
977 return 0;
978 } else
979 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -0400980}
981
Tejun Heoda3dbb12007-07-16 14:29:40 +0900982static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -0400983{
984 unsigned int ofs = mv_scr_offset(sc_reg_in);
985
Tejun Heoda3dbb12007-07-16 14:29:40 +0900986 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -0400987 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +0900988 return 0;
989 } else
990 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -0400991}
992
Mark Lordf2738272008-01-26 18:32:29 -0500993static void mv6_dev_config(struct ata_device *adev)
994{
995 /*
996 * We don't have hob_nsect when doing NCQ commands on Gen-II.
997 * See mv_qc_prep() for more info.
998 */
999 if (adev->flags & ATA_DFLAG_NCQ)
1000 if (adev->max_sectors > ATA_MAX_SECTORS)
1001 adev->max_sectors = ATA_MAX_SECTORS;
1002}
1003
/**
 *	mv_edma_cfg - program the EDMA configuration register
 *	@pp: port private data (NCQ soft-state flag is updated here)
 *	@hpriv: host private data (for chip-generation checks)
 *	@port_mmio: per-port MMIO base
 *	@want_ncq: nonzero to configure the engine for NCQ operation
 *
 *	Builds a per-generation EDMA_CFG value, records the chosen
 *	NCQ mode in pp->pp_flags, and writes the register.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq)
{
	u32 cfg;

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1033
/*
 * mv_port_free_dma_mem - release a port's DMA-pool allocations
 * @ap: port being torn down
 *
 * Frees the CRQB/CRPB queues and the per-tag SG tables allocated by
 * mv_port_start(), NULLing each pointer as it goes.  Safe to call on
 * a partially-initialized port (used as the error-unwind path).
 */
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 * (On GEN_I, tags 1..n alias tag 0's table, so only free once.)
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}
1062
/**
 *	mv_port_start - Port specific init/start routine.
 *	@ap: ATA channel to manipulate
 *
 *	Allocate and point to DMA memory (CRQB/CRPB queues and per-tag
 *	SG tables), init port private memory, zero indices, and program
 *	the initial non-NCQ EDMA configuration and queue pointers.  The
 *	pp struct itself is devm-managed; the queue buffers come from
 *	DMA pools and are unwound via mv_port_free_dma_mem() on failure.
 *
 *	RETURNS:
 *	0 on success, -ENOMEM on allocation failure.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			/* GEN_I: every tag aliases tag 0's table */
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(pp, hpriv, port_mmio, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
1130
/**
 *	mv_port_stop - Port specific cleanup/stop routine.
 *	@ap: ATA channel to manipulate
 *
 *	Stop DMA (under the host lock, via mv_stop_dma), then free the
 *	port's DMA-pool memory.  The pp struct itself is devm-managed
 *	and is not freed here.
 *
 *	LOCKING:
 *	This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
	mv_port_free_dma_mem(ap);
}
1145
/**
 *	mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *	@qc: queued command whose SG list to source from
 *
 *	Populate the SG list and mark the last entry.  Each DMA-mapped
 *	segment is split so that no ePRD entry crosses a 64KB boundary;
 *	the low 16 bits of flags_size carry the length (0x10000 masks to
 *	0 — presumably the hardware's encoding for a full 64KB, TODO
 *	confirm against the register spec).
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			/* clamp so this entry stays within its 64KB page */
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	/* mark the final entry so the engine knows where the table ends */
	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
1189
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001190static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001191{
Mark Lord559eeda2006-05-19 16:40:15 -04001192 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001193 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001194 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001195}
1196
/**
 *	mv_qc_prep - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* only DMA and NCQ protocols go through the EDMA CRQB path */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* point the CRQB at this tag's SG table (64-bit DMA address) */
	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1286
/**
 *	mv_qc_prep_iie - Host specific command preparation (Gen IIE).
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.  Unlike mv_qc_prep(), the Gen IIE CRQB
 *	format carries the full taskfile in four packed 32-bit words.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* only DMA and NCQ protocols go through the EDMA CRQB path */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	/* point the CRQB at this tag's SG table (64-bit DMA address) */
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1355
Brett Russ05b308e2005-10-05 17:08:53 -04001356/**
1357 * mv_qc_issue - Initiate a command to the host
1358 * @qc: queued command to start
1359 *
1360 * This routine simply redirects to the general purpose routine
1361 * if command is not DMA. Else, it sanity checks our local
1362 * caches of the request producer/consumer indices then enables
1363 * DMA and bumps the request producer index.
1364 *
1365 * LOCKING:
1366 * Inherited from caller.
1367 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	/* (Re)enable EDMA for this protocol before touching the queue */
	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	/* mv_qc_prep*() already filled the CRQB at the current req_idx;
	 * advance our software producer index past it.
	 */
	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1397
Brett Russ05b308e2005-10-05 17:08:53 -04001398/**
Brett Russ05b308e2005-10-05 17:08:53 -04001399 * mv_err_intr - Handle error interrupts on the port
1400 * @ap: ATA channel to manipulate
Mark Lord9b358e32006-05-19 16:21:03 -04001401 * @reset_allowed: bool: 0 == don't trigger from reset here
Brett Russ05b308e2005-10-05 17:08:53 -04001402 *
1403 * In most cases, just clear the interrupt and move on. However,
1404 * some cases require an eDMA reset, which is done right before
1405 * the COMRESET in mv_phy_reset(). The SERR case requires a
1406 * clear of pending errors in the SATA SERROR register. Finally,
1407 * if the port disabled DMA, update our cached copy to match.
1408 *
1409 * LOCKING:
1410 * Inherited from caller.
1411 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */

	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	/* Gen I (50xx) uses a different freeze mask and self-disable bit
	 * than Gen II/IIE.
	 */
	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			/* NOTE(review): pp already holds ap->private_data
			 * from the top of the function; this reassignment
			 * is redundant (same below).
			 */
			pp = ap->private_data;
			/* hardware disabled EDMA on its own; sync our flag */
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			/* NOTE(review): plain '=' overwrites any AC_ERR_DEV
			 * or parity bits accumulated above — '|=' may have
			 * been intended; confirm before changing.
			 */
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	/* attribute the error to the active qc when we have one */
	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	/* freeze (full EH) for serious causes, otherwise just abort */
	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
1502
/* Handle a device interrupt while the port is in PIO (non-EDMA) mode:
 * read the status register (which also acks the IRQ) and complete the
 * active command if one is outstanding and owned by us.
 */
static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}
1524
/* Drain the EDMA response queue: complete every CRPB the hardware has
 * produced since our last visit, then publish the new software consumer
 * index back to the controller.
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			/* hand off to error handling; note we return without
			 * updating the OUT pointer — mv_err_intr resets state
			 */
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	/* single posted write of the final OUT pointer covers all entries
	 * consumed above
	 */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1590
Brett Russ05b308e2005-10-05 17:08:53 -04001591/**
1592 * mv_host_intr - Handle all interrupts on the given host controller
Jeff Garzikcca39742006-08-24 03:19:22 -04001593 * @host: host specific structure
Brett Russ05b308e2005-10-05 17:08:53 -04001594 * @relevant: port error bits relevant to this host controller
1595 * @hc: which host controller we're to look at
1596 *
1597 * Read then write clear the HC interrupt status then walk each
1598 * port connected to the HC and see if it needs servicing. Port
1599 * success ints are reported in the HC interrupt status reg, the
1600 * port error ints are reported in the higher level main
1601 * interrupt status register and thus are passed in via the
1602 * 'relevant' argument.
1603 *
1604 * LOCKING:
1605 * Inherited from caller.
1606 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0, last_port;

	/* each HC serves a fixed, contiguous range of ports */
	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* non-PCI (SoC) hosts may expose fewer ports than a full HC */
	if (HAS_PCI(host))
		last_port = port0 + MV_PORTS_PER_HC;
	else
		last_port = port0 + hpriv->n_ports;
	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	/* ack (write-to-clear) the cause bits we just latched */
	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < last_port; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		pp = ap->private_data;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		/* error bits live in the main cause register ('relevant'),
		 * not in hc_irq_cause
		 */
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			/* polled commands are handled by their issuer */
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port);	/* range 0..3 */

		/* dispatch on current mode: EDMA completions vs PIO IRQs */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}
1673
/* Handle a chip-level PCI error: log and clear the cause register, then
 * freeze every port that has a device attached so libata EH recovers them.
 */
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	/* ack the error before freezing ports */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			/* record the cause once, on the first affected port */
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
1713
Brett Russ05b308e2005-10-05 17:08:53 -04001714/**
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001715 * mv_interrupt - Main interrupt event handler
Brett Russ05b308e2005-10-05 17:08:53 -04001716 * @irq: unused
1717 * @dev_instance: private data; in this case the host structure
Brett Russ05b308e2005-10-05 17:08:53 -04001718 *
1719 * Read the read only register to determine if any host
1720 * controllers have pending interrupts. If so, call lower level
1721 * routine to handle. Also check for PCI errors which are only
1722 * reported here.
1723 *
Jeff Garzik8b260242005-11-12 12:32:50 -05001724 * LOCKING:
Jeff Garzikcca39742006-08-24 03:19:22 -04001725 * This routine holds the host lock while processing pending
Brett Russ05b308e2005-10-05 17:08:53 -04001726 * interrupts.
1727 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = hpriv->base;
	u32 irq_stat, irq_mask;

	/* serialize against per-port command issue/completion paths */
	spin_lock(&host->lock);

	irq_stat = readl(hpriv->main_cause_reg_addr);
	irq_mask = readl(hpriv->main_mask_reg_addr);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	/* fan out to each host controller with pending bits */
	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1768
Jeff Garzikc9d39132005-11-13 17:47:51 -05001769static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1770{
1771 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1772 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1773
1774 return hc_mmio + ofs;
1775}
1776
1777static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1778{
1779 unsigned int ofs;
1780
1781 switch (sc_reg_in) {
1782 case SCR_STATUS:
1783 case SCR_ERROR:
1784 case SCR_CONTROL:
1785 ofs = sc_reg_in * sizeof(u32);
1786 break;
1787 default:
1788 ofs = 0xffffffffU;
1789 break;
1790 }
1791 return ofs;
1792}
1793
Tejun Heoda3dbb12007-07-16 14:29:40 +09001794static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001795{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001796 struct mv_host_priv *hpriv = ap->host->private_data;
1797 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001798 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001799 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1800
Tejun Heoda3dbb12007-07-16 14:29:40 +09001801 if (ofs != 0xffffffffU) {
1802 *val = readl(addr + ofs);
1803 return 0;
1804 } else
1805 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001806}
1807
Tejun Heoda3dbb12007-07-16 14:29:40 +09001808static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001809{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001810 struct mv_host_priv *hpriv = ap->host->private_data;
1811 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001812 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001813 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1814
Tejun Heoda3dbb12007-07-16 14:29:40 +09001815 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001816 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001817 return 0;
1818 } else
1819 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001820}
1821
/* 50xx chip-level bus reset.  On everything but rev-0 5080 parts, bit 0
 * of the expansion-ROM BAR control register is set first, then the
 * common PCI bus reset is performed.
 */
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	/* rev-0 5080 parts must skip the ROM BAR tweak */
	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}
1837
/* Reset the 50xx flash controller by writing a fixed value to the flash
 * control register (magic constant from the vendor driver).
 */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1842
/* Capture the 50xx PHY's pre-emphasis and amplitude settings for port
 * @idx so they can be restored after errata workarounds (mv5_phy_errata).
 */
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}
1854
/* Configure 50xx GPIO for LED operation. */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): '|= ~(1 << 0)' sets every bit EXCEPT bit 0; if the
	 * intent was to clear bit 0 (the converse of mv5_reset_bus setting
	 * it), this should be '&= ~(1 << 0)'.  Behavior kept as-is pending
	 * confirmation against the Marvell datasheet.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1867
/* Apply 50xx PHY errata fixes to @port, then restore the pre-emphasis and
 * amplitude values previously saved by mv5_read_preamp().
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	/* bits 12:11 (pre) and 7:5 (amps) — cleared before restore below */
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	/* 50XXB0 parts need the APM/squelch workaround */
	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* clear pre/amps fields, then restore the saved signal values */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1893
Jeff Garzikc9d39132005-11-13 17:47:51 -05001894
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Bring one 50xx port to a known state: stop EDMA, reset the channel,
 * then zero/initialize the per-port EDMA registers.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/* disable EDMA before touching the channel */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1921
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Reset one 50xx host controller: clear its IRQ/config registers and
 * set the per-port bits in the HC config register at offset 0x20.
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
1940
1941static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1942 unsigned int n_hc)
1943{
1944 unsigned int hc, port;
1945
1946 for (hc = 0; hc < n_hc; hc++) {
1947 for (port = 0; port < MV_PORTS_PER_HC; port++)
1948 mv5_reset_hc_port(hpriv, mmio,
1949 (hc * MV_PORTS_PER_HC) + port);
1950
1951 mv5_reset_one_hc(hpriv, mmio, hc);
1952 }
1953
1954 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05001955}
1956
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
/* Reset the chip's PCI interface block: restrict PCI mode bits, clear
 * timers/triggers, mask all main IRQs and clear all error state.
 */
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	/* cause/mask offsets differ per chip generation, hence hpriv */
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
1981
/* 60xx flash reset: run the 50xx sequence, then adjust the GPIO port
 * control register (keep low 2 bits, set bits 5 and 6).
 */
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
1993
1994/**
1995 * mv6_reset_hc - Perform the 6xxx global soft reset
1996 * @mmio: base address of the HBA
1997 *
1998 * This routine only applies to 6xxx parts.
1999 *
2000 * LOCKING:
2001 * Inherited from caller.
2002 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* wait up to ~1ms for the PCI master to drain */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
2057
/* Capture the 60xx PHY's amplitude and pre-emphasis for port @idx.
 * If MV_RESET_CFG bit 0 is clear the PHY defaults are used instead of
 * reading them back from PHY_MODE2.
 */
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		/* use fixed defaults */
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
2077
/* Configure 60xx GPIO port control for LED operation (value from the
 * vendor driver; presumably sets bits 5 and 6 — confirm vs datasheet).
 */
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
2082
/* Apply 60xx/IIE PHY errata fixes to @port, then restore the saved
 * amplitude/pre-emphasis values (mv6_read_preamp) into PHY_MODE2.
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	/* PHY_MODE2 workaround: pulse bit 31 with bit 16 cleared, then
	 * clear both, with 200us settle time after each write.
	 */
	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* 60X1B2: register 0x310 must be saved across the
		 * PHY_MODE4 update and restored afterwards (tmp is only
		 * read and written back under the same flag).
		 */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
2148
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002149/* TODO: use the generic LED interface to configure the SATA Presence */
2150/* & Acitivy LEDs on the board */
2151static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2152 void __iomem *mmio)
2153{
2154 return;
2155}
2156
2157static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2158 void __iomem *mmio)
2159{
2160 void __iomem *port_mmio;
2161 u32 tmp;
2162
2163 port_mmio = mv_port_base(mmio, idx);
2164 tmp = readl(port_mmio + PHY_MODE2);
2165
2166 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2167 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2168}
2169
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/*
 * mv_soc_reset_hc_port - bring one SoC SATA port to a clean state
 * @hpriv: host private data
 * @mmio: controller base address
 * @port: port number to reset
 *
 * Disables EDMA, resets the channel, then zeroes the EDMA command,
 * queue-pointer and irq-error registers and programs default EDMA
 * config / IORDY timeout values.  Write order follows the original
 * sequence and should not be rearranged.
 */
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/* stop EDMA before touching the channel */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
2195
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Zero the irq/config registers of the (single) SoC host controller. */
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);

}
2209
2210#undef ZERO
2211
2212static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2213 void __iomem *mmio, unsigned int n_hc)
2214{
2215 unsigned int port;
2216
2217 for (port = 0; port < hpriv->n_ports; port++)
2218 mv_soc_reset_hc_port(hpriv, mmio, port);
2219
2220 mv_soc_reset_one_hc(hpriv, mmio);
2221
2222 return 0;
2223}
2224
2225static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2226 void __iomem *mmio)
2227{
2228 return;
2229}
2230
2231static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2232{
2233 return;
2234}
2235
/*
 * mv_channel_reset - pulse ATA_RST on one channel and re-apply PHY errata
 * @hpriv: host private data
 * @mmio: controller base address
 * @port_no: port number to reset
 *
 * Asserts ATA_RST in the EDMA command register, fixes up the SATA
 * interface control register on GenII parts while reset is held,
 * waits for propagation, deasserts the bit, then runs the chip's
 * phy_errata hook.  Must not be reordered.
 */
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);	/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	/* GenI parts need extra settle time after the reset */
	if (IS_GEN_I(hpriv))
		mdelay(1);
}
2262
/**
 *	mv_phy_reset - Perform eDMA reset followed by COMRESET
 *	@ap: ATA channel to manipulate
 *	@class: set to the resulting device class (ATA_DEV_NONE if the
 *		link stays offline)
 *	@deadline: jiffies limit for the link-ready polling loops
 *
 *	Part of this is taken from __sata_phy_reset and modified to
 *	not sleep since this routine gets called from interrupt level.
 *
 *	LOCKING:
 *	Inherited from caller.  This is coded to safe to call at
 *	interrupt level, i.e. it does not sleep.
 *
 *	NOTE(review): the msleep() calls below contradict the "does
 *	not sleep" claim above; looks like this is now only called
 *	from EH process context — confirm before relying on it.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			 unsigned long deadline)
{
	struct mv_port_priv *pp	= ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
	msleep(1);

	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
	msleep(20);

	/* poll SStatus DET field until the PHY reports a device
	 * present (3) or nothing attached (0), or we hit the deadline
	 */
	do {
		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	if (ata_link_offline(&ap->link)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		msleep(500);
		if (retry-- <= 0)
			break;
		if (time_after(jiffies, deadline))
			break;
	}

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap->link.device, 1, NULL);

	/* clear any errors latched during the reset */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* EDMA must have been stopped by the caller before resetting */
	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}
2366
Tejun Heocc0680a2007-08-06 18:36:23 +09002367static int mv_prereset(struct ata_link *link, unsigned long deadline)
Jeff Garzik22374672005-11-17 10:59:48 -05002368{
Tejun Heocc0680a2007-08-06 18:36:23 +09002369 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002370 struct mv_port_priv *pp = ap->private_data;
Jeff Garzik0ea9e172007-07-13 17:06:45 -04002371
Tejun Heocf480622008-01-24 00:05:14 +09002372 mv_stop_dma(ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002373
Tejun Heocf480622008-01-24 00:05:14 +09002374 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET))
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002375 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002376
Tejun Heocf480622008-01-24 00:05:14 +09002377 return 0;
Jeff Garzik22374672005-11-17 10:59:48 -05002378}
2379
Tejun Heocc0680a2007-08-06 18:36:23 +09002380static int mv_hardreset(struct ata_link *link, unsigned int *class,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002381 unsigned long deadline)
2382{
Tejun Heocc0680a2007-08-06 18:36:23 +09002383 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002384 struct mv_host_priv *hpriv = ap->host->private_data;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002385 void __iomem *mmio = hpriv->base;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002386
2387 mv_stop_dma(ap);
2388
2389 mv_channel_reset(hpriv, mmio, ap->port_no);
2390
2391 mv_phy_reset(ap, class, deadline);
2392
2393 return 0;
2394}
2395
/*
 * mv_postreset - libata EH postreset hook
 * @link: ATA link that was reset
 * @classes: per-device classification results from the reset
 *
 * Reports the post-reset link status, clears latched SError bits,
 * and, if a device is present, restores the device control register.
 */
static void mv_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serr;

	/* print link status */
	sata_print_link_status(link);

	/* clear SError */
	sata_scr_read(link, SCR_ERROR, &serr);
	sata_scr_write_flush(link, SCR_ERROR, serr);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
}
2417
/* Standard libata error-handling entry point: run the EH state
 * machine with this driver's prereset/hardreset/postreset hooks
 * (softreset is the generic ata_std_softreset).
 */
static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}
2423
/*
 * mv_eh_freeze - libata EH freeze hook
 * @ap: port to freeze
 *
 * Masks this port's "err" and "done" bits in the chip's main irq
 * mask register so the port stops raising interrupts while frozen.
 */
static void mv_eh_freeze(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	u32 tmp, mask;
	unsigned int shift;

	/* FIXME: handle coalescing completion events properly */

	/* each port owns two adjacent bits in the main mask register;
	 * ports on the second host controller (port_no > 3) are
	 * shifted up by one extra bit
	 */
	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;

	mask = 0x3 << shift;

	/* disable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
}
2443
/*
 * mv_eh_thaw - libata EH thaw hook
 * @ap: port to thaw
 *
 * Clears any latched EDMA errors and HC interrupt causes for this
 * port, then re-enables its "err" and "done" bits in the chip's
 * main irq mask register.
 */
static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	/* two mask bits per port, plus one extra shift on the second HC;
	 * hc_port_no becomes the port index within its own HC
	 */
	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);		/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8));	/* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp | mask, hpriv->main_mask_reg_addr);
}
2477
/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup: the taskfile shadow registers live in a
	 * block of u32 slots, one per ATA_REG_* index
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	/* SError is write-1-to-clear: write back what we read */
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
2526
/*
 * mv_chip_id - identify the chip variant and set errata/feature flags
 * @host: ATA host being initialized
 * @board_idx: board index from the probe table
 *
 * Selects the per-generation hw ops table and, based on the PCI
 * revision, the applicable MV_HP_ERRATA_* flags.  Also selects the
 * PCI vs PCIe irq register offsets.  Returns 0 on success, 1 on an
 * unrecognized board index.
 */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* fall through: 7042 is a PCIe variant of the 6042 */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	/* PCIe parts use a different irq cause/mask register block */
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs	= PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs	= PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
2666
/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @host: ATA host to initialize
 *      @board_idx: controller index
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      Returns 0 on success, nonzero from mv_chip_id()/reset_hc()
 *      on failure.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	/* PCI hosts and the SoC variant keep the main irq cause/mask
	 * registers at different offsets
	 */
	if (HAS_PCI(host)) {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_MASK_OFS;
	}
	/* global interrupt mask: silence everything during setup */
	writel(0, hpriv->main_mask_reg_addr);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* save PHY signal settings before the resets disturb them */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (HAS_PCI(host)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (HAS_PCI(host)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
		if (IS_GEN_I(hpriv))
			writelfl(~HC_MAIN_MASKED_IRQS_5,
				 hpriv->main_mask_reg_addr);
		else
			writelfl(~HC_MAIN_MASKED_IRQS,
				 hpriv->main_mask_reg_addr);

		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
			"PCI int cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr),
			readl(mmio + hpriv->irq_cause_ofs),
			readl(mmio + hpriv->irq_mask_ofs));
	} else {
		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
			 hpriv->main_mask_reg_addr);
		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr));
	}
done:
	return rc;
}
2783
/*
 * mv_create_dma_pools - allocate the managed DMA pools shared by all ports
 * @hpriv: host private data receiving the pool pointers
 * @dev: device the dmam_* allocations are bound to
 *
 * Creates the CRQB (request queue), CRPB (response queue) and SG-table
 * pools.  Returns 0 on success or -ENOMEM on the first failure; pools
 * are device-managed, so partial allocations are cleaned up by devres.
 */
static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
	hpriv->crqb_pool   = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
					     MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool   = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
					     MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
					     MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;

	return 0;
}
2803
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002804/**
2805 * mv_platform_probe - handle a positive probe of an soc Marvell
2806 * host
2807 * @pdev: platform device found
2808 *
2809 * LOCKING:
2810 * Inherited from caller.
2811 */
2812static int mv_platform_probe(struct platform_device *pdev)
2813{
2814 static int printed_version;
2815 const struct mv_sata_platform_data *mv_platform_data;
2816 const struct ata_port_info *ppi[] =
2817 { &mv_port_info[chip_soc], NULL };
2818 struct ata_host *host;
2819 struct mv_host_priv *hpriv;
2820 struct resource *res;
2821 int n_ports, rc;
2822
2823 if (!printed_version++)
2824 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2825
2826 /*
2827 * Simple resource validation ..
2828 */
2829 if (unlikely(pdev->num_resources != 2)) {
2830 dev_err(&pdev->dev, "invalid number of resources\n");
2831 return -EINVAL;
2832 }
2833
2834 /*
2835 * Get the register base first
2836 */
2837 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2838 if (res == NULL)
2839 return -EINVAL;
2840
2841 /* allocate host */
2842 mv_platform_data = pdev->dev.platform_data;
2843 n_ports = mv_platform_data->n_ports;
2844
2845 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2846 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2847
2848 if (!host || !hpriv)
2849 return -ENOMEM;
2850 host->private_data = hpriv;
2851 hpriv->n_ports = n_ports;
2852
2853 host->iomap = NULL;
Saeed Bisharaf1cb0ea2008-02-18 07:42:28 -11002854 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2855 res->end - res->start + 1);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002856 hpriv->base -= MV_SATAHC0_REG_BASE;
2857
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002858 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2859 if (rc)
2860 return rc;
2861
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002862 /* initialize adapter */
2863 rc = mv_init_host(host, chip_soc);
2864 if (rc)
2865 return rc;
2866
2867 dev_printk(KERN_INFO, &pdev->dev,
2868 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2869 host->n_ports);
2870
2871 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2872 IRQF_SHARED, &mv6_sht);
2873}
2874
2875/*
2876 *
2877 * mv_platform_remove - unplug a platform interface
2878 * @pdev: platform device
2879 *
2880 * A platform bus SATA device has been unplugged. Perform the needed
2881 * cleanup. Also called on module unload for any active devices.
2882 */
2883static int __devexit mv_platform_remove(struct platform_device *pdev)
2884{
2885 struct device *dev = &pdev->dev;
2886 struct ata_host *host = dev_get_drvdata(dev);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002887
2888 ata_host_detach(host);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002889 return 0;
2890}
2891
/* Platform-bus glue for the SoC (integrated) variant of the controller */
static struct platform_driver mv_platform_driver = {
	.probe			= mv_platform_probe,
	.remove			= __devexit_p(mv_platform_remove),
	.driver			= {
				   .name = DRV_NAME,
				   .owner = THIS_MODULE,
				  },
};
2900
2901
#ifdef CONFIG_PCI
/* forward declaration: the PCI probe routine is defined further below */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);


/* PCI-bus glue for the discrete (add-in card) variants of the controller */
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
};

/*
 * module options
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
2918
2919
2920/* move to PCI layer or libata core? */
2921static int pci_go_64(struct pci_dev *pdev)
2922{
2923 int rc;
2924
2925 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2926 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2927 if (rc) {
2928 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2929 if (rc) {
2930 dev_printk(KERN_ERR, &pdev->dev,
2931 "64-bit DMA enable failed\n");
2932 return rc;
2933 }
2934 }
2935 } else {
2936 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2937 if (rc) {
2938 dev_printk(KERN_ERR, &pdev->dev,
2939 "32-bit DMA enable failed\n");
2940 return rc;
2941 }
2942 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2943 if (rc) {
2944 dev_printk(KERN_ERR, &pdev->dev,
2945 "32-bit consistent DMA enable failed\n");
2946 return rc;
2947 }
2948 }
2949
2950 return rc;
2951}
2952
Brett Russ05b308e2005-10-05 17:08:53 -04002953/**
2954 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002955 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002956 *
2957 * FIXME: complete this.
2958 *
2959 * LOCKING:
2960 * Inherited from caller.
2961 */
Tejun Heo4447d352007-04-17 23:44:08 +09002962static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002963{
Tejun Heo4447d352007-04-17 23:44:08 +09002964 struct pci_dev *pdev = to_pci_dev(host->dev);
2965 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07002966 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002967 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002968
2969 /* Use this to determine the HW stepping of the chip so we know
2970 * what errata to workaround
2971 */
Brett Russ31961942005-09-30 01:36:00 -04002972 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2973 if (scc == 0)
2974 scc_s = "SCSI";
2975 else if (scc == 0x01)
2976 scc_s = "RAID";
2977 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002978 scc_s = "?";
2979
2980 if (IS_GEN_I(hpriv))
2981 gen = "I";
2982 else if (IS_GEN_II(hpriv))
2983 gen = "II";
2984 else if (IS_GEN_IIE(hpriv))
2985 gen = "IIE";
2986 else
2987 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002988
Jeff Garzika9524a72005-10-30 14:39:11 -05002989 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002990 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2991 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002992 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2993}
2994
/**
 *	mv_pci_init_one - handle a positive probe of a PCI Marvell host
 *	@pdev: PCI device found
 *	@ent: PCI device ID entry for the matched host
 *
 *	Allocates the ATA host, acquires PCI resources (all managed via
 *	pcim_*/devm_* helpers, so error paths simply return), configures
 *	DMA masks and descriptor pools, initializes the adapter and
 *	activates the host.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	/* driver_data selects the chip variant from mv_port_info[] */
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		/* keep the device enabled so the BAR owner can be seen */
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts: if MSI was requested but cannot be enabled,
	 * fall back to legacy INTx. */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);	/* best effort; failure is harmless */
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003064#endif
Brett Russ20f733e2005-09-01 18:26:17 -04003065
/* forward declarations for the platform hooks referenced above */
static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);

/*
 * Module load: register the PCI driver (when CONFIG_PCI is set) and the
 * platform driver.  If the platform registration fails after a
 * successful PCI registration, the latter is rolled back so the module
 * load fails cleanly.  Returns 0 or a negative errno.
 */
static int __init mv_init(void)
{
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	/* undo the PCI registration if the platform half failed */
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif
	return rc;
}
3085
/* Module unload: drop both bus registrations set up by mv_init(). */
static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}
3093
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
/* allows automatic module loading when the platform device is registered */
MODULE_ALIAS("platform:sata_mv");

#ifdef CONFIG_PCI
/* the msi option is only meaningful when PCI support is built in */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);