blob: 9a89390531b1fc0416b789bb48e189a0fb28aeb2 [file] [log] [blame]
/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
23
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  2) Improve/fix IRQ and error handling sequences.

  3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).

  4) Think about TCQ support here, and for libata in general
  with controllers that support it via host-queuing hardware
  (a software-only implementation could be a nightmare).

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is quite often not
  worth the latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

*/
64
Brett Russ20f733e2005-09-01 18:26:17 -040065#include <linux/kernel.h>
66#include <linux/module.h>
67#include <linux/pci.h>
68#include <linux/init.h>
69#include <linux/blkdev.h>
70#include <linux/delay.h>
71#include <linux/interrupt.h>
Andrew Morton8d8b6002008-02-04 23:43:44 -080072#include <linux/dmapool.h>
Brett Russ20f733e2005-09-01 18:26:17 -040073#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050074#include <linux/device.h>
Saeed Bisharaf351b2d2008-02-01 18:08:03 -050075#include <linux/platform_device.h>
76#include <linux/ata_platform.h>
Brett Russ20f733e2005-09-01 18:26:17 -040077#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050078#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040079#include <scsi/scsi_device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040080#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040081
82#define DRV_NAME "sata_mv"
Mark Lord1fd2e1c2008-01-26 18:33:59 -050083#define DRV_VERSION "1.20"
Brett Russ20f733e2005-09-01 18:26:17 -040084
85enum {
86 /* BAR's are enumerated in terms of pci_resource_start() terms */
87 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
88 MV_IO_BAR = 2, /* offset 0x18: IO space */
89 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
90
91 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
92 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
93
94 MV_PCI_REG_BASE = 0,
95 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
Mark Lord615ab952006-05-19 16:24:56 -040096 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
97 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
98 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
99 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
100 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
101
Brett Russ20f733e2005-09-01 18:26:17 -0400102 MV_SATAHC0_REG_BASE = 0x20000,
Jeff Garzik522479f2005-11-12 22:14:02 -0500103 MV_FLASH_CTL = 0x1046c,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500104 MV_GPIO_PORT_CTL = 0x104f0,
105 MV_RESET_CFG = 0x180d8,
Brett Russ20f733e2005-09-01 18:26:17 -0400106
107 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
108 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
109 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
110 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
111
Brett Russ31961942005-09-30 01:36:00 -0400112 MV_MAX_Q_DEPTH = 32,
113 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
114
115 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
116 * CRPB needs alignment on a 256B boundary. Size == 256B
Brett Russ31961942005-09-30 01:36:00 -0400117 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
118 */
119 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
120 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
Mark Lordda2fa9b2008-01-26 18:32:45 -0500121 MV_MAX_SG_CT = 256,
Brett Russ31961942005-09-30 01:36:00 -0400122 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
Brett Russ31961942005-09-30 01:36:00 -0400123
Brett Russ20f733e2005-09-01 18:26:17 -0400124 MV_PORTS_PER_HC = 4,
125 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
126 MV_PORT_HC_SHIFT = 2,
Brett Russ31961942005-09-30 01:36:00 -0400127 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
Brett Russ20f733e2005-09-01 18:26:17 -0400128 MV_PORT_MASK = 3,
129
130 /* Host Flags */
131 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
132 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100133 /* SoC integrated controllers, no PCI interface */
134 MV_FLAG_SOC = (1 << 28),
135
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400136 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400137 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
138 ATA_FLAG_PIO_POLLING,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500139 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
Brett Russ20f733e2005-09-01 18:26:17 -0400140
Brett Russ31961942005-09-30 01:36:00 -0400141 CRQB_FLAG_READ = (1 << 0),
142 CRQB_TAG_SHIFT = 1,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400143 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
144 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
Brett Russ31961942005-09-30 01:36:00 -0400145 CRQB_CMD_ADDR_SHIFT = 8,
146 CRQB_CMD_CS = (0x2 << 11),
147 CRQB_CMD_LAST = (1 << 15),
148
149 CRPB_FLAG_STATUS_SHIFT = 8,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400150 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
151 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
Brett Russ31961942005-09-30 01:36:00 -0400152
153 EPRD_FLAG_END_OF_TBL = (1 << 31),
154
Brett Russ20f733e2005-09-01 18:26:17 -0400155 /* PCI interface registers */
156
Brett Russ31961942005-09-30 01:36:00 -0400157 PCI_COMMAND_OFS = 0xc00,
158
Brett Russ20f733e2005-09-01 18:26:17 -0400159 PCI_MAIN_CMD_STS_OFS = 0xd30,
160 STOP_PCI_MASTER = (1 << 2),
161 PCI_MASTER_EMPTY = (1 << 3),
162 GLOB_SFT_RST = (1 << 4),
163
Jeff Garzik522479f2005-11-12 22:14:02 -0500164 MV_PCI_MODE = 0xd00,
165 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
166 MV_PCI_DISC_TIMER = 0xd04,
167 MV_PCI_MSI_TRIGGER = 0xc38,
168 MV_PCI_SERR_MASK = 0xc28,
169 MV_PCI_XBAR_TMOUT = 0x1d04,
170 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
171 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
172 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
173 MV_PCI_ERR_COMMAND = 0x1d50,
174
Mark Lord02a121d2007-12-01 13:07:22 -0500175 PCI_IRQ_CAUSE_OFS = 0x1d58,
176 PCI_IRQ_MASK_OFS = 0x1d5c,
Brett Russ20f733e2005-09-01 18:26:17 -0400177 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
178
Mark Lord02a121d2007-12-01 13:07:22 -0500179 PCIE_IRQ_CAUSE_OFS = 0x1900,
180 PCIE_IRQ_MASK_OFS = 0x1910,
Mark Lord646a4da2008-01-26 18:30:37 -0500181 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
Mark Lord02a121d2007-12-01 13:07:22 -0500182
Brett Russ20f733e2005-09-01 18:26:17 -0400183 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
184 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500185 HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
186 HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
Brett Russ20f733e2005-09-01 18:26:17 -0400187 PORT0_ERR = (1 << 0), /* shift by port # */
188 PORT0_DONE = (1 << 1), /* shift by port # */
189 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
190 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
191 PCI_ERR = (1 << 18),
192 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
193 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500194 PORTS_0_3_COAL_DONE = (1 << 8),
195 PORTS_4_7_COAL_DONE = (1 << 17),
Brett Russ20f733e2005-09-01 18:26:17 -0400196 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
197 GPIO_INT = (1 << 22),
198 SELF_INT = (1 << 23),
199 TWSI_INT = (1 << 24),
200 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500201 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500202 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
Jeff Garzik8b260242005-11-12 12:32:50 -0500203 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
Brett Russ20f733e2005-09-01 18:26:17 -0400204 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
205 HC_MAIN_RSVD),
Jeff Garzikfb621e22007-02-25 04:19:45 -0500206 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
207 HC_MAIN_RSVD_5),
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500208 HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
Brett Russ20f733e2005-09-01 18:26:17 -0400209
210 /* SATAHC registers */
211 HC_CFG_OFS = 0,
212
213 HC_IRQ_CAUSE_OFS = 0x14,
Brett Russ31961942005-09-30 01:36:00 -0400214 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
Brett Russ20f733e2005-09-01 18:26:17 -0400215 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
216 DEV_IRQ = (1 << 8), /* shift by port # */
217
218 /* Shadow block registers */
Brett Russ31961942005-09-30 01:36:00 -0400219 SHD_BLK_OFS = 0x100,
220 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
Brett Russ20f733e2005-09-01 18:26:17 -0400221
222 /* SATA registers */
223 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
224 SATA_ACTIVE_OFS = 0x350,
Mark Lord0c589122008-01-26 18:31:16 -0500225 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500226 PHY_MODE3 = 0x310,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500227 PHY_MODE4 = 0x314,
228 PHY_MODE2 = 0x330,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500229 MV5_PHY_MODE = 0x74,
230 MV5_LT_MODE = 0x30,
231 MV5_PHY_CTL = 0x0C,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500232 SATA_INTERFACE_CTL = 0x050,
233
234 MV_M2_PREAMP_MASK = 0x7e0,
Brett Russ20f733e2005-09-01 18:26:17 -0400235
236 /* Port registers */
237 EDMA_CFG_OFS = 0,
Mark Lord0c589122008-01-26 18:31:16 -0500238 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
239 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
240 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
241 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
242 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
Brett Russ20f733e2005-09-01 18:26:17 -0400243
244 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
245 EDMA_ERR_IRQ_MASK_OFS = 0xc,
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400246 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
247 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
248 EDMA_ERR_DEV = (1 << 2), /* device error */
249 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
250 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
251 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400252 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
253 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400254 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400255 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400256 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
257 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
258 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
259 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
Mark Lord646a4da2008-01-26 18:30:37 -0500260
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400261 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500262 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
263 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
264 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
265 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
266
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400267 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500268
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400269 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500270 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
271 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
272 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
273 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
274 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
275
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400276 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500277
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400278 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400279 EDMA_ERR_OVERRUN_5 = (1 << 5),
280 EDMA_ERR_UNDERRUN_5 = (1 << 6),
Mark Lord646a4da2008-01-26 18:30:37 -0500281
282 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
283 EDMA_ERR_LNK_CTRL_RX_1 |
284 EDMA_ERR_LNK_CTRL_RX_3 |
285 EDMA_ERR_LNK_CTRL_TX,
286
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400287 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
288 EDMA_ERR_PRD_PAR |
289 EDMA_ERR_DEV_DCON |
290 EDMA_ERR_DEV_CON |
291 EDMA_ERR_SERR |
292 EDMA_ERR_SELF_DIS |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400293 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400294 EDMA_ERR_CRPB_PAR |
295 EDMA_ERR_INTRL_PAR |
296 EDMA_ERR_IORDY |
297 EDMA_ERR_LNK_CTRL_RX_2 |
298 EDMA_ERR_LNK_DATA_RX |
299 EDMA_ERR_LNK_DATA_TX |
300 EDMA_ERR_TRANS_PROTO,
301 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
302 EDMA_ERR_PRD_PAR |
303 EDMA_ERR_DEV_DCON |
304 EDMA_ERR_DEV_CON |
305 EDMA_ERR_OVERRUN_5 |
306 EDMA_ERR_UNDERRUN_5 |
307 EDMA_ERR_SELF_DIS_5 |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400308 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400309 EDMA_ERR_CRPB_PAR |
310 EDMA_ERR_INTRL_PAR |
311 EDMA_ERR_IORDY,
Brett Russ20f733e2005-09-01 18:26:17 -0400312
Brett Russ31961942005-09-30 01:36:00 -0400313 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
314 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400315
316 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
317 EDMA_REQ_Q_PTR_SHIFT = 5,
318
319 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
320 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
321 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400322 EDMA_RSP_Q_PTR_SHIFT = 3,
323
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400324 EDMA_CMD_OFS = 0x28, /* EDMA command register */
325 EDMA_EN = (1 << 0), /* enable EDMA */
326 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
327 ATA_RST = (1 << 2), /* reset trans/link/phy */
Brett Russ20f733e2005-09-01 18:26:17 -0400328
Jeff Garzikc9d39132005-11-13 17:47:51 -0500329 EDMA_IORDY_TMOUT = 0x34,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500330 EDMA_ARB_CFG = 0x38,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500331
Brett Russ31961942005-09-30 01:36:00 -0400332 /* Host private flags (hp_flags) */
333 MV_HP_FLAG_MSI = (1 << 0),
Jeff Garzik47c2b672005-11-12 21:13:17 -0500334 MV_HP_ERRATA_50XXB0 = (1 << 1),
335 MV_HP_ERRATA_50XXB2 = (1 << 2),
336 MV_HP_ERRATA_60X1B2 = (1 << 3),
337 MV_HP_ERRATA_60X1C0 = (1 << 4),
Jeff Garzike4e7b892006-01-31 12:18:41 -0500338 MV_HP_ERRATA_XX42A0 = (1 << 5),
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400339 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
340 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
341 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
Mark Lord02a121d2007-12-01 13:07:22 -0500342 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
Brett Russ20f733e2005-09-01 18:26:17 -0400343
Brett Russ31961942005-09-30 01:36:00 -0400344 /* Port private flags (pp_flags) */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400345 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
Mark Lord72109162008-01-26 18:31:33 -0500346 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400347 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
Brett Russ31961942005-09-30 01:36:00 -0400348};
349
/* Chip-generation and bus-type predicates over hp_flags / port flags. */
#define IS_GEN_I(hpriv)		((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define HAS_PCI(host)		(!((host)->ports[0]->flags & MV_FLAG_SOC))
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500354
enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill-sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
369
/* Index into mv_port_info[]; also the driver_data in mv_pci_tbl[]. */
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};
380
Brett Russ31961942005-09-30 01:36:00 -0400381/* Command ReQuest Block: 32B */
382struct mv_crqb {
Mark Lorde1469872006-05-22 19:02:03 -0400383 __le32 sg_addr;
384 __le32 sg_addr_hi;
385 __le16 ctrl_flags;
386 __le16 ata_cmd[11];
Brett Russ31961942005-09-30 01:36:00 -0400387};
388
Jeff Garzike4e7b892006-01-31 12:18:41 -0500389struct mv_crqb_iie {
Mark Lorde1469872006-05-22 19:02:03 -0400390 __le32 addr;
391 __le32 addr_hi;
392 __le32 flags;
393 __le32 len;
394 __le32 ata_cmd[4];
Jeff Garzike4e7b892006-01-31 12:18:41 -0500395};
396
Brett Russ31961942005-09-30 01:36:00 -0400397/* Command ResPonse Block: 8B */
398struct mv_crpb {
Mark Lorde1469872006-05-22 19:02:03 -0400399 __le16 id;
400 __le16 flags;
401 __le32 tmstmp;
Brett Russ31961942005-09-30 01:36:00 -0400402};
403
404/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
405struct mv_sg {
Mark Lorde1469872006-05-22 19:02:03 -0400406 __le32 addr;
407 __le32 flags_size;
408 __le32 addr_hi;
409 __le32 reserved;
Brett Russ20f733e2005-09-01 18:26:17 -0400410};
411
412struct mv_port_priv {
Brett Russ31961942005-09-30 01:36:00 -0400413 struct mv_crqb *crqb;
414 dma_addr_t crqb_dma;
415 struct mv_crpb *crpb;
416 dma_addr_t crpb_dma;
Mark Lordeb73d552008-01-29 13:24:00 -0500417 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
418 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400419
420 unsigned int req_idx;
421 unsigned int resp_idx;
422
Brett Russ31961942005-09-30 01:36:00 -0400423 u32 pp_flags;
Brett Russ20f733e2005-09-01 18:26:17 -0400424};
425
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500426struct mv_port_signal {
427 u32 amps;
428 u32 pre;
429};
430
Mark Lord02a121d2007-12-01 13:07:22 -0500431struct mv_host_priv {
432 u32 hp_flags;
433 struct mv_port_signal signal[8];
434 const struct mv_hw_ops *ops;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500435 int n_ports;
436 void __iomem *base;
437 void __iomem *main_cause_reg_addr;
438 void __iomem *main_mask_reg_addr;
Mark Lord02a121d2007-12-01 13:07:22 -0500439 u32 irq_cause_ofs;
440 u32 irq_mask_ofs;
441 u32 unmask_all_irqs;
Mark Lordda2fa9b2008-01-26 18:32:45 -0500442 /*
443 * These consistent DMA memory pools give us guaranteed
444 * alignment for hardware-accessed data structures,
445 * and less memory waste in accomplishing the alignment.
446 */
447 struct dma_pool *crqb_pool;
448 struct dma_pool *crpb_pool;
449 struct dma_pool *sg_tbl_pool;
Mark Lord02a121d2007-12-01 13:07:22 -0500450};
451
Jeff Garzik47c2b672005-11-12 21:13:17 -0500452struct mv_hw_ops {
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500453 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
454 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500455 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
456 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
457 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500458 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
459 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500460 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100461 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500462};
463
Tejun Heoda3dbb12007-07-16 14:29:40 +0900464static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
465static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
466static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
467static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Brett Russ31961942005-09-30 01:36:00 -0400468static int mv_port_start(struct ata_port *ap);
469static void mv_port_stop(struct ata_port *ap);
470static void mv_qc_prep(struct ata_queued_cmd *qc);
Jeff Garzike4e7b892006-01-31 12:18:41 -0500471static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
Tejun Heo9a3d9eb2006-01-23 13:09:36 +0900472static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
Tejun Heoa1efdab2008-03-25 12:22:50 +0900473static int mv_prereset(struct ata_link *link, unsigned long deadline);
474static int mv_hardreset(struct ata_link *link, unsigned int *class,
475 unsigned long deadline);
476static void mv_postreset(struct ata_link *link, unsigned int *classes);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400477static void mv_eh_freeze(struct ata_port *ap);
478static void mv_eh_thaw(struct ata_port *ap);
Mark Lordf2738272008-01-26 18:32:29 -0500479static void mv6_dev_config(struct ata_device *dev);
Brett Russ20f733e2005-09-01 18:26:17 -0400480
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500481static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
482 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500483static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
484static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
485 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500486static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
487 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500488static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100489static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500490
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500491static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
492 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500493static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
494static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
495 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500496static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
497 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500498static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500499static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
500 void __iomem *mmio);
501static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
502 void __iomem *mmio);
503static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
504 void __iomem *mmio, unsigned int n_hc);
505static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
506 void __iomem *mmio);
507static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100508static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500509static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
510 unsigned int port_no);
Mark Lord72109162008-01-26 18:31:33 -0500511static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
512 void __iomem *port_mmio, int want_ncq);
513static int __mv_stop_dma(struct ata_port *ap);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500514
Mark Lordeb73d552008-01-29 13:24:00 -0500515/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
516 * because we have to allow room for worst case splitting of
517 * PRDs for 64K boundaries in mv_fill_sg().
518 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400519static struct scsi_host_template mv5_sht = {
Tejun Heo68d1d072008-03-25 12:22:49 +0900520 ATA_BASE_SHT(DRV_NAME),
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400521 .sg_tablesize = MV_MAX_SG_CT / 2,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400522 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400523};
524
525static struct scsi_host_template mv6_sht = {
Tejun Heo68d1d072008-03-25 12:22:49 +0900526 ATA_NCQ_SHT(DRV_NAME),
Mark Lord138bfdd2008-01-26 18:33:18 -0500527 .can_queue = MV_MAX_Q_DEPTH - 1,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400528 .sg_tablesize = MV_MAX_SG_CT / 2,
Brett Russ20f733e2005-09-01 18:26:17 -0400529 .dma_boundary = MV_DMA_BOUNDARY,
Brett Russ20f733e2005-09-01 18:26:17 -0400530};
531
Tejun Heo029cfd62008-03-25 12:22:49 +0900532static struct ata_port_operations mv5_ops = {
533 .inherits = &ata_sff_port_ops,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500534
Jeff Garzikc9d39132005-11-13 17:47:51 -0500535 .qc_prep = mv_qc_prep,
536 .qc_issue = mv_qc_issue,
537
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400538 .freeze = mv_eh_freeze,
539 .thaw = mv_eh_thaw,
Tejun Heoa1efdab2008-03-25 12:22:50 +0900540 .prereset = mv_prereset,
541 .hardreset = mv_hardreset,
542 .postreset = mv_postreset,
543 .error_handler = ata_std_error_handler, /* avoid SFF EH */
Tejun Heo029cfd62008-03-25 12:22:49 +0900544 .post_internal_cmd = ATA_OP_NULL,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400545
Jeff Garzikc9d39132005-11-13 17:47:51 -0500546 .scr_read = mv5_scr_read,
547 .scr_write = mv5_scr_write,
548
549 .port_start = mv_port_start,
550 .port_stop = mv_port_stop,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500551};
552
Tejun Heo029cfd62008-03-25 12:22:49 +0900553static struct ata_port_operations mv6_ops = {
554 .inherits = &mv5_ops,
Mark Lord138bfdd2008-01-26 18:33:18 -0500555 .qc_defer = ata_std_qc_defer,
Tejun Heo029cfd62008-03-25 12:22:49 +0900556 .dev_config = mv6_dev_config,
Brett Russ20f733e2005-09-01 18:26:17 -0400557 .scr_read = mv_scr_read,
558 .scr_write = mv_scr_write,
Brett Russ20f733e2005-09-01 18:26:17 -0400559};
560
Tejun Heo029cfd62008-03-25 12:22:49 +0900561static struct ata_port_operations mv_iie_ops = {
562 .inherits = &mv6_ops,
563 .dev_config = ATA_OP_NULL,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500564 .qc_prep = mv_qc_prep_iie,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500565};
566
Arjan van de Ven98ac62d2005-11-28 10:06:23 +0100567static const struct ata_port_info mv_port_info[] = {
Brett Russ20f733e2005-09-01 18:26:17 -0400568 { /* chip_504x */
Jeff Garzikcca39742006-08-24 03:19:22 -0400569 .flags = MV_COMMON_FLAGS,
Brett Russ31961942005-09-30 01:36:00 -0400570 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400571 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500572 .port_ops = &mv5_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400573 },
574 { /* chip_508x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400575 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
Brett Russ31961942005-09-30 01:36:00 -0400576 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400577 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500578 .port_ops = &mv5_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400579 },
Jeff Garzik47c2b672005-11-12 21:13:17 -0500580 { /* chip_5080 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400581 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500582 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400583 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500584 .port_ops = &mv5_ops,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500585 },
Brett Russ20f733e2005-09-01 18:26:17 -0400586 { /* chip_604x */
Mark Lord138bfdd2008-01-26 18:33:18 -0500587 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
588 ATA_FLAG_NCQ,
Brett Russ31961942005-09-30 01:36:00 -0400589 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400590 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500591 .port_ops = &mv6_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400592 },
593 { /* chip_608x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400594 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
Mark Lord138bfdd2008-01-26 18:33:18 -0500595 ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
Brett Russ31961942005-09-30 01:36:00 -0400596 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400597 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500598 .port_ops = &mv6_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400599 },
Jeff Garzike4e7b892006-01-31 12:18:41 -0500600 { /* chip_6042 */
Mark Lord138bfdd2008-01-26 18:33:18 -0500601 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
602 ATA_FLAG_NCQ,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500603 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400604 .udma_mask = ATA_UDMA6,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500605 .port_ops = &mv_iie_ops,
606 },
607 { /* chip_7042 */
Mark Lord138bfdd2008-01-26 18:33:18 -0500608 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
609 ATA_FLAG_NCQ,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500610 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400611 .udma_mask = ATA_UDMA6,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500612 .port_ops = &mv_iie_ops,
613 },
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500614 { /* chip_soc */
615 .flags = MV_COMMON_FLAGS | MV_FLAG_SOC,
616 .pio_mask = 0x1f, /* pio0-4 */
617 .udma_mask = ATA_UDMA6,
618 .port_ops = &mv_iie_ops,
619 },
Brett Russ20f733e2005-09-01 18:26:17 -0400620};
621
Jeff Garzik3b7d6972005-11-10 11:04:11 -0500622static const struct pci_device_id mv_pci_tbl[] = {
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400623 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
624 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
625 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
626 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
Alan Coxcfbf7232007-07-09 14:38:41 +0100627 /* RocketRAID 1740/174x have different identifiers */
628 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
629 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
Brett Russ20f733e2005-09-01 18:26:17 -0400630
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400631 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
632 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
633 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
634 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
635 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
Jeff Garzik29179532005-11-11 08:08:03 -0500636
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400637 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
638
Florian Attenbergerd9f9c6b2007-07-02 17:09:29 +0200639 /* Adaptec 1430SA */
640 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
641
Mark Lord02a121d2007-12-01 13:07:22 -0500642 /* Marvell 7042 support */
Morrison, Tom6a3d5862007-03-06 02:38:10 -0800643 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
644
Mark Lord02a121d2007-12-01 13:07:22 -0500645 /* Highpoint RocketRAID PCIe series */
646 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
647 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
648
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400649 { } /* terminate list */
Brett Russ20f733e2005-09-01 18:26:17 -0400650};
651
/* Low-level chip-quirk hooks for Gen-I (50xx) controllers. */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata = mv5_phy_errata,
	.enable_leds = mv5_enable_leds,
	.read_preamp = mv5_read_preamp,
	.reset_hc = mv5_reset_hc,
	.reset_flash = mv5_reset_flash,
	.reset_bus = mv5_reset_bus,
};
660
/* Low-level chip-quirk hooks for Gen-II/IIE (60xx/70xx) controllers. */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata = mv6_phy_errata,
	.enable_leds = mv6_enable_leds,
	.read_preamp = mv6_read_preamp,
	.reset_hc = mv6_reset_hc,
	.reset_flash = mv6_reset_flash,
	.reset_bus = mv_reset_pci_bus,
};
669
/*
 * Low-level hooks for system-on-chip (non-PCI) integrations; PHY errata
 * handling is shared with the 6xxx family.
 */
static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata = mv6_phy_errata,
	.enable_leds = mv_soc_enable_leds,
	.read_preamp = mv_soc_read_preamp,
	.reset_hc = mv_soc_reset_hc,
	.reset_flash = mv_soc_reset_flash,
	.reset_bus = mv_soc_reset_bus,
};
678
Brett Russ20f733e2005-09-01 18:26:17 -0400679/*
680 * Functions
681 */
682
/*
 * Write a register, then read it back so the write cannot linger in a
 * PCI posted-write buffer ("fl" = flush).
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
688
Brett Russ20f733e2005-09-01 18:26:17 -0400689static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
690{
691 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
692}
693
Jeff Garzikc9d39132005-11-13 17:47:51 -0500694static inline unsigned int mv_hc_from_port(unsigned int port)
695{
696 return port >> MV_PORT_HC_SHIFT;
697}
698
699static inline unsigned int mv_hardport_from_port(unsigned int port)
700{
701 return port & MV_PORT_MASK;
702}
703
704static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
705 unsigned int port)
706{
707 return mv_hc_base(base, mv_hc_from_port(port));
708}
709
Brett Russ20f733e2005-09-01 18:26:17 -0400710static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
711{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500712 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500713 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500714 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400715}
716
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500717static inline void __iomem *mv_host_base(struct ata_host *host)
718{
719 struct mv_host_priv *hpriv = host->private_data;
720 return hpriv->base;
721}
722
Brett Russ20f733e2005-09-01 18:26:17 -0400723static inline void __iomem *mv_ap_base(struct ata_port *ap)
724{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500725 return mv_port_base(mv_host_base(ap->host), ap->port_no);
Brett Russ20f733e2005-09-01 18:26:17 -0400726}
727
Jeff Garzikcca39742006-08-24 03:19:22 -0400728static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400729{
Jeff Garzikcca39742006-08-24 03:19:22 -0400730 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400731}
732
/**
 *	mv_set_edma_ptrs - Program EDMA request/response queue pointers
 *	@port_mmio: port register base
 *	@hpriv: host private data (for errata flags)
 *	@pp: port private data holding queue DMA addresses and sw indices
 *
 *	Loads the hardware in/out pointer registers for both queues from
 *	the software-maintained indices.  Register write order matters.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* request queue base must be 1KB aligned */
	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	/* errata XX42A0 parts want the full base address in the OUT ptr too */
	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	/* response queue base must be 256-byte aligned */
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
772
Brett Russ05b308e2005-10-05 17:08:53 -0400773/**
774 * mv_start_dma - Enable eDMA engine
775 * @base: port base address
776 * @pp: port private data
777 *
Tejun Heobeec7db2006-02-11 19:11:13 +0900778 * Verify the local cache of the eDMA state is accurate with a
779 * WARN_ON.
Brett Russ05b308e2005-10-05 17:08:53 -0400780 *
781 * LOCKING:
782 * Inherited from caller.
783 */
Mark Lord0c589122008-01-26 18:31:16 -0500784static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
Mark Lord72109162008-01-26 18:31:33 -0500785 struct mv_port_priv *pp, u8 protocol)
Brett Russ31961942005-09-30 01:36:00 -0400786{
Mark Lord72109162008-01-26 18:31:33 -0500787 int want_ncq = (protocol == ATA_PROT_NCQ);
788
789 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
790 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
791 if (want_ncq != using_ncq)
792 __mv_stop_dma(ap);
793 }
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400794 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
Mark Lord0c589122008-01-26 18:31:16 -0500795 struct mv_host_priv *hpriv = ap->host->private_data;
796 int hard_port = mv_hardport_from_port(ap->port_no);
797 void __iomem *hc_mmio = mv_hc_base_from_port(
Saeed Bishara0fca0d62008-02-13 10:09:09 -1100798 mv_host_base(ap->host), hard_port);
Mark Lord0c589122008-01-26 18:31:16 -0500799 u32 hc_irq_cause, ipending;
800
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400801 /* clear EDMA event indicators, if any */
Mark Lordf630d562008-01-26 18:31:00 -0500802 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400803
Mark Lord0c589122008-01-26 18:31:16 -0500804 /* clear EDMA interrupt indicator, if any */
805 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
806 ipending = (DEV_IRQ << hard_port) |
807 (CRPB_DMA_DONE << hard_port);
808 if (hc_irq_cause & ipending) {
809 writelfl(hc_irq_cause & ~ipending,
810 hc_mmio + HC_IRQ_CAUSE_OFS);
811 }
812
Mark Lord72109162008-01-26 18:31:33 -0500813 mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);
Mark Lord0c589122008-01-26 18:31:16 -0500814
815 /* clear FIS IRQ Cause */
816 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
817
Mark Lordf630d562008-01-26 18:31:00 -0500818 mv_set_edma_ptrs(port_mmio, hpriv, pp);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400819
Mark Lordf630d562008-01-26 18:31:00 -0500820 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
Brett Russafb0edd2005-10-05 17:08:42 -0400821 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
822 }
Mark Lordf630d562008-01-26 18:31:00 -0500823 WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
Brett Russ31961942005-09-30 01:36:00 -0400824}
825
/**
 *      __mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      Returns 0 on success, -EIO if the engine refuses to stop.
 *
 *      LOCKING:
 *      Inherited from caller (the locked wrapper is mv_stop_dma()).
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		/* cached state says stopped; hardware should agree */
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop (up to ~100ms) */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
868
/*
 * Locked wrapper around __mv_stop_dma(): takes the host lock so the
 * EDMA state cache in port private data cannot race with the IRQ path.
 */
static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}
880
#ifdef ATA_DEBUG
/* Debug helper: hex-dump @bytes of MMIO space, four 32-bit words per line. */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
895
/* Debug helper: hex-dump the first @bytes of PCI config space. */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			/* debug-only: read errors are deliberately ignored */
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
/*
 * Debug helper: dump PCI config, chip-global, HC, EDMA and SATA registers.
 * @port < 0 means "dump everything" (assumes up to 8 ports / 2 HCs);
 * otherwise only the given port and its HC are dumped.
 */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	/* chip-global register windows (offsets per Marvell datasheet) */
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
956
Brett Russ20f733e2005-09-01 18:26:17 -0400957static unsigned int mv_scr_offset(unsigned int sc_reg_in)
958{
959 unsigned int ofs;
960
961 switch (sc_reg_in) {
962 case SCR_STATUS:
963 case SCR_CONTROL:
964 case SCR_ERROR:
965 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
966 break;
967 case SCR_ACTIVE:
968 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
969 break;
970 default:
971 ofs = 0xffffffffU;
972 break;
973 }
974 return ofs;
975}
976
Tejun Heoda3dbb12007-07-16 14:29:40 +0900977static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -0400978{
979 unsigned int ofs = mv_scr_offset(sc_reg_in);
980
Tejun Heoda3dbb12007-07-16 14:29:40 +0900981 if (ofs != 0xffffffffU) {
982 *val = readl(mv_ap_base(ap) + ofs);
983 return 0;
984 } else
985 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -0400986}
987
Tejun Heoda3dbb12007-07-16 14:29:40 +0900988static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -0400989{
990 unsigned int ofs = mv_scr_offset(sc_reg_in);
991
Tejun Heoda3dbb12007-07-16 14:29:40 +0900992 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -0400993 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +0900994 return 0;
995 } else
996 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -0400997}
998
Mark Lordf2738272008-01-26 18:32:29 -0500999static void mv6_dev_config(struct ata_device *adev)
1000{
1001 /*
1002 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1003 * See mv_qc_prep() for more info.
1004 */
1005 if (adev->flags & ATA_DFLAG_NCQ)
1006 if (adev->max_sectors > ATA_MAX_SECTORS)
1007 adev->max_sectors = ATA_MAX_SECTORS;
1008}
1009
/**
 *	mv_edma_cfg - Program the EDMA configuration register
 *	@pp: port private data (NCQ flag cache is updated here)
 *	@hpriv: host private data (chip generation)
 *	@port_mmio: port register base
 *	@want_ncq: nonzero to configure the engine for NCQ operation
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq)
{
	u32 cfg;

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	/* keep the software NCQ-enabled cache in sync with the hardware */
	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1039
/*
 * Release all per-port DMA pool allocations (CRQB, CRPB, SG tables).
 * Safe to call with partially-initialized state: every pointer is
 * NULL-checked and cleared, so mv_port_start() error paths can reuse it.
 */
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 * (On GEN_I, slots 1..n alias slot 0 and must be freed only once.)
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}
1068
/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      Returns 0 on success, -ENOMEM on any allocation failure (all
 *      partial allocations are released via mv_port_free_dma_mem()).
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;
	int tag;

	/* devm-managed, so pp itself needs no explicit free on error */
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			/* GEN_I: all tags share the tag-0 table */
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(pp, hpriv, port_mmio, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
1136
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.  The engine must be halted
 *      before the DMA pools backing its queues are released.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
	mv_port_free_dma_mem(ap);
}
1151
/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.  Each ePRD entry
 *      can describe at most 64KB and must not cross a 64KB boundary,
 *      so large segments are split here.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	/* one SG table per NCQ tag (shared table on Gen-I) */
	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			/* clamp so the entry never crosses a 64KB boundary */
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	/* hardware stops walking the table at this flag */
	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
1195
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001196static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001197{
Mark Lord559eeda2006-05-19 16:40:15 -04001198 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001199 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001200 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001201}
1202
Brett Russ05b308e2005-10-05 17:08:53 -04001203/**
1204 * mv_qc_prep - Host specific command preparation.
1205 * @qc: queued command to prepare
1206 *
1207 * This routine simply redirects to the general purpose routine
1208 * if command is not DMA. Else, it handles prep of the CRQB
1209 * (command request block), does some sanity checking, and calls
1210 * the SG load routine.
1211 *
1212 * LOCKING:
1213 * Inherited from caller.
1214 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* only DMA and NCQ protocols go through the EDMA request queue */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* point the CRQB at this tag's ePRD scatter/gather table */
	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accomodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		/* NCQ carries the sector count in the feature registers */
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	/* remaining registers, in the order the hardware expects */
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1292
1293/**
1294 * mv_qc_prep_iie - Host specific command preparation.
1295 * @qc: queued command to prepare
1296 *
1297 * This routine simply redirects to the general purpose routine
1298 * if command is not DMA. Else, it handles prep of the CRQB
1299 * (command request block), does some sanity checking, and calls
1300 * the SG load routine.
1301 *
1302 * LOCKING:
1303 * Inherited from caller.
1304 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* only DMA and NCQ protocols go through the EDMA request queue */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* Gen-IIE uses a different CRQB layout than Gen-I/II */
	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	/* taskfile registers are packed into four 32-bit words */
	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1361
/**
 *	mv_qc_issue - Initiate a command to the host
 *	@qc: queued command to start
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it sanity checks our local
 *	caches of the request producer/consumer indices then enables
 *	DMA and bumps the request producer index.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	/* (re)enable EDMA if needed for this protocol before touching
	 * the request queue index registers
	 */
	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	/* advance the software request producer index */
	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1403
/**
 *	mv_err_intr - Handle error interrupts on the port
 *	@ap: ATA channel to manipulate
 *	@qc: affected command (may be NULL), gets the error mask if non-NULL
 *
 *	In most cases, just clear the interrupt and move on.  However,
 *	some cases require an eDMA reset, which is done right before
 *	the COMRESET in mv_phy_reset().  The SERR case requires a
 *	clear of pending errors in the SATA SERROR register.  Finally,
 *	if the port disabled DMA, update our cached copy to match.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */

	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	/* Gen I (50xx) uses a different freeze mask and self-disable bit
	 * than Gen II/IIE
	 */
	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			pp = ap->private_data;
			/* hardware turned EDMA off; sync our cached flag */
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp = ap->private_data;
			/* hardware turned EDMA off; sync our cached flag */
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	/* freeze the port for fatal causes, otherwise just abort */
	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
1508
1509static void mv_intr_pio(struct ata_port *ap)
1510{
1511 struct ata_queued_cmd *qc;
1512 u8 ata_status;
1513
1514 /* ignore spurious intr if drive still BUSY */
1515 ata_status = readb(ap->ioaddr.status_addr);
1516 if (unlikely(ata_status & ATA_BUSY))
1517 return;
1518
1519 /* get active ATA command */
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001520 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001521 if (unlikely(!qc)) /* no active tag */
1522 return;
1523 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1524 return;
1525
1526 /* and finally, complete the ATA command */
1527 qc->err_mask |= ac_err_mask(ata_status);
1528 ata_qc_complete(qc);
1529}
1530
/* Process completed EDMA responses: walk the response queue from our
 * cached consumer index up to the hardware producer index, completing
 * each finished command, then publish the new consumer index back to
 * the hardware.  On a per-command error, hand off to mv_err_intr().
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			/* error path: leave resp_idx untouched and bail */
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	/* publish the new consumer index only once, after the loop */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1596
/**
 *	mv_host_intr - Handle all interrupts on the given host controller
 *	@host: host specific structure
 *	@relevant: port error bits relevant to this host controller
 *	@hc: which host controller we're to look at
 *
 *	Read then write clear the HC interrupt status then walk each
 *	port connected to the HC and see if it needs servicing.  Port
 *	success ints are reported in the HC interrupt status reg, the
 *	port error ints are reported in the higher level main
 *	interrupt status register and thus are passed in via the
 *	'relevant' argument.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0, last_port;

	/* hc 0 serves ports 0..3, hc 1 serves ports 4..7 */
	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* non-PCI (SoC) hosts may have fewer than MV_PORTS_PER_HC ports */
	if (HAS_PCI(host))
		last_port = port0 + MV_PORTS_PER_HC;
	else
		last_port = port0 + hpriv->n_ports;
	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	/* write-clear the bits we just read */
	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < last_port; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		pp = ap->private_data;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			/* polled commands are handled elsewhere */
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		/* dispatch on completion mode: EDMA vs. shadow-register PIO */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}
1679
/* Handle a PCI error interrupt: log and clear the PCI IRQ cause, then
 * freeze every online port for EH recovery, attributing the error to
 * each port's active command when one exists.
 */
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	/* clear the error cause so it does not re-fire */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			/* describe the cause only on the first port */
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
1719
/**
 *	mv_interrupt - Main interrupt event handler
 *	@irq: unused
 *	@dev_instance: private data; in this case the host structure
 *
 *	Read the read only register to determine if any host
 *	controllers have pending interrupts.  If so, call lower level
 *	routine to handle.  Also check for PCI errors which are only
 *	reported here.
 *
 *	LOCKING:
 *	This routine holds the host lock while processing pending
 *	interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = hpriv->base;
	u32 irq_stat, irq_mask;

	spin_lock(&host->lock);

	irq_stat = readl(hpriv->main_cause_reg_addr);
	irq_mask = readl(hpriv->main_mask_reg_addr);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	/* PCI errors are reported only here, and preempt HC handling */
	if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	/* service each host controller that has relevant bits pending */
	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1774
Jeff Garzikc9d39132005-11-13 17:47:51 -05001775static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1776{
1777 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1778 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1779
1780 return hc_mmio + ofs;
1781}
1782
1783static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1784{
1785 unsigned int ofs;
1786
1787 switch (sc_reg_in) {
1788 case SCR_STATUS:
1789 case SCR_ERROR:
1790 case SCR_CONTROL:
1791 ofs = sc_reg_in * sizeof(u32);
1792 break;
1793 default:
1794 ofs = 0xffffffffU;
1795 break;
1796 }
1797 return ofs;
1798}
1799
Tejun Heoda3dbb12007-07-16 14:29:40 +09001800static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001801{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001802 struct mv_host_priv *hpriv = ap->host->private_data;
1803 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001804 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001805 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1806
Tejun Heoda3dbb12007-07-16 14:29:40 +09001807 if (ofs != 0xffffffffU) {
1808 *val = readl(addr + ofs);
1809 return 0;
1810 } else
1811 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001812}
1813
Tejun Heoda3dbb12007-07-16 14:29:40 +09001814static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001815{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001816 struct mv_host_priv *hpriv = ap->host->private_data;
1817 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001818 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001819 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1820
Tejun Heoda3dbb12007-07-16 14:29:40 +09001821 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001822 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001823 return 0;
1824 } else
1825 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001826}
1827
/* 50xx bus reset.  On everything except rev-0 5080 parts, set bit 0 of
 * the expansion ROM BAR control register before performing the common
 * PCI bus reset.
 */
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}
1843
/* Restore the 50xx flash controller register to its default value */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1848
/* Cache the 50xx PHY pre-emphasis and amplitude fields for port @idx,
 * so mv5_phy_errata() can restore them after PHY mode fixups.
 */
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}
1860
/* Configure 50xx GPIO/LED behaviour */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): this ORs in ~1, i.e. sets every bit except bit 0,
	 * while mv5_reset_bus() does "tmp |= (1 << 0)".  Possibly intended
	 * as "tmp &= ~(1 << 0)" (clear bit 0) — verify against the Marvell
	 * datasheet before changing; left as-is to preserve behavior.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1873
/* Apply 50xx PHY errata workarounds to @port, then restore the saved
 * pre-emphasis/amplitude signal values captured by mv5_read_preamp().
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	/* mask covering the pre (12:11) and amps (7:5) fields */
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	/* 50XXB0 errata: adjust LT mode and PHY control */
	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* rewrite the signal fields with the values saved at init time */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1899
Jeff Garzikc9d39132005-11-13 17:47:51 -05001900
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Reset one 50xx port: disable EDMA, reset the channel, then zero the
 * per-port EDMA registers and restore default config/timeout values.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/* disable EDMA before touching anything else */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1927
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Reset one 50xx host controller: zero its IRQ/config registers and
 * reprogram the register at offset 0x20 to its default bit pattern.
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
1946
1947static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1948 unsigned int n_hc)
1949{
1950 unsigned int hc, port;
1951
1952 for (hc = 0; hc < n_hc; hc++) {
1953 for (port = 0; port < MV_PORTS_PER_HC; port++)
1954 mv5_reset_hc_port(hpriv, mmio,
1955 (hc * MV_PORTS_PER_HC) + port);
1956
1957 mv5_reset_one_hc(hpriv, mmio, hc);
1958 }
1959
1960 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05001961}
1962
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
/* Common PCI-side reset: restore PCI mode bits and zero all PCI error,
 * interrupt and timer registers (per-chip cause/mask offsets come from
 * hpriv).
 */
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
1987
/* 60xx flash reset: perform the common 50xx flash reset, then adjust
 * the GPIO port control register (keep low two bits, set bits 5 and 6).
 */
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
1999
/**
 *	mv6_reset_hc - Perform the 6xxx global soft reset
 *	@hpriv: host private data (unused here, kept for interface symmetry)
 *	@mmio: base address of the HBA
 *	@n_hc: number of host controllers (unused here)
 *
 *	This routine only applies to 6xxx parts.  Returns 0 on success,
 *	1 if any step of the reset sequence times out.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* wait up to ~1ms for the PCI master to go idle */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
2063
/* Cache the 60xx PHY amplitude/pre-emphasis fields for port @idx.
 * If bit 0 of the reset-config register is clear, the per-port values
 * are not valid, so fall back to fixed defaults instead.
 */
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		/* defaults when the signal config is unavailable */
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
2083
/* Configure 60xx GPIO/LED behaviour (bits 5 and 6 of GPIO port control) */
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
2088
/* Apply 60xx/IIE PHY errata workarounds to @port (PHY mode 2/3/4
 * fixups for 60X1B2/60X1C0 parts), then restore the saved signal
 * amplitude/pre-emphasis values captured by mv6_read_preamp().
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	/* pulse bit 31 (and clear bit 16) in PHY mode 2, per errata */
	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* 60X1B2: register 0x310 must be preserved across the
		 * PHY_MODE4 update, so save it here and restore below
		 */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
2154
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002155/* TODO: use the generic LED interface to configure the SATA Presence */
2156/* & Acitivy LEDs on the board */
2157static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2158 void __iomem *mmio)
2159{
2160 return;
2161}
2162
2163static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2164 void __iomem *mmio)
2165{
2166 void __iomem *port_mmio;
2167 u32 tmp;
2168
2169 port_mmio = mv_port_base(mmio, idx);
2170 tmp = readl(port_mmio + PHY_MODE2);
2171
2172 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2173 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2174}
2175
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/*
 * mv_soc_reset_hc_port - reset and re-initialize one SoC SATA port.
 * @hpriv: host private data
 * @mmio: controller register base
 * @port: port number to reset
 *
 * Disables EDMA, hard-resets the channel, then clears the EDMA queue
 * and control registers back to a known state.  The write order below
 * mirrors the PCI-variant port reset; keep it as-is.
 */
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/* disable EDMA before touching anything else */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
2201
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
/*
 * mv_soc_reset_one_hc - zero selected host-controller registers.
 * @hpriv: host private data (unused here, kept for ops-vtable symmetry)
 * @mmio: controller register base
 *
 * SoC variants have a single host controller (index 0); this clears
 * its registers at offsets 0x00c, 0x010 and 0x014.
 */
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);

}
2215
2216#undef ZERO
2217
2218static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2219 void __iomem *mmio, unsigned int n_hc)
2220{
2221 unsigned int port;
2222
2223 for (port = 0; port < hpriv->n_ports; port++)
2224 mv_soc_reset_hc_port(hpriv, mmio, port);
2225
2226 mv_soc_reset_one_hc(hpriv, mmio);
2227
2228 return 0;
2229}
2230
2231static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2232 void __iomem *mmio)
2233{
2234 return;
2235}
2236
2237static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2238{
2239 return;
2240}
2241
/*
 * mv_channel_reset - assert and release ATA_RST on one channel's EDMA.
 * @hpriv: host private data (selects generation-specific behavior)
 * @mmio: controller register base
 * @port_no: channel to reset
 *
 * Holds ATA_RST while (on Gen-II) forcing the interface-control value
 * from the chip spec, waits for the reset to propagate, releases it,
 * then runs the per-chip PHY errata hook.  Timing below follows the
 * vendor driver; do not reorder.
 */
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	/* Gen-II: program interface control while ATA_RST is asserted */
	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);	/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	/* Gen-I silicon needs an extra settle delay after reset */
	if (IS_GEN_I(hpriv))
		mdelay(1);
}
2268
/**
 * mv_phy_reset - Perform eDMA reset followed by COMRESET
 * @ap: ATA channel to manipulate
 * @class: out: resulting device class (ATA_DEV_NONE if link offline)
 * @deadline: jiffies value after which waiting is abandoned
 *
 * Issues a COMRESET via SControl, polls SStatus until the link settles
 * (with a Gen-II errata retry), then classifies the attached device
 * from the taskfile registers.
 *
 * LOCKING:
 * Inherited from caller.  NOTE(review): an earlier comment claimed this
 * routine never sleeps, but it calls msleep() throughout — it must not
 * be invoked from atomic/interrupt context.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			 unsigned long deadline)
{
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
	msleep(1);

	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
	msleep(20);

	/* poll SStatus DET field until link is established or offline */
	do {
		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	if (ata_link_offline(&ap->link)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		msleep(500);
		if (retry-- <= 0)
			break;
		if (time_after(jiffies, deadline))
			break;
	}

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap->link.device, 1, NULL);

	/* clear any latched EDMA error causes from the reset */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* EDMA must have been stopped before resetting the channel */
	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}
2372
Tejun Heocc0680a2007-08-06 18:36:23 +09002373static int mv_prereset(struct ata_link *link, unsigned long deadline)
Jeff Garzik22374672005-11-17 10:59:48 -05002374{
Tejun Heocc0680a2007-08-06 18:36:23 +09002375 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002376 struct mv_port_priv *pp = ap->private_data;
Jeff Garzik0ea9e172007-07-13 17:06:45 -04002377
Tejun Heocf480622008-01-24 00:05:14 +09002378 mv_stop_dma(ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002379
Tejun Heocf480622008-01-24 00:05:14 +09002380 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET))
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002381 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002382
Tejun Heocf480622008-01-24 00:05:14 +09002383 return 0;
Jeff Garzik22374672005-11-17 10:59:48 -05002384}
2385
Tejun Heocc0680a2007-08-06 18:36:23 +09002386static int mv_hardreset(struct ata_link *link, unsigned int *class,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002387 unsigned long deadline)
2388{
Tejun Heocc0680a2007-08-06 18:36:23 +09002389 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002390 struct mv_host_priv *hpriv = ap->host->private_data;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002391 void __iomem *mmio = hpriv->base;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002392
2393 mv_stop_dma(ap);
2394
2395 mv_channel_reset(hpriv, mmio, ap->port_no);
2396
2397 mv_phy_reset(ap, class, deadline);
2398
2399 return 0;
2400}
2401
Tejun Heocc0680a2007-08-06 18:36:23 +09002402static void mv_postreset(struct ata_link *link, unsigned int *classes)
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002403{
Tejun Heocc0680a2007-08-06 18:36:23 +09002404 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002405 u32 serr;
2406
2407 /* print link status */
Tejun Heocc0680a2007-08-06 18:36:23 +09002408 sata_print_link_status(link);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002409
2410 /* clear SError */
Tejun Heocc0680a2007-08-06 18:36:23 +09002411 sata_scr_read(link, SCR_ERROR, &serr);
2412 sata_scr_write_flush(link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002413
2414 /* bail out if no device is present */
2415 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2416 DPRINTK("EXIT, no device\n");
2417 return;
2418 }
2419
2420 /* set up device control */
2421 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2422}
2423
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002424static void mv_eh_freeze(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04002425{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002426 struct mv_host_priv *hpriv = ap->host->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002427 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2428 u32 tmp, mask;
2429 unsigned int shift;
Brett Russ31961942005-09-30 01:36:00 -04002430
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002431 /* FIXME: handle coalescing completion events properly */
Brett Russ31961942005-09-30 01:36:00 -04002432
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002433 shift = ap->port_no * 2;
2434 if (hc > 0)
2435 shift++;
Brett Russ31961942005-09-30 01:36:00 -04002436
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002437 mask = 0x3 << shift;
Brett Russ31961942005-09-30 01:36:00 -04002438
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002439 /* disable assertion of portN err, done events */
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002440 tmp = readl(hpriv->main_mask_reg_addr);
2441 writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002442}
2443
/*
 * mv_eh_thaw - re-enable interrupt delivery for one port after EH.
 * @ap: port being thawed
 *
 * Clears the port's latched EDMA error causes and any pending
 * CRPB-done/device interrupts on its host controller first, then
 * re-enables the port's err/done bits in the chip-wide main IRQ mask.
 * The clear-before-unmask order matters: unmasking first could fire
 * a stale interrupt.
 */
static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	/* ports 0-3 are on HC 0, ports 4-7 on HC 1 */
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;	/* port index within its HC */
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp | mask, hpriv->main_mask_reg_addr);
}
2477
/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	/* shadow registers are u32-spaced within the SHD block */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	/* SError is write-1-to-clear: write back what we read */
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
2526
/**
 * mv_chip_id - select per-chip ops and errata flags from the board index.
 * @host: ATA host being initialized
 * @board_idx: index identifying the chip family
 *
 * Chooses the hardware-ops vtable, records generation and
 * revision-specific errata flags, and picks the PCI vs PCIe IRQ
 * register offsets.  Returns 0 on success, 1 for an unknown index.
 */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* fall through: 7042 shares the 6042 setup below */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	/* PCIe parts use a different IRQ cause/mask register layout */
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
2666
/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 * @board_idx: controller index
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	/* PCI and SoC variants expose the main IRQ regs at different
	 * offsets; record the right pair for later use */
	if (HAS_PCI(host)) {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_MASK_OFS;
	}
	/* global interrupt mask */
	writel(0, hpriv->main_mask_reg_addr);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* capture per-port PHY settings before the reset clobbers them */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		/* Gen-II: force gen2i speed, value from chip spec */
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (HAS_PCI(host)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (HAS_PCI(host)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
		if (IS_GEN_I(hpriv))
			writelfl(~HC_MAIN_MASKED_IRQS_5,
				 hpriv->main_mask_reg_addr);
		else
			writelfl(~HC_MAIN_MASKED_IRQS,
				 hpriv->main_mask_reg_addr);

		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
			"PCI int cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr),
			readl(mmio + hpriv->irq_cause_ofs),
			readl(mmio + hpriv->irq_mask_ofs));
	} else {
		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
			 hpriv->main_mask_reg_addr);
		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr));
	}
done:
	return rc;
}
2783
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002784static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2785{
2786 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2787 MV_CRQB_Q_SZ, 0);
2788 if (!hpriv->crqb_pool)
2789 return -ENOMEM;
2790
2791 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2792 MV_CRPB_Q_SZ, 0);
2793 if (!hpriv->crpb_pool)
2794 return -ENOMEM;
2795
2796 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2797 MV_SG_TBL_SZ, 0);
2798 if (!hpriv->sg_tbl_pool)
2799 return -ENOMEM;
2800
2801 return 0;
2802}
2803
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002804/**
2805 * mv_platform_probe - handle a positive probe of an soc Marvell
2806 * host
2807 * @pdev: platform device found
2808 *
2809 * LOCKING:
2810 * Inherited from caller.
2811 */
2812static int mv_platform_probe(struct platform_device *pdev)
2813{
2814 static int printed_version;
2815 const struct mv_sata_platform_data *mv_platform_data;
2816 const struct ata_port_info *ppi[] =
2817 { &mv_port_info[chip_soc], NULL };
2818 struct ata_host *host;
2819 struct mv_host_priv *hpriv;
2820 struct resource *res;
2821 int n_ports, rc;
2822
2823 if (!printed_version++)
2824 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2825
2826 /*
2827 * Simple resource validation ..
2828 */
2829 if (unlikely(pdev->num_resources != 2)) {
2830 dev_err(&pdev->dev, "invalid number of resources\n");
2831 return -EINVAL;
2832 }
2833
2834 /*
2835 * Get the register base first
2836 */
2837 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2838 if (res == NULL)
2839 return -EINVAL;
2840
2841 /* allocate host */
2842 mv_platform_data = pdev->dev.platform_data;
2843 n_ports = mv_platform_data->n_ports;
2844
2845 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2846 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2847
2848 if (!host || !hpriv)
2849 return -ENOMEM;
2850 host->private_data = hpriv;
2851 hpriv->n_ports = n_ports;
2852
2853 host->iomap = NULL;
Saeed Bisharaf1cb0ea2008-02-18 07:42:28 -11002854 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2855 res->end - res->start + 1);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002856 hpriv->base -= MV_SATAHC0_REG_BASE;
2857
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002858 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2859 if (rc)
2860 return rc;
2861
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002862 /* initialize adapter */
2863 rc = mv_init_host(host, chip_soc);
2864 if (rc)
2865 return rc;
2866
2867 dev_printk(KERN_INFO, &pdev->dev,
2868 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2869 host->n_ports);
2870
2871 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2872 IRQF_SHARED, &mv6_sht);
2873}
2874
2875/*
2876 *
2877 * mv_platform_remove - unplug a platform interface
2878 * @pdev: platform device
2879 *
2880 * A platform bus SATA device has been unplugged. Perform the needed
2881 * cleanup. Also called on module unload for any active devices.
2882 */
2883static int __devexit mv_platform_remove(struct platform_device *pdev)
2884{
2885 struct device *dev = &pdev->dev;
2886 struct ata_host *host = dev_get_drvdata(dev);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002887
2888 ata_host_detach(host);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002889 return 0;
2890}
2891
/* Platform (SoC) bus glue: matches devices registered under DRV_NAME */
static struct platform_driver mv_platform_driver = {
	.probe			= mv_platform_probe,
	.remove			= __devexit_p(mv_platform_remove),
	.driver			= {
				   .name = DRV_NAME,
				   .owner = THIS_MODULE,
				  },
};
2900
2901
#ifdef CONFIG_PCI
/* forward declaration: definition follows the PCI-only helpers below */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);


/* PCI bus glue: probe/remove for PCI-attached Marvell controllers */
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
};

/*
 * module options
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
2918
2919
2920/* move to PCI layer or libata core? */
2921static int pci_go_64(struct pci_dev *pdev)
2922{
2923 int rc;
2924
2925 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2926 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2927 if (rc) {
2928 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2929 if (rc) {
2930 dev_printk(KERN_ERR, &pdev->dev,
2931 "64-bit DMA enable failed\n");
2932 return rc;
2933 }
2934 }
2935 } else {
2936 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2937 if (rc) {
2938 dev_printk(KERN_ERR, &pdev->dev,
2939 "32-bit DMA enable failed\n");
2940 return rc;
2941 }
2942 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2943 if (rc) {
2944 dev_printk(KERN_ERR, &pdev->dev,
2945 "32-bit consistent DMA enable failed\n");
2946 return rc;
2947 }
2948 }
2949
2950 return rc;
2951}
2952
Brett Russ05b308e2005-10-05 17:08:53 -04002953/**
2954 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002955 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002956 *
2957 * FIXME: complete this.
2958 *
2959 * LOCKING:
2960 * Inherited from caller.
2961 */
Tejun Heo4447d352007-04-17 23:44:08 +09002962static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002963{
Tejun Heo4447d352007-04-17 23:44:08 +09002964 struct pci_dev *pdev = to_pci_dev(host->dev);
2965 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07002966 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002967 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002968
2969 /* Use this to determine the HW stepping of the chip so we know
2970 * what errata to workaround
2971 */
Brett Russ31961942005-09-30 01:36:00 -04002972 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2973 if (scc == 0)
2974 scc_s = "SCSI";
2975 else if (scc == 0x01)
2976 scc_s = "RAID";
2977 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002978 scc_s = "?";
2979
2980 if (IS_GEN_I(hpriv))
2981 gen = "I";
2982 else if (IS_GEN_II(hpriv))
2983 gen = "II";
2984 else if (IS_GEN_IIE(hpriv))
2985 gen = "IIE";
2986 else
2987 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002988
Jeff Garzika9524a72005-10-30 14:39:11 -05002989 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002990 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2991 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002992 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2993}
2994
/**
 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * Allocates the host, acquires PCI resources (all devres-managed, so
 * error paths return directly), configures DMA masks and pools,
 * initializes the adapter, and activates the host.  The ordering of
 * these steps is deliberate: resources must be mapped before
 * mv_init_host() touches the registers.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	/* board index selects the per-chip port info / errata profile */
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		/* BAR busy: pin the device so a later release can't
		 * hand it to another driver in a half-enabled state */
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	/* pick the widest DMA mask the platform supports */
	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	/* if MSI was requested but cannot be enabled, force legacy INTx on */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	/* MWI is a best-effort optimization; failure is ignored on purpose */
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003064#endif
Brett Russ20f733e2005-09-01 18:26:17 -04003065
/* NOTE(review): these forward declarations are redundant — both functions
 * are already defined above; harmless, kept for safety */
static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);
3068
Brett Russ20f733e2005-09-01 18:26:17 -04003069static int __init mv_init(void)
3070{
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003071 int rc = -ENODEV;
3072#ifdef CONFIG_PCI
3073 rc = pci_register_driver(&mv_pci_driver);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003074 if (rc < 0)
3075 return rc;
3076#endif
3077 rc = platform_driver_register(&mv_platform_driver);
3078
3079#ifdef CONFIG_PCI
3080 if (rc < 0)
3081 pci_unregister_driver(&mv_pci_driver);
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003082#endif
3083 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04003084}
3085
/* module exit: unregister both bus drivers registered by mv_init() */
static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}
3093
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
/* export the PCI ID table so udev/modprobe can autoload on device match */
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
/* allow autoloading when the SoC platform device is registered */
MODULE_ALIAS("platform:sata_mv");

#ifdef CONFIG_PCI
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);