blob: a4944c8ad46d123779c953b33ac65d44f98d9073 [file] [log] [blame]
Brett Russ20f733e2005-09-01 18:26:17 -04001/*
2 * sata_mv.c - Marvell SATA support
3 *
Jeff Garzik8b260242005-11-12 12:32:50 -05004 * Copyright 2005: EMC Corporation, all rights reserved.
Jeff Garzike2b1be52005-11-18 14:04:23 -05005 * Copyright 2005 Red Hat, Inc. All rights reserved.
Brett Russ20f733e2005-09-01 18:26:17 -04006 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
Jeff Garzik4a05e202007-05-24 23:40:15 -040024/*
25 sata_mv TODO list:
26
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember a couple workarounds (one related to PCI-X)
30 are still needed.
31
Mark Lord1fd2e1c2008-01-26 18:33:59 -050032 2) Improve/fix IRQ and error handling sequences.
33
34 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
35
36 4) Think about TCQ support here, and for libata in general
37 with controllers that suppport it via host-queuing hardware
38 (a software-only implementation could be a nightmare).
Jeff Garzik4a05e202007-05-24 23:40:15 -040039
40 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
41
42 6) Add port multiplier support (intermediate)
43
Jeff Garzik4a05e202007-05-24 23:40:15 -040044 8) Develop a low-power-consumption strategy, and implement it.
45
46 9) [Experiment, low priority] See if ATAPI can be supported using
47 "unknown FIS" or "vendor-specific FIS" support, or something creative
48 like that.
49
50 10) [Experiment, low priority] Investigate interrupt coalescing.
51 Quite often, especially with PCI Message Signalled Interrupts (MSI),
52 the overhead reduced by interrupt mitigation is quite often not
53 worth the latency cost.
54
55 11) [Experiment, Marvell value added] Is it possible to use target
56 mode to cross-connect two Linux boxes with Marvell cards? If so,
57 creating LibATA target mode support would be very interesting.
58
59 Target mode, for those without docs, is the ability to directly
60 connect two SATA controllers.
61
Jeff Garzik4a05e202007-05-24 23:40:15 -040062*/
63
64
Brett Russ20f733e2005-09-01 18:26:17 -040065#include <linux/kernel.h>
66#include <linux/module.h>
67#include <linux/pci.h>
68#include <linux/init.h>
69#include <linux/blkdev.h>
70#include <linux/delay.h>
71#include <linux/interrupt.h>
Andrew Morton8d8b6002008-02-04 23:43:44 -080072#include <linux/dmapool.h>
Brett Russ20f733e2005-09-01 18:26:17 -040073#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050074#include <linux/device.h>
Saeed Bisharaf351b2d2008-02-01 18:08:03 -050075#include <linux/platform_device.h>
76#include <linux/ata_platform.h>
Brett Russ20f733e2005-09-01 18:26:17 -040077#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050078#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040079#include <scsi/scsi_device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040080#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040081
82#define DRV_NAME "sata_mv"
Mark Lord1fd2e1c2008-01-26 18:33:59 -050083#define DRV_VERSION "1.20"
Brett Russ20f733e2005-09-01 18:26:17 -040084
85enum {
86 /* BAR's are enumerated in terms of pci_resource_start() terms */
87 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
88 MV_IO_BAR = 2, /* offset 0x18: IO space */
89 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
90
91 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
92 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
93
94 MV_PCI_REG_BASE = 0,
95 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
Mark Lord615ab952006-05-19 16:24:56 -040096 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
97 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
98 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
99 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
100 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
101
Brett Russ20f733e2005-09-01 18:26:17 -0400102 MV_SATAHC0_REG_BASE = 0x20000,
Jeff Garzik522479f2005-11-12 22:14:02 -0500103 MV_FLASH_CTL = 0x1046c,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500104 MV_GPIO_PORT_CTL = 0x104f0,
105 MV_RESET_CFG = 0x180d8,
Brett Russ20f733e2005-09-01 18:26:17 -0400106
107 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
108 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
109 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
110 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
111
Brett Russ31961942005-09-30 01:36:00 -0400112 MV_MAX_Q_DEPTH = 32,
113 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
114
115 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
116 * CRPB needs alignment on a 256B boundary. Size == 256B
Brett Russ31961942005-09-30 01:36:00 -0400117 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
118 */
119 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
120 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
Mark Lordda2fa9b2008-01-26 18:32:45 -0500121 MV_MAX_SG_CT = 256,
Brett Russ31961942005-09-30 01:36:00 -0400122 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
Brett Russ31961942005-09-30 01:36:00 -0400123
Brett Russ20f733e2005-09-01 18:26:17 -0400124 MV_PORTS_PER_HC = 4,
125 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
126 MV_PORT_HC_SHIFT = 2,
Brett Russ31961942005-09-30 01:36:00 -0400127 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
Brett Russ20f733e2005-09-01 18:26:17 -0400128 MV_PORT_MASK = 3,
129
130 /* Host Flags */
131 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
132 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100133 /* SoC integrated controllers, no PCI interface */
134 MV_FLAG_SOC = (1 << 28),
135
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400136 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400137 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
138 ATA_FLAG_PIO_POLLING,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500139 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
Brett Russ20f733e2005-09-01 18:26:17 -0400140
Brett Russ31961942005-09-30 01:36:00 -0400141 CRQB_FLAG_READ = (1 << 0),
142 CRQB_TAG_SHIFT = 1,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400143 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
144 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
Brett Russ31961942005-09-30 01:36:00 -0400145 CRQB_CMD_ADDR_SHIFT = 8,
146 CRQB_CMD_CS = (0x2 << 11),
147 CRQB_CMD_LAST = (1 << 15),
148
149 CRPB_FLAG_STATUS_SHIFT = 8,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400150 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
151 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
Brett Russ31961942005-09-30 01:36:00 -0400152
153 EPRD_FLAG_END_OF_TBL = (1 << 31),
154
Brett Russ20f733e2005-09-01 18:26:17 -0400155 /* PCI interface registers */
156
Brett Russ31961942005-09-30 01:36:00 -0400157 PCI_COMMAND_OFS = 0xc00,
158
Brett Russ20f733e2005-09-01 18:26:17 -0400159 PCI_MAIN_CMD_STS_OFS = 0xd30,
160 STOP_PCI_MASTER = (1 << 2),
161 PCI_MASTER_EMPTY = (1 << 3),
162 GLOB_SFT_RST = (1 << 4),
163
Jeff Garzik522479f2005-11-12 22:14:02 -0500164 MV_PCI_MODE = 0xd00,
165 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
166 MV_PCI_DISC_TIMER = 0xd04,
167 MV_PCI_MSI_TRIGGER = 0xc38,
168 MV_PCI_SERR_MASK = 0xc28,
169 MV_PCI_XBAR_TMOUT = 0x1d04,
170 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
171 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
172 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
173 MV_PCI_ERR_COMMAND = 0x1d50,
174
Mark Lord02a121d2007-12-01 13:07:22 -0500175 PCI_IRQ_CAUSE_OFS = 0x1d58,
176 PCI_IRQ_MASK_OFS = 0x1d5c,
Brett Russ20f733e2005-09-01 18:26:17 -0400177 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
178
Mark Lord02a121d2007-12-01 13:07:22 -0500179 PCIE_IRQ_CAUSE_OFS = 0x1900,
180 PCIE_IRQ_MASK_OFS = 0x1910,
Mark Lord646a4da2008-01-26 18:30:37 -0500181 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
Mark Lord02a121d2007-12-01 13:07:22 -0500182
Brett Russ20f733e2005-09-01 18:26:17 -0400183 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
184 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500185 HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
186 HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
Brett Russ20f733e2005-09-01 18:26:17 -0400187 PORT0_ERR = (1 << 0), /* shift by port # */
188 PORT0_DONE = (1 << 1), /* shift by port # */
189 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
190 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
191 PCI_ERR = (1 << 18),
192 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
193 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500194 PORTS_0_3_COAL_DONE = (1 << 8),
195 PORTS_4_7_COAL_DONE = (1 << 17),
Brett Russ20f733e2005-09-01 18:26:17 -0400196 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
197 GPIO_INT = (1 << 22),
198 SELF_INT = (1 << 23),
199 TWSI_INT = (1 << 24),
200 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500201 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500202 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
Jeff Garzik8b260242005-11-12 12:32:50 -0500203 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
Brett Russ20f733e2005-09-01 18:26:17 -0400204 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
205 HC_MAIN_RSVD),
Jeff Garzikfb621e22007-02-25 04:19:45 -0500206 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
207 HC_MAIN_RSVD_5),
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500208 HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
Brett Russ20f733e2005-09-01 18:26:17 -0400209
210 /* SATAHC registers */
211 HC_CFG_OFS = 0,
212
213 HC_IRQ_CAUSE_OFS = 0x14,
Brett Russ31961942005-09-30 01:36:00 -0400214 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
Brett Russ20f733e2005-09-01 18:26:17 -0400215 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
216 DEV_IRQ = (1 << 8), /* shift by port # */
217
218 /* Shadow block registers */
Brett Russ31961942005-09-30 01:36:00 -0400219 SHD_BLK_OFS = 0x100,
220 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
Brett Russ20f733e2005-09-01 18:26:17 -0400221
222 /* SATA registers */
223 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
224 SATA_ACTIVE_OFS = 0x350,
Mark Lord0c589122008-01-26 18:31:16 -0500225 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500226 PHY_MODE3 = 0x310,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500227 PHY_MODE4 = 0x314,
228 PHY_MODE2 = 0x330,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500229 MV5_PHY_MODE = 0x74,
230 MV5_LT_MODE = 0x30,
231 MV5_PHY_CTL = 0x0C,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500232 SATA_INTERFACE_CTL = 0x050,
233
234 MV_M2_PREAMP_MASK = 0x7e0,
Brett Russ20f733e2005-09-01 18:26:17 -0400235
236 /* Port registers */
237 EDMA_CFG_OFS = 0,
Mark Lord0c589122008-01-26 18:31:16 -0500238 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
239 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
240 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
241 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
242 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
Brett Russ20f733e2005-09-01 18:26:17 -0400243
244 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
245 EDMA_ERR_IRQ_MASK_OFS = 0xc,
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400246 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
247 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
248 EDMA_ERR_DEV = (1 << 2), /* device error */
249 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
250 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
251 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400252 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
253 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400254 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400255 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400256 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
257 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
258 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
259 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
Mark Lord646a4da2008-01-26 18:30:37 -0500260
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400261 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500262 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
263 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
264 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
265 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
266
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400267 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500268
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400269 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500270 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
271 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
272 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
273 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
274 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
275
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400276 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500277
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400278 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400279 EDMA_ERR_OVERRUN_5 = (1 << 5),
280 EDMA_ERR_UNDERRUN_5 = (1 << 6),
Mark Lord646a4da2008-01-26 18:30:37 -0500281
282 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
283 EDMA_ERR_LNK_CTRL_RX_1 |
284 EDMA_ERR_LNK_CTRL_RX_3 |
285 EDMA_ERR_LNK_CTRL_TX,
286
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400287 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
288 EDMA_ERR_PRD_PAR |
289 EDMA_ERR_DEV_DCON |
290 EDMA_ERR_DEV_CON |
291 EDMA_ERR_SERR |
292 EDMA_ERR_SELF_DIS |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400293 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400294 EDMA_ERR_CRPB_PAR |
295 EDMA_ERR_INTRL_PAR |
296 EDMA_ERR_IORDY |
297 EDMA_ERR_LNK_CTRL_RX_2 |
298 EDMA_ERR_LNK_DATA_RX |
299 EDMA_ERR_LNK_DATA_TX |
300 EDMA_ERR_TRANS_PROTO,
301 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
302 EDMA_ERR_PRD_PAR |
303 EDMA_ERR_DEV_DCON |
304 EDMA_ERR_DEV_CON |
305 EDMA_ERR_OVERRUN_5 |
306 EDMA_ERR_UNDERRUN_5 |
307 EDMA_ERR_SELF_DIS_5 |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400308 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400309 EDMA_ERR_CRPB_PAR |
310 EDMA_ERR_INTRL_PAR |
311 EDMA_ERR_IORDY,
Brett Russ20f733e2005-09-01 18:26:17 -0400312
Brett Russ31961942005-09-30 01:36:00 -0400313 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
314 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400315
316 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
317 EDMA_REQ_Q_PTR_SHIFT = 5,
318
319 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
320 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
321 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400322 EDMA_RSP_Q_PTR_SHIFT = 3,
323
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400324 EDMA_CMD_OFS = 0x28, /* EDMA command register */
325 EDMA_EN = (1 << 0), /* enable EDMA */
326 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
327 ATA_RST = (1 << 2), /* reset trans/link/phy */
Brett Russ20f733e2005-09-01 18:26:17 -0400328
Jeff Garzikc9d39132005-11-13 17:47:51 -0500329 EDMA_IORDY_TMOUT = 0x34,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500330 EDMA_ARB_CFG = 0x38,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500331
Brett Russ31961942005-09-30 01:36:00 -0400332 /* Host private flags (hp_flags) */
333 MV_HP_FLAG_MSI = (1 << 0),
Jeff Garzik47c2b672005-11-12 21:13:17 -0500334 MV_HP_ERRATA_50XXB0 = (1 << 1),
335 MV_HP_ERRATA_50XXB2 = (1 << 2),
336 MV_HP_ERRATA_60X1B2 = (1 << 3),
337 MV_HP_ERRATA_60X1C0 = (1 << 4),
Jeff Garzike4e7b892006-01-31 12:18:41 -0500338 MV_HP_ERRATA_XX42A0 = (1 << 5),
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400339 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
340 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
341 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
Mark Lord02a121d2007-12-01 13:07:22 -0500342 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
Brett Russ20f733e2005-09-01 18:26:17 -0400343
Brett Russ31961942005-09-30 01:36:00 -0400344 /* Port private flags (pp_flags) */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400345 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
Mark Lord72109162008-01-26 18:31:33 -0500346 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400347 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
Brett Russ31961942005-09-30 01:36:00 -0400348};
349
Jeff Garzikee9ccdf2007-07-12 15:51:22 -0400350#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
351#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
Jeff Garzike4e7b892006-01-31 12:18:41 -0500352#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100353#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500354
Jeff Garzik095fec82005-11-12 09:50:49 -0500355enum {
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400356 /* DMA boundary 0xffff is required by the s/g splitting
357 * we need on /length/ in mv_fill-sg().
358 */
359 MV_DMA_BOUNDARY = 0xffffU,
Jeff Garzik095fec82005-11-12 09:50:49 -0500360
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400361 /* mask of register bits containing lower 32 bits
362 * of EDMA request queue DMA address
363 */
Jeff Garzik095fec82005-11-12 09:50:49 -0500364 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
365
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400366 /* ditto, for response queue */
Jeff Garzik095fec82005-11-12 09:50:49 -0500367 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
368};
369
Jeff Garzik522479f2005-11-12 22:14:02 -0500370enum chip_type {
371 chip_504x,
372 chip_508x,
373 chip_5080,
374 chip_604x,
375 chip_608x,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500376 chip_6042,
377 chip_7042,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500378 chip_soc,
Jeff Garzik522479f2005-11-12 22:14:02 -0500379};
380
Brett Russ31961942005-09-30 01:36:00 -0400381/* Command ReQuest Block: 32B */
382struct mv_crqb {
Mark Lorde1469872006-05-22 19:02:03 -0400383 __le32 sg_addr;
384 __le32 sg_addr_hi;
385 __le16 ctrl_flags;
386 __le16 ata_cmd[11];
Brett Russ31961942005-09-30 01:36:00 -0400387};
388
Jeff Garzike4e7b892006-01-31 12:18:41 -0500389struct mv_crqb_iie {
Mark Lorde1469872006-05-22 19:02:03 -0400390 __le32 addr;
391 __le32 addr_hi;
392 __le32 flags;
393 __le32 len;
394 __le32 ata_cmd[4];
Jeff Garzike4e7b892006-01-31 12:18:41 -0500395};
396
Brett Russ31961942005-09-30 01:36:00 -0400397/* Command ResPonse Block: 8B */
398struct mv_crpb {
Mark Lorde1469872006-05-22 19:02:03 -0400399 __le16 id;
400 __le16 flags;
401 __le32 tmstmp;
Brett Russ31961942005-09-30 01:36:00 -0400402};
403
404/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
405struct mv_sg {
Mark Lorde1469872006-05-22 19:02:03 -0400406 __le32 addr;
407 __le32 flags_size;
408 __le32 addr_hi;
409 __le32 reserved;
Brett Russ20f733e2005-09-01 18:26:17 -0400410};
411
412struct mv_port_priv {
Brett Russ31961942005-09-30 01:36:00 -0400413 struct mv_crqb *crqb;
414 dma_addr_t crqb_dma;
415 struct mv_crpb *crpb;
416 dma_addr_t crpb_dma;
Mark Lordeb73d552008-01-29 13:24:00 -0500417 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
418 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400419
420 unsigned int req_idx;
421 unsigned int resp_idx;
422
Brett Russ31961942005-09-30 01:36:00 -0400423 u32 pp_flags;
Brett Russ20f733e2005-09-01 18:26:17 -0400424};
425
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500426struct mv_port_signal {
427 u32 amps;
428 u32 pre;
429};
430
Mark Lord02a121d2007-12-01 13:07:22 -0500431struct mv_host_priv {
432 u32 hp_flags;
433 struct mv_port_signal signal[8];
434 const struct mv_hw_ops *ops;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500435 int n_ports;
436 void __iomem *base;
437 void __iomem *main_cause_reg_addr;
438 void __iomem *main_mask_reg_addr;
Mark Lord02a121d2007-12-01 13:07:22 -0500439 u32 irq_cause_ofs;
440 u32 irq_mask_ofs;
441 u32 unmask_all_irqs;
Mark Lordda2fa9b2008-01-26 18:32:45 -0500442 /*
443 * These consistent DMA memory pools give us guaranteed
444 * alignment for hardware-accessed data structures,
445 * and less memory waste in accomplishing the alignment.
446 */
447 struct dma_pool *crqb_pool;
448 struct dma_pool *crpb_pool;
449 struct dma_pool *sg_tbl_pool;
Mark Lord02a121d2007-12-01 13:07:22 -0500450};
451
Jeff Garzik47c2b672005-11-12 21:13:17 -0500452struct mv_hw_ops {
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500453 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
454 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500455 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
456 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
457 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500458 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
459 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500460 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100461 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500462};
463
Brett Russ20f733e2005-09-01 18:26:17 -0400464static void mv_irq_clear(struct ata_port *ap);
Tejun Heoda3dbb12007-07-16 14:29:40 +0900465static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
466static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
467static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
468static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Brett Russ31961942005-09-30 01:36:00 -0400469static int mv_port_start(struct ata_port *ap);
470static void mv_port_stop(struct ata_port *ap);
471static void mv_qc_prep(struct ata_queued_cmd *qc);
Jeff Garzike4e7b892006-01-31 12:18:41 -0500472static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
Tejun Heo9a3d9eb2006-01-23 13:09:36 +0900473static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400474static void mv_error_handler(struct ata_port *ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400475static void mv_eh_freeze(struct ata_port *ap);
476static void mv_eh_thaw(struct ata_port *ap);
Mark Lordf2738272008-01-26 18:32:29 -0500477static void mv6_dev_config(struct ata_device *dev);
Brett Russ20f733e2005-09-01 18:26:17 -0400478
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500479static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
480 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500481static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
482static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
483 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500484static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
485 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500486static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100487static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500488
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500489static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
490 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500491static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
492static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
493 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500494static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
495 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500496static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500497static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
498 void __iomem *mmio);
499static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
500 void __iomem *mmio);
501static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
502 void __iomem *mmio, unsigned int n_hc);
503static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
504 void __iomem *mmio);
505static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100506static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500507static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
508 unsigned int port_no);
Mark Lord72109162008-01-26 18:31:33 -0500509static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
510 void __iomem *port_mmio, int want_ncq);
511static int __mv_stop_dma(struct ata_port *ap);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500512
Mark Lordeb73d552008-01-29 13:24:00 -0500513/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
514 * because we have to allow room for worst case splitting of
515 * PRDs for 64K boundaries in mv_fill_sg().
516 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400517static struct scsi_host_template mv5_sht = {
Brett Russ20f733e2005-09-01 18:26:17 -0400518 .module = THIS_MODULE,
519 .name = DRV_NAME,
520 .ioctl = ata_scsi_ioctl,
521 .queuecommand = ata_scsi_queuecmd,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400522 .can_queue = ATA_DEF_QUEUE,
523 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400524 .sg_tablesize = MV_MAX_SG_CT / 2,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400525 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
526 .emulated = ATA_SHT_EMULATED,
527 .use_clustering = 1,
528 .proc_name = DRV_NAME,
529 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzik3be6cbd2007-10-18 16:21:18 -0400530 .slave_configure = ata_scsi_slave_config,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400531 .slave_destroy = ata_scsi_slave_destroy,
532 .bios_param = ata_std_bios_param,
533};
534
535static struct scsi_host_template mv6_sht = {
536 .module = THIS_MODULE,
537 .name = DRV_NAME,
538 .ioctl = ata_scsi_ioctl,
539 .queuecommand = ata_scsi_queuecmd,
Mark Lord138bfdd2008-01-26 18:33:18 -0500540 .change_queue_depth = ata_scsi_change_queue_depth,
541 .can_queue = MV_MAX_Q_DEPTH - 1,
Brett Russ20f733e2005-09-01 18:26:17 -0400542 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400543 .sg_tablesize = MV_MAX_SG_CT / 2,
Brett Russ20f733e2005-09-01 18:26:17 -0400544 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
545 .emulated = ATA_SHT_EMULATED,
Jeff Garzikd88184f2007-02-26 01:26:06 -0500546 .use_clustering = 1,
Brett Russ20f733e2005-09-01 18:26:17 -0400547 .proc_name = DRV_NAME,
548 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzik3be6cbd2007-10-18 16:21:18 -0400549 .slave_configure = ata_scsi_slave_config,
Tejun Heoccf68c32006-05-31 18:28:09 +0900550 .slave_destroy = ata_scsi_slave_destroy,
Brett Russ20f733e2005-09-01 18:26:17 -0400551 .bios_param = ata_std_bios_param,
Brett Russ20f733e2005-09-01 18:26:17 -0400552};
553
Jeff Garzikc9d39132005-11-13 17:47:51 -0500554static const struct ata_port_operations mv5_ops = {
Jeff Garzikc9d39132005-11-13 17:47:51 -0500555 .tf_load = ata_tf_load,
556 .tf_read = ata_tf_read,
557 .check_status = ata_check_status,
558 .exec_command = ata_exec_command,
559 .dev_select = ata_std_dev_select,
560
Jeff Garzikcffacd82007-03-09 09:46:47 -0500561 .cable_detect = ata_cable_sata,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500562
563 .qc_prep = mv_qc_prep,
564 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900565 .data_xfer = ata_data_xfer,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500566
Jeff Garzikc9d39132005-11-13 17:47:51 -0500567 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900568 .irq_on = ata_irq_on,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500569
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400570 .error_handler = mv_error_handler,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400571 .freeze = mv_eh_freeze,
572 .thaw = mv_eh_thaw,
573
Jeff Garzikc9d39132005-11-13 17:47:51 -0500574 .scr_read = mv5_scr_read,
575 .scr_write = mv5_scr_write,
576
577 .port_start = mv_port_start,
578 .port_stop = mv_port_stop,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500579};
580
581static const struct ata_port_operations mv6_ops = {
Mark Lordf2738272008-01-26 18:32:29 -0500582 .dev_config = mv6_dev_config,
Brett Russ20f733e2005-09-01 18:26:17 -0400583 .tf_load = ata_tf_load,
584 .tf_read = ata_tf_read,
585 .check_status = ata_check_status,
586 .exec_command = ata_exec_command,
587 .dev_select = ata_std_dev_select,
588
Jeff Garzikcffacd82007-03-09 09:46:47 -0500589 .cable_detect = ata_cable_sata,
Brett Russ20f733e2005-09-01 18:26:17 -0400590
Brett Russ31961942005-09-30 01:36:00 -0400591 .qc_prep = mv_qc_prep,
592 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900593 .data_xfer = ata_data_xfer,
Brett Russ20f733e2005-09-01 18:26:17 -0400594
Brett Russ20f733e2005-09-01 18:26:17 -0400595 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900596 .irq_on = ata_irq_on,
Brett Russ20f733e2005-09-01 18:26:17 -0400597
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400598 .error_handler = mv_error_handler,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400599 .freeze = mv_eh_freeze,
600 .thaw = mv_eh_thaw,
Mark Lord138bfdd2008-01-26 18:33:18 -0500601 .qc_defer = ata_std_qc_defer,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400602
Brett Russ20f733e2005-09-01 18:26:17 -0400603 .scr_read = mv_scr_read,
604 .scr_write = mv_scr_write,
605
Brett Russ31961942005-09-30 01:36:00 -0400606 .port_start = mv_port_start,
607 .port_stop = mv_port_stop,
Brett Russ20f733e2005-09-01 18:26:17 -0400608};
609
Jeff Garzike4e7b892006-01-31 12:18:41 -0500610static const struct ata_port_operations mv_iie_ops = {
Jeff Garzike4e7b892006-01-31 12:18:41 -0500611 .tf_load = ata_tf_load,
612 .tf_read = ata_tf_read,
613 .check_status = ata_check_status,
614 .exec_command = ata_exec_command,
615 .dev_select = ata_std_dev_select,
616
Jeff Garzikcffacd82007-03-09 09:46:47 -0500617 .cable_detect = ata_cable_sata,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500618
619 .qc_prep = mv_qc_prep_iie,
620 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900621 .data_xfer = ata_data_xfer,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500622
Jeff Garzike4e7b892006-01-31 12:18:41 -0500623 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900624 .irq_on = ata_irq_on,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500625
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400626 .error_handler = mv_error_handler,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400627 .freeze = mv_eh_freeze,
628 .thaw = mv_eh_thaw,
Mark Lord138bfdd2008-01-26 18:33:18 -0500629 .qc_defer = ata_std_qc_defer,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400630
Jeff Garzike4e7b892006-01-31 12:18:41 -0500631 .scr_read = mv_scr_read,
632 .scr_write = mv_scr_write,
633
634 .port_start = mv_port_start,
635 .port_stop = mv_port_stop,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500636};
637
/*
 * Per-chip-variant port configuration.
 *
 * NOTE(review): entries are positional -- the array index is the board
 * id stored as driver_data in mv_pci_tbl (chip_504x, chip_508x, ...),
 * so the order here must match that enum; verify against it before
 * adding or reordering entries.
 */
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x: dual host controller, 8 ports */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042: Gen-IIE, uses the IIE command prep path */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc: integrated (system-on-chip) controller */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
692
/*
 * PCI IDs handled by this driver.  The driver_data field is the board
 * id used to index mv_port_info[].
 */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
722
Jeff Garzik47c2b672005-11-12 21:13:17 -0500723static const struct mv_hw_ops mv5xxx_ops = {
724 .phy_errata = mv5_phy_errata,
725 .enable_leds = mv5_enable_leds,
726 .read_preamp = mv5_read_preamp,
727 .reset_hc = mv5_reset_hc,
Jeff Garzik522479f2005-11-12 22:14:02 -0500728 .reset_flash = mv5_reset_flash,
729 .reset_bus = mv5_reset_bus,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500730};
731
732static const struct mv_hw_ops mv6xxx_ops = {
733 .phy_errata = mv6_phy_errata,
734 .enable_leds = mv6_enable_leds,
735 .read_preamp = mv6_read_preamp,
736 .reset_hc = mv6_reset_hc,
Jeff Garzik522479f2005-11-12 22:14:02 -0500737 .reset_flash = mv6_reset_flash,
738 .reset_bus = mv_reset_pci_bus,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500739};
740
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500741static const struct mv_hw_ops mv_soc_ops = {
742 .phy_errata = mv6_phy_errata,
743 .enable_leds = mv_soc_enable_leds,
744 .read_preamp = mv_soc_read_preamp,
745 .reset_hc = mv_soc_reset_hc,
746 .reset_flash = mv_soc_reset_flash,
747 .reset_bus = mv_soc_reset_bus,
748};
749
Brett Russ20f733e2005-09-01 18:26:17 -0400750/*
751 * Functions
752 */
753
/*
 * writelfl - register write followed by a flushing read.
 *
 * The dummy readl() of the same address forces the write out of any
 * PCI posted-write buffers before the caller proceeds.
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
759
Brett Russ20f733e2005-09-01 18:26:17 -0400760static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
761{
762 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
763}
764
/* Index of the host controller that owns global port number @port. */
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}
769
/* Port number relative to its host controller (the low bits of @port). */
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
774
775static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
776 unsigned int port)
777{
778 return mv_hc_base(base, mv_hc_from_port(port));
779}
780
Brett Russ20f733e2005-09-01 18:26:17 -0400781static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
782{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500783 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500784 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500785 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400786}
787
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500788static inline void __iomem *mv_host_base(struct ata_host *host)
789{
790 struct mv_host_priv *hpriv = host->private_data;
791 return hpriv->base;
792}
793
Brett Russ20f733e2005-09-01 18:26:17 -0400794static inline void __iomem *mv_ap_base(struct ata_port *ap)
795{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500796 return mv_port_base(mv_host_base(ap->host), ap->port_no);
Brett Russ20f733e2005-09-01 18:26:17 -0400797}
798
Jeff Garzikcca39742006-08-24 03:19:22 -0400799static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400800{
Jeff Garzikcca39742006-08-24 03:19:22 -0400801 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400802}
803
/* Intentionally a no-op: this driver acks its interrupt-cause
 * registers itself rather than via the ->irq_clear hook.
 */
static void mv_irq_clear(struct ata_port *ap)
{
}
807
/**
 * mv_set_edma_ptrs - program eDMA request/response queue base and pointers
 * @port_mmio: per-port MMIO base
 * @hpriv: host private data (for errata flags)
 * @pp: port private data holding queue DMA addresses and soft indices
 *
 * Writes the CRQB/CRPB queue base addresses and in/out pointers so the
 * hardware view matches the software req_idx/resp_idx.  On chips with
 * the XX42A0 errata the "unused" pointer register must carry the full
 * low base address as well.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* request queue base must be 1KB aligned */
	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	/* response queue base must be 256-byte aligned */
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
847
/**
 * mv_start_dma - Enable eDMA engine
 * @ap: ATA channel to manipulate
 * @port_mmio: per-port MMIO base address
 * @pp: port private data
 * @protocol: taskfile protocol of the command about to be issued
 *
 * If the engine is already running but in the wrong NCQ mode, it is
 * stopped first so it can be restarted with the right configuration.
 * Before (re)enabling, all stale event/interrupt indicators are
 * cleared and the queue pointers re-synced.
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		/* NCQ mode can only change while the engine is stopped */
		if (want_ncq != using_ncq)
			__mv_stop_dma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
					mv_host_base(ap->host), hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			   (CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}
900
/**
 * __mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Requests eDMA disable (the disable bit auto-clears) and then polls
 * until the engine reports stopped, up to ~100ms.
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * Returns 0 on success, -EIO if the engine refused to stop.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		/* software thinks it's already off -- check the hardware */
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
943
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400944static int mv_stop_dma(struct ata_port *ap)
945{
946 unsigned long flags;
947 int rc;
948
949 spin_lock_irqsave(&ap->host->lock, flags);
950 rc = __mv_stop_dma(ap);
951 spin_unlock_irqrestore(&ap->host->lock, flags);
952
953 return rc;
954}
955
Jeff Garzik8a70f8d2005-10-05 17:19:47 -0400956#ifdef ATA_DEBUG
Brett Russ31961942005-09-30 01:36:00 -0400957static void mv_dump_mem(void __iomem *start, unsigned bytes)
958{
Brett Russ31961942005-09-30 01:36:00 -0400959 int b, w;
960 for (b = 0; b < bytes; ) {
961 DPRINTK("%p: ", start + b);
962 for (w = 0; b < bytes && w < 4; w++) {
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400963 printk("%08x ", readl(start + b));
Brett Russ31961942005-09-30 01:36:00 -0400964 b += sizeof(u32);
965 }
966 printk("\n");
967 }
Brett Russ31961942005-09-30 01:36:00 -0400968}
Jeff Garzik8a70f8d2005-10-05 17:19:47 -0400969#endif
970
/* Debug helper: hex-dump @bytes of PCI config space of @pdev,
 * four dwords per line.  Compiles to nothing without ATA_DEBUG.
 */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	unsigned b = 0;

	while (b < bytes) {
		int w;

		DPRINTK("%02x: ", b);
		for (w = 0; w < 4 && b < bytes; w++) {
			u32 dw;

			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
/* Debug helper: dump PCI config space, global PCI-facing registers,
 * and the HC/EDMA/SATA register blocks.  A negative @port means
 * "dump everything"; otherwise only @port's HC and port blocks.
 */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	/* NOTE(review): this initial value is overwritten before use in
	 * the HC loop below; kept only as an initializer.
	 */
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
1031
Brett Russ20f733e2005-09-01 18:26:17 -04001032static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1033{
1034 unsigned int ofs;
1035
1036 switch (sc_reg_in) {
1037 case SCR_STATUS:
1038 case SCR_CONTROL:
1039 case SCR_ERROR:
1040 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1041 break;
1042 case SCR_ACTIVE:
1043 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1044 break;
1045 default:
1046 ofs = 0xffffffffU;
1047 break;
1048 }
1049 return ofs;
1050}
1051
Tejun Heoda3dbb12007-07-16 14:29:40 +09001052static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -04001053{
1054 unsigned int ofs = mv_scr_offset(sc_reg_in);
1055
Tejun Heoda3dbb12007-07-16 14:29:40 +09001056 if (ofs != 0xffffffffU) {
1057 *val = readl(mv_ap_base(ap) + ofs);
1058 return 0;
1059 } else
1060 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001061}
1062
Tejun Heoda3dbb12007-07-16 14:29:40 +09001063static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -04001064{
1065 unsigned int ofs = mv_scr_offset(sc_reg_in);
1066
Tejun Heoda3dbb12007-07-16 14:29:40 +09001067 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -04001068 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001069 return 0;
1070 } else
1071 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001072}
1073
Mark Lordf2738272008-01-26 18:32:29 -05001074static void mv6_dev_config(struct ata_device *adev)
1075{
1076 /*
1077 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1078 * See mv_qc_prep() for more info.
1079 */
1080 if (adev->flags & ATA_DFLAG_NCQ)
1081 if (adev->max_sectors > ATA_MAX_SECTORS)
1082 adev->max_sectors = ATA_MAX_SECTORS;
1083}
1084
/**
 * mv_edma_cfg - program the EDMA configuration register
 * @pp: port private data (NCQ-enabled flag is updated here)
 * @hpriv: host private data, used to pick generation-specific bits
 * @port_mmio: per-port MMIO base
 * @want_ncq: nonzero to configure the engine for NCQ operation
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq)
{
	u32 cfg;

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	/* keep the software NCQ flag in sync with the hardware setting */
	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1114
Mark Lordda2fa9b2008-01-26 18:32:45 -05001115static void mv_port_free_dma_mem(struct ata_port *ap)
1116{
1117 struct mv_host_priv *hpriv = ap->host->private_data;
1118 struct mv_port_priv *pp = ap->private_data;
Mark Lordeb73d552008-01-29 13:24:00 -05001119 int tag;
Mark Lordda2fa9b2008-01-26 18:32:45 -05001120
1121 if (pp->crqb) {
1122 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1123 pp->crqb = NULL;
1124 }
1125 if (pp->crpb) {
1126 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1127 pp->crpb = NULL;
1128 }
Mark Lordeb73d552008-01-29 13:24:00 -05001129 /*
1130 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1131 * For later hardware, we have one unique sg_tbl per NCQ tag.
1132 */
1133 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1134 if (pp->sg_tbl[tag]) {
1135 if (tag == 0 || !IS_GEN_I(hpriv))
1136 dma_pool_free(hpriv->sg_tbl_pool,
1137 pp->sg_tbl[tag],
1138 pp->sg_tbl_dma[tag]);
1139 pp->sg_tbl[tag] = NULL;
1140 }
Mark Lordda2fa9b2008-01-26 18:32:45 -05001141 }
1142}
1143
Brett Russ05b308e2005-10-05 17:08:53 -04001144/**
1145 * mv_port_start - Port specific init/start routine.
1146 * @ap: ATA channel to manipulate
1147 *
1148 * Allocate and point to DMA memory, init port private memory,
1149 * zero indices.
1150 *
1151 * LOCKING:
1152 * Inherited from caller.
1153 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;
	int tag;

	/* pp itself is devm-managed; the pool buffers below are not and
	 * are released via mv_port_free_dma_mem() on error/stop.
	 */
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			/* GEN_I: every tag aliases the tag-0 table */
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(pp, hpriv, port_mmio, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
1211
/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);		/* takes ap->host->lock internally */
	mv_port_free_dma_mem(ap);	/* release CRQB/CRPB/sg pool buffers */
}
1226
/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.  Any segment that
 * would cross a 64 KiB boundary of the flags_size length field is
 * split across multiple ePRD entries.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	/* each NCQ tag has its own ePRD table */
	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			/* clamp so one entry never spans a 64 KiB chunk */
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	/* hardware stops at the entry flagged end-of-table */
	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
1270
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001271static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001272{
Mark Lord559eeda2006-05-19 16:40:15 -04001273 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001274 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001275 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001276}
1277
Brett Russ05b308e2005-10-05 17:08:53 -04001278/**
1279 * mv_qc_prep - Host specific command preparation.
1280 * @qc: queued command to prepare
1281 *
1282 * This routine simply redirects to the general purpose routine
1283 * if command is not DMA. Else, it handles prep of the CRQB
1284 * (command request block), does some sanity checking, and calls
1285 * the SG load routine.
1286 *
1287 * LOCKING:
1288 * Inherited from caller.
1289 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* only DMA and NCQ go through the CRQB; everything else uses
	 * the shadow registers directly
	 */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	/* remaining register writes; the device issues them in order,
	 * with the command register flagged as the last word
	 */
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1367
/**
 *	mv_qc_prep_iie - Host specific command preparation (Gen IIE).
 *	@qc: queued command to prepare
 *
 *	No-op unless the command uses the EDMA engine (DMA or NCQ
 *	protocol).  Otherwise, builds the Gen IIE CRQB (command
 *	request block) in the software request queue slot, then calls
 *	the SG load routine to fill the scatter/gather table.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* Only DMA/NCQ commands go through the EDMA request queue;
	 * everything else is issued via the shadow registers instead.
	 */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	/* Gen IIE carries the tag in a second (host-queue) field too */
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	/* The (x >> 16) >> 16 double shift extracts the high half without
	 * invoking an undefined >>32 when dma_addr_t is only 32 bits wide.
	 */
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	/* Gen IIE packs the taskfile into four little-endian words
	 * instead of the per-register (value, addr) pairs used by
	 * earlier generations.
	 */
	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	/* No data phase -> nothing to scatter/gather */
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1436
/**
 *	mv_qc_issue - Initiate a command to the host
 *	@qc: queued command to start
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it starts the EDMA engine (if
 *	not already running) and bumps the request producer index,
 *	which kicks the hardware into processing the new CRQB.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Zero on success; for non-EDMA commands, whatever
 *	ata_qc_issue_prot() returns.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	/* advance the software producer index; the CRQB for this
	 * command was already written there by mv_qc_prep[_iie]()
	 */
	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1478
Brett Russ05b308e2005-10-05 17:08:53 -04001479/**
Brett Russ05b308e2005-10-05 17:08:53 -04001480 * mv_err_intr - Handle error interrupts on the port
1481 * @ap: ATA channel to manipulate
Mark Lord9b358e32006-05-19 16:21:03 -04001482 * @reset_allowed: bool: 0 == don't trigger from reset here
Brett Russ05b308e2005-10-05 17:08:53 -04001483 *
1484 * In most cases, just clear the interrupt and move on. However,
1485 * some cases require an eDMA reset, which is done right before
1486 * the COMRESET in mv_phy_reset(). The SERR case requires a
1487 * clear of pending errors in the SATA SERROR register. Finally,
1488 * if the port disabled DMA, update our cached copy to match.
1489 *
1490 * LOCKING:
1491 * Inherited from caller.
1492 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001493static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
Brett Russ20f733e2005-09-01 18:26:17 -04001494{
Brett Russ31961942005-09-30 01:36:00 -04001495 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001496 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1497 struct mv_port_priv *pp = ap->private_data;
1498 struct mv_host_priv *hpriv = ap->host->private_data;
1499 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1500 unsigned int action = 0, err_mask = 0;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001501 struct ata_eh_info *ehi = &ap->link.eh_info;
Brett Russ20f733e2005-09-01 18:26:17 -04001502
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001503 ata_ehi_clear_desc(ehi);
Brett Russ20f733e2005-09-01 18:26:17 -04001504
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001505 if (!edma_enabled) {
1506 /* just a guess: do we need to do this? should we
1507 * expand this, and do it in all cases?
1508 */
Tejun Heo936fd732007-08-06 18:36:23 +09001509 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1510 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Brett Russ20f733e2005-09-01 18:26:17 -04001511 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001512
1513 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1514
1515 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1516
1517 /*
1518 * all generations share these EDMA error cause bits
1519 */
1520
1521 if (edma_err_cause & EDMA_ERR_DEV)
1522 err_mask |= AC_ERR_DEV;
1523 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
Jeff Garzik6c1153e2007-07-13 15:20:15 -04001524 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001525 EDMA_ERR_INTRL_PAR)) {
1526 err_mask |= AC_ERR_ATA_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09001527 action |= ATA_EH_RESET;
Tejun Heob64bbc32007-07-16 14:29:39 +09001528 ata_ehi_push_desc(ehi, "parity error");
Brett Russafb0edd2005-10-05 17:08:42 -04001529 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001530 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1531 ata_ehi_hotplugged(ehi);
1532 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
Tejun Heob64bbc32007-07-16 14:29:39 +09001533 "dev disconnect" : "dev connect");
Tejun Heocf480622008-01-24 00:05:14 +09001534 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001535 }
1536
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04001537 if (IS_GEN_I(hpriv)) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001538 eh_freeze_mask = EDMA_EH_FREEZE_5;
1539
1540 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
Harvey Harrison5ab063e2008-02-13 21:14:14 -08001541 pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001542 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001543 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001544 }
1545 } else {
1546 eh_freeze_mask = EDMA_EH_FREEZE;
1547
1548 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
Harvey Harrison5ab063e2008-02-13 21:14:14 -08001549 pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001550 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001551 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001552 }
1553
1554 if (edma_err_cause & EDMA_ERR_SERR) {
Tejun Heo936fd732007-08-06 18:36:23 +09001555 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1556 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001557 err_mask = AC_ERR_ATA_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09001558 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001559 }
1560 }
Brett Russ20f733e2005-09-01 18:26:17 -04001561
1562 /* Clear EDMA now that SERR cleanup done */
Mark Lord3606a382008-01-26 18:28:23 -05001563 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001564
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001565 if (!err_mask) {
1566 err_mask = AC_ERR_OTHER;
Tejun Heocf480622008-01-24 00:05:14 +09001567 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001568 }
1569
1570 ehi->serror |= serr;
1571 ehi->action |= action;
1572
1573 if (qc)
1574 qc->err_mask |= err_mask;
1575 else
1576 ehi->err_mask |= err_mask;
1577
1578 if (edma_err_cause & eh_freeze_mask)
1579 ata_port_freeze(ap);
1580 else
1581 ata_port_abort(ap);
1582}
1583
1584static void mv_intr_pio(struct ata_port *ap)
1585{
1586 struct ata_queued_cmd *qc;
1587 u8 ata_status;
1588
1589 /* ignore spurious intr if drive still BUSY */
1590 ata_status = readb(ap->ioaddr.status_addr);
1591 if (unlikely(ata_status & ATA_BUSY))
1592 return;
1593
1594 /* get active ATA command */
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001595 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001596 if (unlikely(!qc)) /* no active tag */
1597 return;
1598 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1599 return;
1600
1601 /* and finally, complete the ATA command */
1602 qc->err_mask |= ac_err_mask(ata_status);
1603 ata_qc_complete(qc);
1604}
1605
/* Drain the EDMA response queue for @ap: walk software consumer index
 * (pp->resp_idx) up to the hardware producer index, completing one ATA
 * command per CRPB entry, then write the consumer pointer back to the
 * hardware.  On a per-command error, hands off to mv_err_intr() and
 * returns immediately without updating the hardware pointer.
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	/* single pointer write-back covers all entries consumed above */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1671
/**
 *	mv_host_intr - Handle all interrupts on the given host controller
 *	@host: host specific structure
 *	@relevant: port error bits relevant to this host controller
 *	@hc: which host controller we're to look at
 *
 *	Read then write clear the HC interrupt status then walk each
 *	port connected to the HC and see if it needs servicing.  Port
 *	success ints are reported in the HC interrupt status reg, the
 *	port error ints are reported in the higher level main
 *	interrupt status register and thus are passed in via the
 *	'relevant' argument.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0, last_port;

	/* each HC owns a contiguous group of MV_PORTS_PER_HC ports */
	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* non-PCI (SoC) hosts may expose fewer ports than a full HC */
	if (HAS_PCI(host))
		last_port = port0 + MV_PORTS_PER_HC;
	else
		last_port = port0 + hpriv->n_ports;
	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	/* ack (write-clear) everything we are about to service */
	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < last_port; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		pp = ap->private_data;

		/* each port gets 2 bits in the main IRQ cause register */
		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			/* polled commands handle their own errors */
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port);	/* range 0..3 */

		/* dispatch on the port's current mode: EDMA completion
		 * vs legacy device interrupt
		 */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}
1754
/* Handle a PCI bus error: log and dump registers, clear the PCI IRQ
 * cause, then freeze every online port for EH recovery.  The error
 * description is pushed onto only the first affected port's EH info.
 */
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	/* ack the PCI error before kicking off recovery */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			/* blame the active command if there is one,
			 * otherwise record the error on the port itself
			 */
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
1794
/**
 *	mv_interrupt - Main interrupt event handler
 *	@irq: unused
 *	@dev_instance: private data; in this case the host structure
 *
 *	Read the read only register to determine if any host
 *	controllers have pending interrupts.  If so, call lower level
 *	routine to handle.  Also check for PCI errors which are only
 *	reported here.
 *
 *	LOCKING:
 *	This routine holds the host lock while processing pending
 *	interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = hpriv->base;
	u32 irq_stat, irq_mask;

	spin_lock(&host->lock);

	irq_stat = readl(hpriv->main_cause_reg_addr);
	irq_mask = readl(hpriv->main_mask_reg_addr);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	/* PCI errors preempt all per-HC handling */
	if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	/* dispatch each HC's slice of the main cause register */
	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1849
Jeff Garzikc9d39132005-11-13 17:47:51 -05001850static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1851{
1852 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1853 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1854
1855 return hc_mmio + ofs;
1856}
1857
1858static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1859{
1860 unsigned int ofs;
1861
1862 switch (sc_reg_in) {
1863 case SCR_STATUS:
1864 case SCR_ERROR:
1865 case SCR_CONTROL:
1866 ofs = sc_reg_in * sizeof(u32);
1867 break;
1868 default:
1869 ofs = 0xffffffffU;
1870 break;
1871 }
1872 return ofs;
1873}
1874
Tejun Heoda3dbb12007-07-16 14:29:40 +09001875static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001876{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001877 struct mv_host_priv *hpriv = ap->host->private_data;
1878 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001879 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001880 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1881
Tejun Heoda3dbb12007-07-16 14:29:40 +09001882 if (ofs != 0xffffffffU) {
1883 *val = readl(addr + ofs);
1884 return 0;
1885 } else
1886 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001887}
1888
Tejun Heoda3dbb12007-07-16 14:29:40 +09001889static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001890{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001891 struct mv_host_priv *hpriv = ap->host->private_data;
1892 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001893 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001894 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1895
Tejun Heoda3dbb12007-07-16 14:29:40 +09001896 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001897 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001898 return 0;
1899 } else
1900 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001901}
1902
/* 50xx bus reset: on all but the earliest 5080 (revision 0), set bit 0
 * of the expansion-ROM BAR control register before performing the
 * common PCI bus reset.
 */
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	/* first-spin 5080 parts skip the EXP ROM BAR tweak below */
	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv5_reset_pci_bus(host, mmio);
}
1918
/* Reset the 50xx flash controller register to a known value.
 * @hpriv is unused here.
 * NOTE(review): 0x0fcfffff is a magic value carried over from the
 * vendor driver; its bit meanings are not documented in this file.
 */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1923
Jeff Garzik47c2b672005-11-12 21:13:17 -05001924static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001925 void __iomem *mmio)
1926{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001927 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1928 u32 tmp;
1929
1930 tmp = readl(phy_mmio + MV5_PHY_MODE);
1931
1932 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1933 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001934}
1935
/* Configure 50xx GPIO/LED behavior: clear the GPIO port control
 * register, then modify the expansion-ROM BAR control register.
 */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): `|= ~(1 << 0)` sets every bit EXCEPT bit 0,
	 * whereas mv5_reset_bus() does `|= (1 << 0)` on this same
	 * register.  If the intent here was to clear bit 0, this
	 * should read `&= ~(1 << 0)` — confirm against the 50xx
	 * datasheet before changing.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1948
/* Apply 50xx PHY errata fixes for @port, then restore the board's
 * saved pre-emphasis/amplitude settings (captured earlier by
 * mv5_read_preamp()) into MV5_PHY_MODE.
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	/* bits 12:11 (pre) and 7:5 (amps) get cleared then rewritten */
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	/* 50XXB0 errata workaround — exact register semantics are not
	 * documented here; values come from the vendor driver.
	 */
	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* re-apply the saved signal settings on top of cleared fields */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1974
Jeff Garzikc9d39132005-11-13 17:47:51 -05001975
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Bring one 50xx port's EDMA engine to a clean, quiescent state:
 * disable EDMA, reset the channel, then zero/initialize the EDMA
 * register block (offsets relative to the port's MMIO base).
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/* disable EDMA before touching its configuration */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
2002
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Reset one 50xx host controller's shared registers: zero four HC
 * registers, then do a read-modify-write of the register at offset
 * 0x20 (values carried over from the vendor driver).  @hpriv unused.
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
2021
2022static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2023 unsigned int n_hc)
2024{
2025 unsigned int hc, port;
2026
2027 for (hc = 0; hc < n_hc; hc++) {
2028 for (port = 0; port < MV_PORTS_PER_HC; port++)
2029 mv5_reset_hc_port(hpriv, mmio,
2030 (hc * MV_PORTS_PER_HC) + port);
2031
2032 mv5_reset_one_hc(hpriv, mmio, hc);
2033 }
2034
2035 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002036}
2037
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
/* Reset PCI-facing state common to all chip generations: clear the
 * upper byte of the PCI mode register and zero the PCI timer, MSI,
 * interrupt and error-reporting registers.
 */
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	/* cause/mask offsets differ per chip family, hence via hpriv */
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
2062
/* 60xx flash reset: perform the common 50xx flash reset, then adjust
 * the GPIO port control register (keep the low two bits, set bits
 * 5 and 6 — values carried over from the vendor driver).
 */
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
2074
/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @hpriv: host private data (unused by this routine)
 * @mmio: base address of the HBA
 * @n_hc: number of host controllers (unused by this routine)
 *
 * Stops the PCI master, waits for it to flush, asserts the global
 * soft reset bit, then deasserts it while re-enabling the PCI master.
 * This routine only applies to 6xxx parts.
 *
 * Returns 0 on success, 1 if any of the three handshake steps
 * times out.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* poll up to ~1ms for the PCI master to drain */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset (retry the write up to 5 times until it sticks) */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
2138
Jeff Garzik47c2b672005-11-12 21:13:17 -05002139static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002140 void __iomem *mmio)
2141{
2142 void __iomem *port_mmio;
2143 u32 tmp;
2144
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002145 tmp = readl(mmio + MV_RESET_CFG);
2146 if ((tmp & (1 << 0)) == 0) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002147 hpriv->signal[idx].amps = 0x7 << 8;
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002148 hpriv->signal[idx].pre = 0x1 << 5;
2149 return;
2150 }
2151
2152 port_mmio = mv_port_base(mmio, idx);
2153 tmp = readl(port_mmio + PHY_MODE2);
2154
2155 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2156 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2157}
2158
/* Enable the board LEDs on 60xx parts by writing the GPIO port
 * control register (bits 5 and 6 set — same bits mv6_reset_flash
 * ORs in above).
 */
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
2163
/* Apply 60xx-family PHY errata workarounds to one port, then restore
 * the pre-emphasis/amplitude values previously captured by
 * mv6_read_preamp().  The PHY_MODE2/3/4 pokes and udelay()s follow
 * the Marvell vendor driver; exact write order matters, do not
 * reorder.
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	/* both fixups apply to the same errata revisions (B2 and C0) */
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		/* pulse bit 31 high (with bit 16 cleared) for 200us,
		 * then drop both bits for another 200us */
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* on B2 parts, save the register at 0x310 across the
		 * PHY_MODE4 update and restore it afterwards */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
2229
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002230/* TODO: use the generic LED interface to configure the SATA Presence */
2231/* & Acitivy LEDs on the board */
2232static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2233 void __iomem *mmio)
2234{
2235 return;
2236}
2237
2238static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2239 void __iomem *mmio)
2240{
2241 void __iomem *port_mmio;
2242 u32 tmp;
2243
2244 port_mmio = mv_port_base(mmio, idx);
2245 tmp = readl(port_mmio + PHY_MODE2);
2246
2247 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2248 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2249}
2250
2251#undef ZERO
2252#define ZERO(reg) writel(0, port_mmio + (reg))
/* Reset one SATA channel on an SoC part: disable EDMA, pulse the
 * channel reset, then zero the port's EDMA queue/irq registers
 * (ZERO() writes 0 to port_mmio + reg) and restore default EDMA
 * config and IORDY timeout values.  Write order follows the vendor
 * sequence — do not reorder.
 */
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
2276
2277#undef ZERO
2278
2279#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Reset the (single) host-controller block of an SoC part by zeroing
 * three HC registers (ZERO() writes 0 to hc_mmio + reg).  Unlike the
 * 50xx variant, offset 0x018 and the 0x20 register are not touched.
 */
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);

}
2290
2291#undef ZERO
2292
2293static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2294 void __iomem *mmio, unsigned int n_hc)
2295{
2296 unsigned int port;
2297
2298 for (port = 0; port < hpriv->n_ports; port++)
2299 mv_soc_reset_hc_port(hpriv, mmio, port);
2300
2301 mv_soc_reset_one_hc(hpriv, mmio);
2302
2303 return 0;
2304}
2305
2306static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2307 void __iomem *mmio)
2308{
2309 return;
2310}
2311
2312static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2313{
2314 return;
2315}
2316
/* Hard-reset one SATA channel: assert ATA_RST in the EDMA command
 * register, (on Gen-II) force gen2i speed in the SATA interface
 * control register, wait for the reset to propagate, deassert, and
 * finally run the chip-specific PHY errata fixups.
 */
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);	/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	/* Gen-I parts need extra settle time after reset */
	if (IS_GEN_I(hpriv))
		mdelay(1);
}
2343
/**
 * mv_phy_reset - Perform eDMA reset followed by COMRESET
 * @ap: ATA channel to manipulate
 * @class: resulting device class is stored here
 * @deadline: jiffies value after which polling loops give up
 *
 * Issues a COMRESET via SControl (0x301 then 0x300), polls SStatus
 * until the link settles or @deadline passes, retries on Gen-II
 * errata link states, then polls the taskfile status until the
 * device leaves 0x80/0x7f and finally classifies the device.
 *
 * Part of this is taken from __sata_phy_reset and modified to
 * not sleep since this routine gets called from interrupt level.
 *
 * LOCKING:
 * Inherited from caller.  This is coded to safe to call at
 * interrupt level, i.e. it does not sleep.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			unsigned long deadline)
{
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
	{
		/* note: these locals deliberately shadow the outer sstatus */
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
	msleep(1);

	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
	msleep(20);

	/* wait for DET (SStatus bits 1:0) to read 0 or 3 */
	do {
		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	if (ata_link_offline(&ap->link)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		msleep(500);
		if (retry-- <= 0)
			break;
		if (time_after(jiffies, deadline))
			break;
	}

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap->link.device, 1, NULL);

	/* clear any latched EDMA error interrupt causes */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}
2447
/* libata EH prereset hook: stop EDMA on the port and record that a
 * reset has been initiated (MV_PP_FLAG_HAD_A_RESET).  Returning 0
 * tells EH to proceed with the reset sequence.
 */
static int mv_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;

	mv_stop_dma(ap);

	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET))
		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;

	return 0;
}
2460
/* libata EH hardreset hook: stop EDMA, hard-reset the channel, then
 * perform the COMRESET/classify sequence via mv_phy_reset().
 * Always returns 0.
 */
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	return 0;
}
2476
/* libata EH postreset hook: report link status, clear SError, and —
 * when a device is present — program the device control register.
 */
static void mv_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serr;

	/* print link status */
	sata_print_link_status(link);

	/* clear SError (write-1-to-clear via read-back value) */
	sata_scr_read(link, SCR_ERROR, &serr);
	sata_scr_write_flush(link, SCR_ERROR, serr);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
}
2498
/* libata error_handler hook: run standard EH with this driver's
 * prereset/hardreset/postreset callbacks (softreset is libata's
 * standard one).
 */
static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}
2504
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002505static void mv_eh_freeze(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04002506{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002507 struct mv_host_priv *hpriv = ap->host->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002508 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2509 u32 tmp, mask;
2510 unsigned int shift;
Brett Russ31961942005-09-30 01:36:00 -04002511
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002512 /* FIXME: handle coalescing completion events properly */
Brett Russ31961942005-09-30 01:36:00 -04002513
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002514 shift = ap->port_no * 2;
2515 if (hc > 0)
2516 shift++;
Brett Russ31961942005-09-30 01:36:00 -04002517
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002518 mask = 0x3 << shift;
Brett Russ31961942005-09-30 01:36:00 -04002519
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002520 /* disable assertion of portN err, done events */
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002521 tmp = readl(hpriv->main_mask_reg_addr);
2522 writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002523}
2524
/* libata EH thaw hook: clear the port's latched EDMA errors and
 * pending HC irq-cause bits, then re-enable its err/done bits in the
 * main interrupt mask register (inverse of mv_eh_freeze).
 */
static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	/* two mask bits per port; HC1's group is offset by one bit,
	 * and the port number within HC1 is port_no - 4 */
	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp | mask, hpriv->main_mask_reg_addr);
}
2558
/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup: the taskfile shadow registers live in a
	 * block of 32-bit slots indexed by the ATA_REG_* constants.
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions
	 * (SError is write-1-to-clear, so write back the value read) */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
2607
/* Identify the chip variant from @board_idx: select the hardware-ops
 * vtable, set generation and per-revision errata flags, and choose
 * PCI vs PCIe interrupt register offsets.  Returns 0 on success, 1 on
 * an invalid board index.
 */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* fall through: 7042 otherwise shares the 6042 setup */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	/* PCIe parts latch their interrupt cause/mask at different offsets */
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
2747
/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 * @board_idx: controller index
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * Returns 0 on success, nonzero if chip identification or the
 * global reset fails.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	/* main irq cause/mask registers live at different offsets on
	 * PCI-attached vs SoC-integrated controllers */
	if (HAS_PCI(host)) {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_MASK_OFS;
	}
	/* global interrupt mask */
	writel(0, hpriv->main_mask_reg_addr);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* capture PHY settings before the global reset clobbers them */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (HAS_PCI(host)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (HAS_PCI(host)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
		if (IS_GEN_I(hpriv))
			writelfl(~HC_MAIN_MASKED_IRQS_5,
				 hpriv->main_mask_reg_addr);
		else
			writelfl(~HC_MAIN_MASKED_IRQS,
				 hpriv->main_mask_reg_addr);

		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
			"PCI int cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr),
			readl(mmio + hpriv->irq_cause_ofs),
			readl(mmio + hpriv->irq_mask_ofs));
	} else {
		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
			 hpriv->main_mask_reg_addr);
		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr));
	}
done:
	return rc;
}
2864
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002865static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2866{
2867 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2868 MV_CRQB_Q_SZ, 0);
2869 if (!hpriv->crqb_pool)
2870 return -ENOMEM;
2871
2872 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2873 MV_CRPB_Q_SZ, 0);
2874 if (!hpriv->crpb_pool)
2875 return -ENOMEM;
2876
2877 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2878 MV_SG_TBL_SZ, 0);
2879 if (!hpriv->sg_tbl_pool)
2880 return -ENOMEM;
2881
2882 return 0;
2883}
2884
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002885/**
2886 * mv_platform_probe - handle a positive probe of an soc Marvell
2887 * host
2888 * @pdev: platform device found
2889 *
2890 * LOCKING:
2891 * Inherited from caller.
2892 */
2893static int mv_platform_probe(struct platform_device *pdev)
2894{
2895 static int printed_version;
2896 const struct mv_sata_platform_data *mv_platform_data;
2897 const struct ata_port_info *ppi[] =
2898 { &mv_port_info[chip_soc], NULL };
2899 struct ata_host *host;
2900 struct mv_host_priv *hpriv;
2901 struct resource *res;
2902 int n_ports, rc;
2903
2904 if (!printed_version++)
2905 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2906
2907 /*
2908 * Simple resource validation ..
2909 */
2910 if (unlikely(pdev->num_resources != 2)) {
2911 dev_err(&pdev->dev, "invalid number of resources\n");
2912 return -EINVAL;
2913 }
2914
2915 /*
2916 * Get the register base first
2917 */
2918 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2919 if (res == NULL)
2920 return -EINVAL;
2921
2922 /* allocate host */
2923 mv_platform_data = pdev->dev.platform_data;
2924 n_ports = mv_platform_data->n_ports;
2925
2926 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2927 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2928
2929 if (!host || !hpriv)
2930 return -ENOMEM;
2931 host->private_data = hpriv;
2932 hpriv->n_ports = n_ports;
2933
2934 host->iomap = NULL;
Saeed Bisharaf1cb0ea2008-02-18 07:42:28 -11002935 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2936 res->end - res->start + 1);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002937 hpriv->base -= MV_SATAHC0_REG_BASE;
2938
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002939 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2940 if (rc)
2941 return rc;
2942
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002943 /* initialize adapter */
2944 rc = mv_init_host(host, chip_soc);
2945 if (rc)
2946 return rc;
2947
2948 dev_printk(KERN_INFO, &pdev->dev,
2949 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2950 host->n_ports);
2951
2952 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2953 IRQF_SHARED, &mv6_sht);
2954}
2955
2956/*
2957 *
2958 * mv_platform_remove - unplug a platform interface
2959 * @pdev: platform device
2960 *
2961 * A platform bus SATA device has been unplugged. Perform the needed
2962 * cleanup. Also called on module unload for any active devices.
2963 */
2964static int __devexit mv_platform_remove(struct platform_device *pdev)
2965{
2966 struct device *dev = &pdev->dev;
2967 struct ata_host *host = dev_get_drvdata(dev);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002968
2969 ata_host_detach(host);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002970 return 0;
2971}
2972
/* Platform-bus glue for SoC-integrated controllers (non-PCI).  Registered
 * from mv_init() alongside the PCI driver below.
 */
static struct platform_driver mv_platform_driver = {
	.probe		= mv_platform_probe,
	.remove		= __devexit_p(mv_platform_remove),
	.driver		= {
			   .name = DRV_NAME,
			   .owner = THIS_MODULE,
			  },
};
2981
2982
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002983#ifdef CONFIG_PCI
/* Forward declaration: the probe routine is defined further down in this
 * CONFIG_PCI section, after the helpers it uses.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);


/* PCI glue for discrete Marvell controllers; removal is handled entirely
 * by the generic libata helper.
 */
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
};
2994
/*
 * module options
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
2999
3000
3001/* move to PCI layer or libata core? */
3002static int pci_go_64(struct pci_dev *pdev)
3003{
3004 int rc;
3005
3006 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3007 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3008 if (rc) {
3009 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3010 if (rc) {
3011 dev_printk(KERN_ERR, &pdev->dev,
3012 "64-bit DMA enable failed\n");
3013 return rc;
3014 }
3015 }
3016 } else {
3017 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3018 if (rc) {
3019 dev_printk(KERN_ERR, &pdev->dev,
3020 "32-bit DMA enable failed\n");
3021 return rc;
3022 }
3023 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3024 if (rc) {
3025 dev_printk(KERN_ERR, &pdev->dev,
3026 "32-bit consistent DMA enable failed\n");
3027 return rc;
3028 }
3029 }
3030
3031 return rc;
3032}
3033
Brett Russ05b308e2005-10-05 17:08:53 -04003034/**
3035 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09003036 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04003037 *
3038 * FIXME: complete this.
3039 *
3040 * LOCKING:
3041 * Inherited from caller.
3042 */
Tejun Heo4447d352007-04-17 23:44:08 +09003043static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04003044{
Tejun Heo4447d352007-04-17 23:44:08 +09003045 struct pci_dev *pdev = to_pci_dev(host->dev);
3046 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07003047 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04003048 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04003049
3050 /* Use this to determine the HW stepping of the chip so we know
3051 * what errata to workaround
3052 */
Brett Russ31961942005-09-30 01:36:00 -04003053 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
3054 if (scc == 0)
3055 scc_s = "SCSI";
3056 else if (scc == 0x01)
3057 scc_s = "RAID";
3058 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04003059 scc_s = "?";
3060
3061 if (IS_GEN_I(hpriv))
3062 gen = "I";
3063 else if (IS_GEN_II(hpriv))
3064 gen = "II";
3065 else if (IS_GEN_IIE(hpriv))
3066 gen = "IIE";
3067 else
3068 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04003069
Jeff Garzika9524a72005-10-30 14:39:11 -05003070 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04003071 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
3072 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04003073 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
3074}
3075
/**
 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * Allocates the ATA host and private data, acquires and maps the PCI
 * resources, configures DMA masks, creates the DMA pools, initializes
 * the adapter and activates the host.  All resources are acquired via
 * managed (devm_/pcim_) interfaces, so error paths just return.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	/* driver_data in the ID table selects the per-chip port info */
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		/* BAR held by another driver: pin the device so the
		 * managed layer does not disable it on our way out.
		 */
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		/* MSI requested but unavailable: fall back to legacy INTx */
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	/* Gen-I chips use the non-NCQ scsi_host_template variant */
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003145#endif
Brett Russ20f733e2005-09-01 18:26:17 -04003146
/* NOTE(review): both functions are already defined earlier in this file,
 * so these prototypes appear redundant -- candidates for removal.  TODO:
 * confirm no configuration compiles the definitions out of this view.
 */
static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);
3149
Brett Russ20f733e2005-09-01 18:26:17 -04003150static int __init mv_init(void)
3151{
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003152 int rc = -ENODEV;
3153#ifdef CONFIG_PCI
3154 rc = pci_register_driver(&mv_pci_driver);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003155 if (rc < 0)
3156 return rc;
3157#endif
3158 rc = platform_driver_register(&mv_platform_driver);
3159
3160#ifdef CONFIG_PCI
3161 if (rc < 0)
3162 pci_unregister_driver(&mv_pci_driver);
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003163#endif
3164 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04003165}
3166
/* Module exit: unregister in the reverse order of mv_init(). */
static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}
3174
/* Module metadata and parameters */
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:sata_mv");

#ifdef CONFIG_PCI
/* 0444: readable in sysfs but only settable at load time */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);