blob: 1e97a33cd2602d1df2347080bd7f7493c6152702 [file] [log] [blame]
Brett Russ20f733e2005-09-01 18:26:17 -04001/*
2 * sata_mv.c - Marvell SATA support
3 *
Jeff Garzik8b260242005-11-12 12:32:50 -05004 * Copyright 2005: EMC Corporation, all rights reserved.
Jeff Garzike2b1be52005-11-18 14:04:23 -05005 * Copyright 2005 Red Hat, Inc. All rights reserved.
Brett Russ20f733e2005-09-01 18:26:17 -04006 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
Jeff Garzik4a05e202007-05-24 23:40:15 -040024/*
25 sata_mv TODO list:
26
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember a couple workarounds (one related to PCI-X)
30 are still needed.
31
Mark Lord1fd2e1c2008-01-26 18:33:59 -050032 2) Improve/fix IRQ and error handling sequences.
33
34 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
35
36 4) Think about TCQ support here, and for libata in general
37 with controllers that suppport it via host-queuing hardware
38 (a software-only implementation could be a nightmare).
Jeff Garzik4a05e202007-05-24 23:40:15 -040039
40 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
41
42 6) Add port multiplier support (intermediate)
43
Jeff Garzik4a05e202007-05-24 23:40:15 -040044 8) Develop a low-power-consumption strategy, and implement it.
45
46 9) [Experiment, low priority] See if ATAPI can be supported using
47 "unknown FIS" or "vendor-specific FIS" support, or something creative
48 like that.
49
50 10) [Experiment, low priority] Investigate interrupt coalescing.
51 Quite often, especially with PCI Message Signalled Interrupts (MSI),
52 the overhead reduced by interrupt mitigation is quite often not
53 worth the latency cost.
54
55 11) [Experiment, Marvell value added] Is it possible to use target
56 mode to cross-connect two Linux boxes with Marvell cards? If so,
57 creating LibATA target mode support would be very interesting.
58
59 Target mode, for those without docs, is the ability to directly
60 connect two SATA controllers.
61
Jeff Garzik4a05e202007-05-24 23:40:15 -040062*/
63
64
Brett Russ20f733e2005-09-01 18:26:17 -040065#include <linux/kernel.h>
66#include <linux/module.h>
67#include <linux/pci.h>
68#include <linux/init.h>
69#include <linux/blkdev.h>
70#include <linux/delay.h>
71#include <linux/interrupt.h>
Brett Russ20f733e2005-09-01 18:26:17 -040072#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050073#include <linux/device.h>
Saeed Bisharaf351b2d2008-02-01 18:08:03 -050074#include <linux/platform_device.h>
75#include <linux/ata_platform.h>
Brett Russ20f733e2005-09-01 18:26:17 -040076#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050077#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040078#include <scsi/scsi_device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040079#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040080
81#define DRV_NAME "sata_mv"
Mark Lord1fd2e1c2008-01-26 18:33:59 -050082#define DRV_VERSION "1.20"
Brett Russ20f733e2005-09-01 18:26:17 -040083
84enum {
85 /* BAR's are enumerated in terms of pci_resource_start() terms */
86 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
87 MV_IO_BAR = 2, /* offset 0x18: IO space */
88 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
89
90 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
91 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
92
93 MV_PCI_REG_BASE = 0,
94 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
Mark Lord615ab952006-05-19 16:24:56 -040095 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
96 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
97 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
98 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
99 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
100
Brett Russ20f733e2005-09-01 18:26:17 -0400101 MV_SATAHC0_REG_BASE = 0x20000,
Jeff Garzik522479f2005-11-12 22:14:02 -0500102 MV_FLASH_CTL = 0x1046c,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500103 MV_GPIO_PORT_CTL = 0x104f0,
104 MV_RESET_CFG = 0x180d8,
Brett Russ20f733e2005-09-01 18:26:17 -0400105
106 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
107 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
108 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
109 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
110
Brett Russ31961942005-09-30 01:36:00 -0400111 MV_MAX_Q_DEPTH = 32,
112 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
113
114 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
115 * CRPB needs alignment on a 256B boundary. Size == 256B
Brett Russ31961942005-09-30 01:36:00 -0400116 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
117 */
118 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
119 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
Mark Lordda2fa9b2008-01-26 18:32:45 -0500120 MV_MAX_SG_CT = 256,
Brett Russ31961942005-09-30 01:36:00 -0400121 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
Brett Russ31961942005-09-30 01:36:00 -0400122
Brett Russ20f733e2005-09-01 18:26:17 -0400123 MV_PORTS_PER_HC = 4,
124 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
125 MV_PORT_HC_SHIFT = 2,
Brett Russ31961942005-09-30 01:36:00 -0400126 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
Brett Russ20f733e2005-09-01 18:26:17 -0400127 MV_PORT_MASK = 3,
128
129 /* Host Flags */
130 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
131 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100132 /* SoC integrated controllers, no PCI interface */
133 MV_FLAG_SOC = (1 << 28),
134
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400135 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400136 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
137 ATA_FLAG_PIO_POLLING,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500138 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
Brett Russ20f733e2005-09-01 18:26:17 -0400139
Brett Russ31961942005-09-30 01:36:00 -0400140 CRQB_FLAG_READ = (1 << 0),
141 CRQB_TAG_SHIFT = 1,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400142 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
143 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
Brett Russ31961942005-09-30 01:36:00 -0400144 CRQB_CMD_ADDR_SHIFT = 8,
145 CRQB_CMD_CS = (0x2 << 11),
146 CRQB_CMD_LAST = (1 << 15),
147
148 CRPB_FLAG_STATUS_SHIFT = 8,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400149 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
150 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
Brett Russ31961942005-09-30 01:36:00 -0400151
152 EPRD_FLAG_END_OF_TBL = (1 << 31),
153
Brett Russ20f733e2005-09-01 18:26:17 -0400154 /* PCI interface registers */
155
Brett Russ31961942005-09-30 01:36:00 -0400156 PCI_COMMAND_OFS = 0xc00,
157
Brett Russ20f733e2005-09-01 18:26:17 -0400158 PCI_MAIN_CMD_STS_OFS = 0xd30,
159 STOP_PCI_MASTER = (1 << 2),
160 PCI_MASTER_EMPTY = (1 << 3),
161 GLOB_SFT_RST = (1 << 4),
162
Jeff Garzik522479f2005-11-12 22:14:02 -0500163 MV_PCI_MODE = 0xd00,
164 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
165 MV_PCI_DISC_TIMER = 0xd04,
166 MV_PCI_MSI_TRIGGER = 0xc38,
167 MV_PCI_SERR_MASK = 0xc28,
168 MV_PCI_XBAR_TMOUT = 0x1d04,
169 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
170 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
171 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
172 MV_PCI_ERR_COMMAND = 0x1d50,
173
Mark Lord02a121d2007-12-01 13:07:22 -0500174 PCI_IRQ_CAUSE_OFS = 0x1d58,
175 PCI_IRQ_MASK_OFS = 0x1d5c,
Brett Russ20f733e2005-09-01 18:26:17 -0400176 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
177
Mark Lord02a121d2007-12-01 13:07:22 -0500178 PCIE_IRQ_CAUSE_OFS = 0x1900,
179 PCIE_IRQ_MASK_OFS = 0x1910,
Mark Lord646a4da2008-01-26 18:30:37 -0500180 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
Mark Lord02a121d2007-12-01 13:07:22 -0500181
Brett Russ20f733e2005-09-01 18:26:17 -0400182 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
183 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500184 HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
185 HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
Brett Russ20f733e2005-09-01 18:26:17 -0400186 PORT0_ERR = (1 << 0), /* shift by port # */
187 PORT0_DONE = (1 << 1), /* shift by port # */
188 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
189 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
190 PCI_ERR = (1 << 18),
191 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
192 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500193 PORTS_0_3_COAL_DONE = (1 << 8),
194 PORTS_4_7_COAL_DONE = (1 << 17),
Brett Russ20f733e2005-09-01 18:26:17 -0400195 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
196 GPIO_INT = (1 << 22),
197 SELF_INT = (1 << 23),
198 TWSI_INT = (1 << 24),
199 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500200 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500201 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
Jeff Garzik8b260242005-11-12 12:32:50 -0500202 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
Brett Russ20f733e2005-09-01 18:26:17 -0400203 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
204 HC_MAIN_RSVD),
Jeff Garzikfb621e22007-02-25 04:19:45 -0500205 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
206 HC_MAIN_RSVD_5),
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500207 HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
Brett Russ20f733e2005-09-01 18:26:17 -0400208
209 /* SATAHC registers */
210 HC_CFG_OFS = 0,
211
212 HC_IRQ_CAUSE_OFS = 0x14,
Brett Russ31961942005-09-30 01:36:00 -0400213 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
Brett Russ20f733e2005-09-01 18:26:17 -0400214 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
215 DEV_IRQ = (1 << 8), /* shift by port # */
216
217 /* Shadow block registers */
Brett Russ31961942005-09-30 01:36:00 -0400218 SHD_BLK_OFS = 0x100,
219 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
Brett Russ20f733e2005-09-01 18:26:17 -0400220
221 /* SATA registers */
222 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
223 SATA_ACTIVE_OFS = 0x350,
Mark Lord0c589122008-01-26 18:31:16 -0500224 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500225 PHY_MODE3 = 0x310,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500226 PHY_MODE4 = 0x314,
227 PHY_MODE2 = 0x330,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500228 MV5_PHY_MODE = 0x74,
229 MV5_LT_MODE = 0x30,
230 MV5_PHY_CTL = 0x0C,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500231 SATA_INTERFACE_CTL = 0x050,
232
233 MV_M2_PREAMP_MASK = 0x7e0,
Brett Russ20f733e2005-09-01 18:26:17 -0400234
235 /* Port registers */
236 EDMA_CFG_OFS = 0,
Mark Lord0c589122008-01-26 18:31:16 -0500237 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
238 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
239 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
240 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
241 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
Brett Russ20f733e2005-09-01 18:26:17 -0400242
243 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
244 EDMA_ERR_IRQ_MASK_OFS = 0xc,
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400245 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
246 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
247 EDMA_ERR_DEV = (1 << 2), /* device error */
248 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
249 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
250 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400251 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
252 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400253 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400254 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400255 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
256 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
257 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
258 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
Mark Lord646a4da2008-01-26 18:30:37 -0500259
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400260 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500261 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
262 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
263 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
264 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
265
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400266 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500267
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400268 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500269 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
270 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
271 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
272 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
273 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
274
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400275 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500276
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400277 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400278 EDMA_ERR_OVERRUN_5 = (1 << 5),
279 EDMA_ERR_UNDERRUN_5 = (1 << 6),
Mark Lord646a4da2008-01-26 18:30:37 -0500280
281 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
282 EDMA_ERR_LNK_CTRL_RX_1 |
283 EDMA_ERR_LNK_CTRL_RX_3 |
284 EDMA_ERR_LNK_CTRL_TX,
285
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400286 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
287 EDMA_ERR_PRD_PAR |
288 EDMA_ERR_DEV_DCON |
289 EDMA_ERR_DEV_CON |
290 EDMA_ERR_SERR |
291 EDMA_ERR_SELF_DIS |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400292 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400293 EDMA_ERR_CRPB_PAR |
294 EDMA_ERR_INTRL_PAR |
295 EDMA_ERR_IORDY |
296 EDMA_ERR_LNK_CTRL_RX_2 |
297 EDMA_ERR_LNK_DATA_RX |
298 EDMA_ERR_LNK_DATA_TX |
299 EDMA_ERR_TRANS_PROTO,
300 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
301 EDMA_ERR_PRD_PAR |
302 EDMA_ERR_DEV_DCON |
303 EDMA_ERR_DEV_CON |
304 EDMA_ERR_OVERRUN_5 |
305 EDMA_ERR_UNDERRUN_5 |
306 EDMA_ERR_SELF_DIS_5 |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400307 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400308 EDMA_ERR_CRPB_PAR |
309 EDMA_ERR_INTRL_PAR |
310 EDMA_ERR_IORDY,
Brett Russ20f733e2005-09-01 18:26:17 -0400311
Brett Russ31961942005-09-30 01:36:00 -0400312 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
313 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400314
315 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
316 EDMA_REQ_Q_PTR_SHIFT = 5,
317
318 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
319 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
320 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400321 EDMA_RSP_Q_PTR_SHIFT = 3,
322
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400323 EDMA_CMD_OFS = 0x28, /* EDMA command register */
324 EDMA_EN = (1 << 0), /* enable EDMA */
325 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
326 ATA_RST = (1 << 2), /* reset trans/link/phy */
Brett Russ20f733e2005-09-01 18:26:17 -0400327
Jeff Garzikc9d39132005-11-13 17:47:51 -0500328 EDMA_IORDY_TMOUT = 0x34,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500329 EDMA_ARB_CFG = 0x38,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500330
Brett Russ31961942005-09-30 01:36:00 -0400331 /* Host private flags (hp_flags) */
332 MV_HP_FLAG_MSI = (1 << 0),
Jeff Garzik47c2b672005-11-12 21:13:17 -0500333 MV_HP_ERRATA_50XXB0 = (1 << 1),
334 MV_HP_ERRATA_50XXB2 = (1 << 2),
335 MV_HP_ERRATA_60X1B2 = (1 << 3),
336 MV_HP_ERRATA_60X1C0 = (1 << 4),
Jeff Garzike4e7b892006-01-31 12:18:41 -0500337 MV_HP_ERRATA_XX42A0 = (1 << 5),
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400338 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
339 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
340 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
Mark Lord02a121d2007-12-01 13:07:22 -0500341 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
Brett Russ20f733e2005-09-01 18:26:17 -0400342
Brett Russ31961942005-09-30 01:36:00 -0400343 /* Port private flags (pp_flags) */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400344 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
Mark Lord72109162008-01-26 18:31:33 -0500345 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400346 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
Brett Russ31961942005-09-30 01:36:00 -0400347};
348
Jeff Garzikee9ccdf2007-07-12 15:51:22 -0400349#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
350#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
Jeff Garzike4e7b892006-01-31 12:18:41 -0500351#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100352#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500353
Jeff Garzik095fec82005-11-12 09:50:49 -0500354enum {
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400355 /* DMA boundary 0xffff is required by the s/g splitting
356 * we need on /length/ in mv_fill-sg().
357 */
358 MV_DMA_BOUNDARY = 0xffffU,
Jeff Garzik095fec82005-11-12 09:50:49 -0500359
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400360 /* mask of register bits containing lower 32 bits
361 * of EDMA request queue DMA address
362 */
Jeff Garzik095fec82005-11-12 09:50:49 -0500363 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
364
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400365 /* ditto, for response queue */
Jeff Garzik095fec82005-11-12 09:50:49 -0500366 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
367};
368
Jeff Garzik522479f2005-11-12 22:14:02 -0500369enum chip_type {
370 chip_504x,
371 chip_508x,
372 chip_5080,
373 chip_604x,
374 chip_608x,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500375 chip_6042,
376 chip_7042,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500377 chip_soc,
Jeff Garzik522479f2005-11-12 22:14:02 -0500378};
379
Brett Russ31961942005-09-30 01:36:00 -0400380/* Command ReQuest Block: 32B */
381struct mv_crqb {
Mark Lorde1469872006-05-22 19:02:03 -0400382 __le32 sg_addr;
383 __le32 sg_addr_hi;
384 __le16 ctrl_flags;
385 __le16 ata_cmd[11];
Brett Russ31961942005-09-30 01:36:00 -0400386};
387
Jeff Garzike4e7b892006-01-31 12:18:41 -0500388struct mv_crqb_iie {
Mark Lorde1469872006-05-22 19:02:03 -0400389 __le32 addr;
390 __le32 addr_hi;
391 __le32 flags;
392 __le32 len;
393 __le32 ata_cmd[4];
Jeff Garzike4e7b892006-01-31 12:18:41 -0500394};
395
Brett Russ31961942005-09-30 01:36:00 -0400396/* Command ResPonse Block: 8B */
397struct mv_crpb {
Mark Lorde1469872006-05-22 19:02:03 -0400398 __le16 id;
399 __le16 flags;
400 __le32 tmstmp;
Brett Russ31961942005-09-30 01:36:00 -0400401};
402
403/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
404struct mv_sg {
Mark Lorde1469872006-05-22 19:02:03 -0400405 __le32 addr;
406 __le32 flags_size;
407 __le32 addr_hi;
408 __le32 reserved;
Brett Russ20f733e2005-09-01 18:26:17 -0400409};
410
411struct mv_port_priv {
Brett Russ31961942005-09-30 01:36:00 -0400412 struct mv_crqb *crqb;
413 dma_addr_t crqb_dma;
414 struct mv_crpb *crpb;
415 dma_addr_t crpb_dma;
Mark Lordeb73d552008-01-29 13:24:00 -0500416 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
417 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400418
419 unsigned int req_idx;
420 unsigned int resp_idx;
421
Brett Russ31961942005-09-30 01:36:00 -0400422 u32 pp_flags;
Brett Russ20f733e2005-09-01 18:26:17 -0400423};
424
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500425struct mv_port_signal {
426 u32 amps;
427 u32 pre;
428};
429
Mark Lord02a121d2007-12-01 13:07:22 -0500430struct mv_host_priv {
431 u32 hp_flags;
432 struct mv_port_signal signal[8];
433 const struct mv_hw_ops *ops;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500434 int n_ports;
435 void __iomem *base;
436 void __iomem *main_cause_reg_addr;
437 void __iomem *main_mask_reg_addr;
Mark Lord02a121d2007-12-01 13:07:22 -0500438 u32 irq_cause_ofs;
439 u32 irq_mask_ofs;
440 u32 unmask_all_irqs;
Mark Lordda2fa9b2008-01-26 18:32:45 -0500441 /*
442 * These consistent DMA memory pools give us guaranteed
443 * alignment for hardware-accessed data structures,
444 * and less memory waste in accomplishing the alignment.
445 */
446 struct dma_pool *crqb_pool;
447 struct dma_pool *crpb_pool;
448 struct dma_pool *sg_tbl_pool;
Mark Lord02a121d2007-12-01 13:07:22 -0500449};
450
Jeff Garzik47c2b672005-11-12 21:13:17 -0500451struct mv_hw_ops {
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500452 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
453 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500454 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
455 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
456 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500457 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
458 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500459 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100460 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500461};
462
Brett Russ20f733e2005-09-01 18:26:17 -0400463static void mv_irq_clear(struct ata_port *ap);
Tejun Heoda3dbb12007-07-16 14:29:40 +0900464static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
465static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
466static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
467static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Brett Russ31961942005-09-30 01:36:00 -0400468static int mv_port_start(struct ata_port *ap);
469static void mv_port_stop(struct ata_port *ap);
470static void mv_qc_prep(struct ata_queued_cmd *qc);
Jeff Garzike4e7b892006-01-31 12:18:41 -0500471static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
Tejun Heo9a3d9eb2006-01-23 13:09:36 +0900472static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400473static void mv_error_handler(struct ata_port *ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400474static void mv_eh_freeze(struct ata_port *ap);
475static void mv_eh_thaw(struct ata_port *ap);
Mark Lordf2738272008-01-26 18:32:29 -0500476static void mv6_dev_config(struct ata_device *dev);
Brett Russ20f733e2005-09-01 18:26:17 -0400477
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500478static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
479 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500480static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
481static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
482 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500483static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
484 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500485static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100486static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500487
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500488static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
489 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500490static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
491static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
492 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500493static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
494 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500495static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500496static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
497 void __iomem *mmio);
498static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
499 void __iomem *mmio);
500static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
501 void __iomem *mmio, unsigned int n_hc);
502static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
503 void __iomem *mmio);
504static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100505static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500506static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
507 unsigned int port_no);
Mark Lord72109162008-01-26 18:31:33 -0500508static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
509 void __iomem *port_mmio, int want_ncq);
510static int __mv_stop_dma(struct ata_port *ap);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500511
Mark Lordeb73d552008-01-29 13:24:00 -0500512/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
513 * because we have to allow room for worst case splitting of
514 * PRDs for 64K boundaries in mv_fill_sg().
515 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400516static struct scsi_host_template mv5_sht = {
Brett Russ20f733e2005-09-01 18:26:17 -0400517 .module = THIS_MODULE,
518 .name = DRV_NAME,
519 .ioctl = ata_scsi_ioctl,
520 .queuecommand = ata_scsi_queuecmd,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400521 .can_queue = ATA_DEF_QUEUE,
522 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400523 .sg_tablesize = MV_MAX_SG_CT / 2,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400524 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
525 .emulated = ATA_SHT_EMULATED,
526 .use_clustering = 1,
527 .proc_name = DRV_NAME,
528 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzik3be6cbd2007-10-18 16:21:18 -0400529 .slave_configure = ata_scsi_slave_config,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400530 .slave_destroy = ata_scsi_slave_destroy,
531 .bios_param = ata_std_bios_param,
532};
533
534static struct scsi_host_template mv6_sht = {
535 .module = THIS_MODULE,
536 .name = DRV_NAME,
537 .ioctl = ata_scsi_ioctl,
538 .queuecommand = ata_scsi_queuecmd,
Mark Lord138bfdd2008-01-26 18:33:18 -0500539 .change_queue_depth = ata_scsi_change_queue_depth,
540 .can_queue = MV_MAX_Q_DEPTH - 1,
Brett Russ20f733e2005-09-01 18:26:17 -0400541 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400542 .sg_tablesize = MV_MAX_SG_CT / 2,
Brett Russ20f733e2005-09-01 18:26:17 -0400543 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
544 .emulated = ATA_SHT_EMULATED,
Jeff Garzikd88184f2007-02-26 01:26:06 -0500545 .use_clustering = 1,
Brett Russ20f733e2005-09-01 18:26:17 -0400546 .proc_name = DRV_NAME,
547 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzik3be6cbd2007-10-18 16:21:18 -0400548 .slave_configure = ata_scsi_slave_config,
Tejun Heoccf68c32006-05-31 18:28:09 +0900549 .slave_destroy = ata_scsi_slave_destroy,
Brett Russ20f733e2005-09-01 18:26:17 -0400550 .bios_param = ata_std_bios_param,
Brett Russ20f733e2005-09-01 18:26:17 -0400551};
552
Jeff Garzikc9d39132005-11-13 17:47:51 -0500553static const struct ata_port_operations mv5_ops = {
Jeff Garzikc9d39132005-11-13 17:47:51 -0500554 .tf_load = ata_tf_load,
555 .tf_read = ata_tf_read,
556 .check_status = ata_check_status,
557 .exec_command = ata_exec_command,
558 .dev_select = ata_std_dev_select,
559
Jeff Garzikcffacd82007-03-09 09:46:47 -0500560 .cable_detect = ata_cable_sata,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500561
562 .qc_prep = mv_qc_prep,
563 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900564 .data_xfer = ata_data_xfer,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500565
Jeff Garzikc9d39132005-11-13 17:47:51 -0500566 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900567 .irq_on = ata_irq_on,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500568
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400569 .error_handler = mv_error_handler,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400570 .freeze = mv_eh_freeze,
571 .thaw = mv_eh_thaw,
572
Jeff Garzikc9d39132005-11-13 17:47:51 -0500573 .scr_read = mv5_scr_read,
574 .scr_write = mv5_scr_write,
575
576 .port_start = mv_port_start,
577 .port_stop = mv_port_stop,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500578};
579
580static const struct ata_port_operations mv6_ops = {
Mark Lordf2738272008-01-26 18:32:29 -0500581 .dev_config = mv6_dev_config,
Brett Russ20f733e2005-09-01 18:26:17 -0400582 .tf_load = ata_tf_load,
583 .tf_read = ata_tf_read,
584 .check_status = ata_check_status,
585 .exec_command = ata_exec_command,
586 .dev_select = ata_std_dev_select,
587
Jeff Garzikcffacd82007-03-09 09:46:47 -0500588 .cable_detect = ata_cable_sata,
Brett Russ20f733e2005-09-01 18:26:17 -0400589
Brett Russ31961942005-09-30 01:36:00 -0400590 .qc_prep = mv_qc_prep,
591 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900592 .data_xfer = ata_data_xfer,
Brett Russ20f733e2005-09-01 18:26:17 -0400593
Brett Russ20f733e2005-09-01 18:26:17 -0400594 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900595 .irq_on = ata_irq_on,
Brett Russ20f733e2005-09-01 18:26:17 -0400596
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400597 .error_handler = mv_error_handler,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400598 .freeze = mv_eh_freeze,
599 .thaw = mv_eh_thaw,
Mark Lord138bfdd2008-01-26 18:33:18 -0500600 .qc_defer = ata_std_qc_defer,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400601
Brett Russ20f733e2005-09-01 18:26:17 -0400602 .scr_read = mv_scr_read,
603 .scr_write = mv_scr_write,
604
Brett Russ31961942005-09-30 01:36:00 -0400605 .port_start = mv_port_start,
606 .port_stop = mv_port_stop,
Brett Russ20f733e2005-09-01 18:26:17 -0400607};
608
/*
 * ata_port_operations for Gen-IIE controllers (6042/7042 and SoC
 * variants).  Note the IIE-specific request-block builder
 * (mv_qc_prep_iie); the SCR, EH and port start/stop hooks are the
 * same driver-local handlers used by the other chip generations.
 */
static const struct ata_port_operations mv_iie_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.qc_defer		= ata_std_qc_defer,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
636
/*
 * Per-board attribute table, indexed by the chip_* board ids used as
 * driver_data in mv_pci_tbl[]: controller flags, transfer-mode masks
 * and the port operations matching each chip generation.
 */
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
691
/*
 * PCI device IDs bound by this driver.  The driver_data field is a
 * chip_* board id indexing mv_port_info[].
 */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
721
/* Low-level hardware hooks for Gen-I (50xx-family) controllers. */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata	= mv5_phy_errata,
	.enable_leds	= mv5_enable_leds,
	.read_preamp	= mv5_read_preamp,
	.reset_hc	= mv5_reset_hc,
	.reset_flash	= mv5_reset_flash,
	.reset_bus	= mv5_reset_bus,
};
730
/* Low-level hardware hooks for Gen-II/IIE (60xx/70xx) PCI controllers. */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata	= mv6_phy_errata,
	.enable_leds	= mv6_enable_leds,
	.read_preamp	= mv6_read_preamp,
	.reset_hc	= mv6_reset_hc,
	.reset_flash	= mv6_reset_flash,
	.reset_bus	= mv_reset_pci_bus,
};
739
/*
 * Low-level hardware hooks for system-on-chip integrations: shares
 * the Gen-II phy errata handler, everything else is SoC-specific.
 */
static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata	= mv6_phy_errata,
	.enable_leds	= mv_soc_enable_leds,
	.read_preamp	= mv_soc_read_preamp,
	.reset_hc	= mv_soc_reset_hc,
	.reset_flash	= mv_soc_reset_flash,
	.reset_bus	= mv_soc_reset_bus,
};
748
Brett Russ20f733e2005-09-01 18:26:17 -0400749/*
750 * Functions
751 */
752
/*
 * writelfl - register write followed by a flushing read ("fl" = flush).
 * The dummy readl() forces the write out of the PCI posted-write
 * buffer before execution continues.
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
758
Brett Russ20f733e2005-09-01 18:26:17 -0400759static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
760{
761 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
762}
763
Jeff Garzikc9d39132005-11-13 17:47:51 -0500764static inline unsigned int mv_hc_from_port(unsigned int port)
765{
766 return port >> MV_PORT_HC_SHIFT;
767}
768
769static inline unsigned int mv_hardport_from_port(unsigned int port)
770{
771 return port & MV_PORT_MASK;
772}
773
774static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
775 unsigned int port)
776{
777 return mv_hc_base(base, mv_hc_from_port(port));
778}
779
Brett Russ20f733e2005-09-01 18:26:17 -0400780static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
781{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500782 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500783 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500784 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400785}
786
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500787static inline void __iomem *mv_host_base(struct ata_host *host)
788{
789 struct mv_host_priv *hpriv = host->private_data;
790 return hpriv->base;
791}
792
Brett Russ20f733e2005-09-01 18:26:17 -0400793static inline void __iomem *mv_ap_base(struct ata_port *ap)
794{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500795 return mv_port_base(mv_host_base(ap->host), ap->port_no);
Brett Russ20f733e2005-09-01 18:26:17 -0400796}
797
Jeff Garzikcca39742006-08-24 03:19:22 -0400798static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400799{
Jeff Garzikcca39742006-08-24 03:19:22 -0400800 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400801}
802
/*
 * Intentionally empty ->irq_clear hook: this driver clears its
 * interrupt-cause registers explicitly elsewhere (e.g. the
 * HC_IRQ_CAUSE/EDMA_ERR_IRQ_CAUSE writes in mv_start_dma()).
 */
static void mv_irq_clear(struct ata_port *ap)
{
}
806
/**
 * mv_set_edma_ptrs - program EDMA ring bases and in/out pointers
 * @port_mmio: port register base
 * @hpriv: host private data (consulted for errata flags)
 * @pp: port private data holding ring DMA addresses and sw indices
 *
 * Writes the 64-bit base addresses of the CRQB (request) and CRPB
 * (response) rings to the port and reloads the hardware in/out
 * pointer registers from the driver's cached req_idx/resp_idx.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);	/* request ring must be 1KB aligned */
	/* two 16-bit shifts: safe even when dma_addr_t is only 32 bits */
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	/* NOTE(review): on MV_HP_ERRATA_XX42A0 parts the OUT pointer
	 * register is written with the full low address as well --
	 * presumably an errata workaround; confirm against datasheet. */
	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);	/* response ring must be 256B aligned */
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
846
/**
 * mv_start_dma - Enable eDMA engine
 * @ap: ATA channel being started
 * @port_mmio: port register base address
 * @pp: port private data
 * @protocol: taskfile protocol of the command about to be issued
 *
 * If EDMA is already running but in the wrong mode (NCQ vs non-NCQ),
 * it is stopped first.  When (re)enabling: stale EDMA error, HC and
 * FIS interrupt causes are cleared, EDMA is reconfigured for the
 * requested mode, the ring pointers reloaded, and EDMA_EN set.
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		/* an NCQ <-> non-NCQ mode switch needs an EDMA restart */
		if (want_ncq != using_ncq)
			__mv_stop_dma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		/* NOTE(review): hc_mmio is derived from hard_port rather
		 * than ap->port_no, although mv_hc_base_from_port() takes a
		 * full host port number -- verify for ports on HC1. */
		void __iomem *hc_mmio = mv_hc_base_from_port(
				ap->host->iomap[MV_PRIMARY_BAR], hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			   (CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}
899
/**
 * __mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Requests EDMA disable (if enabled) and polls until the engine
 * reports stopped.  Returns 0 on success, -EIO if the engine fails
 * to stop within the polling window.
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller (see the locked wrapper mv_stop_dma()).
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active. The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {	/* up to ~100ms (1000 x 100us) */
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
942
/*
 * mv_stop_dma - locked wrapper around __mv_stop_dma()
 *
 * Takes the host lock so the cached EDMA state (pp->pp_flags) and the
 * hardware are updated consistently for callers that do not already
 * hold the lock.
 */
static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}
954
#ifdef ATA_DEBUG
/*
 * Debug helper: hex-dump @bytes of MMIO space starting at @start,
 * four 32-bit words per output line.
 */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
969
/*
 * Debug helper: hex-dump the first @bytes of @pdev's PCI config
 * space, four dwords per line.  The body compiles away (and read
 * errors are deliberately ignored) unless ATA_DEBUG is defined.
 */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
986static void mv_dump_all_regs(void __iomem *mmio_base, int port,
987 struct pci_dev *pdev)
988{
989#ifdef ATA_DEBUG
Jeff Garzik8b260242005-11-12 12:32:50 -0500990 void __iomem *hc_base = mv_hc_base(mmio_base,
Brett Russ31961942005-09-30 01:36:00 -0400991 port >> MV_PORT_HC_SHIFT);
992 void __iomem *port_base;
993 int start_port, num_ports, p, start_hc, num_hcs, hc;
994
995 if (0 > port) {
996 start_hc = start_port = 0;
997 num_ports = 8; /* shld be benign for 4 port devs */
998 num_hcs = 2;
999 } else {
1000 start_hc = port >> MV_PORT_HC_SHIFT;
1001 start_port = port;
1002 num_ports = num_hcs = 1;
1003 }
Jeff Garzik8b260242005-11-12 12:32:50 -05001004 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
Brett Russ31961942005-09-30 01:36:00 -04001005 num_ports > 1 ? num_ports - 1 : start_port);
1006
1007 if (NULL != pdev) {
1008 DPRINTK("PCI config space regs:\n");
1009 mv_dump_pci_cfg(pdev, 0x68);
1010 }
1011 DPRINTK("PCI regs:\n");
1012 mv_dump_mem(mmio_base+0xc00, 0x3c);
1013 mv_dump_mem(mmio_base+0xd00, 0x34);
1014 mv_dump_mem(mmio_base+0xf00, 0x4);
1015 mv_dump_mem(mmio_base+0x1d00, 0x6c);
1016 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
Dan Alonid220c372006-04-10 23:20:22 -07001017 hc_base = mv_hc_base(mmio_base, hc);
Brett Russ31961942005-09-30 01:36:00 -04001018 DPRINTK("HC regs (HC %i):\n", hc);
1019 mv_dump_mem(hc_base, 0x1c);
1020 }
1021 for (p = start_port; p < start_port + num_ports; p++) {
1022 port_base = mv_port_base(mmio_base, p);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001023 DPRINTK("EDMA regs (port %i):\n", p);
Brett Russ31961942005-09-30 01:36:00 -04001024 mv_dump_mem(port_base, 0x54);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001025 DPRINTK("SATA regs (port %i):\n", p);
Brett Russ31961942005-09-30 01:36:00 -04001026 mv_dump_mem(port_base+0x300, 0x60);
1027 }
1028#endif
1029}
1030
Brett Russ20f733e2005-09-01 18:26:17 -04001031static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1032{
1033 unsigned int ofs;
1034
1035 switch (sc_reg_in) {
1036 case SCR_STATUS:
1037 case SCR_CONTROL:
1038 case SCR_ERROR:
1039 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1040 break;
1041 case SCR_ACTIVE:
1042 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1043 break;
1044 default:
1045 ofs = 0xffffffffU;
1046 break;
1047 }
1048 return ofs;
1049}
1050
Tejun Heoda3dbb12007-07-16 14:29:40 +09001051static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -04001052{
1053 unsigned int ofs = mv_scr_offset(sc_reg_in);
1054
Tejun Heoda3dbb12007-07-16 14:29:40 +09001055 if (ofs != 0xffffffffU) {
1056 *val = readl(mv_ap_base(ap) + ofs);
1057 return 0;
1058 } else
1059 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001060}
1061
Tejun Heoda3dbb12007-07-16 14:29:40 +09001062static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -04001063{
1064 unsigned int ofs = mv_scr_offset(sc_reg_in);
1065
Tejun Heoda3dbb12007-07-16 14:29:40 +09001066 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -04001067 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001068 return 0;
1069 } else
1070 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001071}
1072
Mark Lordf2738272008-01-26 18:32:29 -05001073static void mv6_dev_config(struct ata_device *adev)
1074{
1075 /*
1076 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1077 * See mv_qc_prep() for more info.
1078 */
1079 if (adev->flags & ATA_DFLAG_NCQ)
1080 if (adev->max_sectors > ATA_MAX_SECTORS)
1081 adev->max_sectors = ATA_MAX_SECTORS;
1082}
1083
/**
 * mv_edma_cfg - program the port's EDMA configuration register
 * @pp: port private data (NCQ state is cached in pp_flags)
 * @hpriv: host private data (used to test chip generation)
 * @port_mmio: port register base
 * @want_ncq: non-zero to enable EDMA queued (NCQ) operation
 *
 * Builds a generation-specific EDMA_CFG word, mirrors the NCQ
 * setting into pp->pp_flags, and writes the register.
 */
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq)
{
	u32 cfg;

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1113
/*
 * mv_port_free_dma_mem - release a port's EDMA descriptor memory
 * @ap: port being torn down
 *
 * Returns the CRQB/CRPB rings and per-tag scatter/gather tables to
 * their dma_pools, NULLing each pointer, so it is safe to call on a
 * partially-allocated port (the error path of mv_port_start()).
 */
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			/* GEN_I tags 1..n alias tag 0: only clear them */
			pp->sg_tbl[tag] = NULL;
		}
	}
}
1142
/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * Returns 0 on success; on allocation failure returns -ENOMEM after
 * releasing any rings already taken (via mv_port_free_dma_mem()).
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;
	int tag, rc;

	/* devm allocation: freed automatically on device detach */
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* command request block (CRQB) ring */
	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	/* command response block (CRPB) ring */
	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	/* lock out the interrupt handler while touching EDMA registers */
	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(pp, hpriv, port_mmio, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only. Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
1214
/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);	/* takes and releases the host lock */
	mv_port_free_dma_mem(ap);
}
1229
/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * Each DMA segment is split into chunks that never cross a 64KB
 * address boundary, since the ePRD length field is only 16 bits.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];	/* per-tag table (aliased on GEN_I) */
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			/* clip the chunk at the next 64KB boundary */
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			/* NOTE(review): a full 64KB chunk stores length 0 --
			 * presumably the hardware reads 0 as 64KB; verify */
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
1273
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001274static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001275{
Mark Lord559eeda2006-05-19 16:40:15 -04001276 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001277 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001278 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001279}
1280
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* only DMA and NCQ protocols go through the EDMA CRQB path */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* point the CRQB at this tag's scatter/gather table */
	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accomodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1370
1371/**
1372 * mv_qc_prep_iie - Host specific command preparation.
1373 * @qc: queued command to prepare
1374 *
1375 * This routine simply redirects to the general purpose routine
1376 * if command is not DMA. Else, it handles prep of the CRQB
1377 * (command request block), does some sanity checking, and calls
1378 * the SG load routine.
1379 *
1380 * LOCKING:
1381 * Inherited from caller.
1382 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* Only EDMA-eligible protocols get a CRQB built here; other
	 * protocols are issued via the legacy taskfile path instead.
	 */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	/* hardware "host queue" tag mirrors the libata tag */
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	/* low/high 32-bit halves of the per-tag SG table DMA address;
	 * the double shift avoids UB when dma_addr_t is only 32 bits
	 */
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	/* pack the taskfile registers into the four CRQB command words */
	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	/* no data phase -> no scatter/gather table to build */
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1439
Brett Russ05b308e2005-10-05 17:08:53 -04001440/**
1441 * mv_qc_issue - Initiate a command to the host
1442 * @qc: queued command to start
1443 *
1444 * This routine simply redirects to the general purpose routine
1445 * if command is not DMA. Else, it sanity checks our local
1446 * caches of the request producer/consumer indices then enables
1447 * DMA and bumps the request producer index.
1448 *
1449 * LOCKING:
1450 * Inherited from caller.
1451 */
Tejun Heo9a3d9eb2006-01-23 13:09:36 +09001452static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
Brett Russ31961942005-09-30 01:36:00 -04001453{
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001454 struct ata_port *ap = qc->ap;
1455 void __iomem *port_mmio = mv_ap_base(ap);
1456 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001457 u32 in_index;
Brett Russ31961942005-09-30 01:36:00 -04001458
Mark Lord138bfdd2008-01-26 18:33:18 -05001459 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1460 (qc->tf.protocol != ATA_PROT_NCQ)) {
Brett Russ31961942005-09-30 01:36:00 -04001461 /* We're about to send a non-EDMA capable command to the
1462 * port. Turn off EDMA so there won't be problems accessing
1463 * shadow block, etc registers.
1464 */
Jeff Garzik0ea9e172007-07-13 17:06:45 -04001465 __mv_stop_dma(ap);
Brett Russ31961942005-09-30 01:36:00 -04001466 return ata_qc_issue_prot(qc);
1467 }
1468
Mark Lord72109162008-01-26 18:31:33 -05001469 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001470
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001471 pp->req_idx++;
Brett Russ31961942005-09-30 01:36:00 -04001472
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001473 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
Brett Russ31961942005-09-30 01:36:00 -04001474
1475 /* and write the request in pointer to kick the EDMA to life */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001476 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1477 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
Brett Russ31961942005-09-30 01:36:00 -04001478
1479 return 0;
1480}
1481
Brett Russ05b308e2005-10-05 17:08:53 -04001482/**
Brett Russ05b308e2005-10-05 17:08:53 -04001483 * mv_err_intr - Handle error interrupts on the port
1484 * @ap: ATA channel to manipulate
Mark Lord9b358e32006-05-19 16:21:03 -04001485 * @reset_allowed: bool: 0 == don't trigger from reset here
Brett Russ05b308e2005-10-05 17:08:53 -04001486 *
1487 * In most cases, just clear the interrupt and move on. However,
1488 * some cases require an eDMA reset, which is done right before
1489 * the COMRESET in mv_phy_reset(). The SERR case requires a
1490 * clear of pending errors in the SATA SERROR register. Finally,
1491 * if the port disabled DMA, update our cached copy to match.
1492 *
1493 * LOCKING:
1494 * Inherited from caller.
1495 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001496static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
Brett Russ20f733e2005-09-01 18:26:17 -04001497{
Brett Russ31961942005-09-30 01:36:00 -04001498 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001499 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1500 struct mv_port_priv *pp = ap->private_data;
1501 struct mv_host_priv *hpriv = ap->host->private_data;
1502 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1503 unsigned int action = 0, err_mask = 0;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001504 struct ata_eh_info *ehi = &ap->link.eh_info;
Brett Russ20f733e2005-09-01 18:26:17 -04001505
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001506 ata_ehi_clear_desc(ehi);
Brett Russ20f733e2005-09-01 18:26:17 -04001507
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001508 if (!edma_enabled) {
1509 /* just a guess: do we need to do this? should we
1510 * expand this, and do it in all cases?
1511 */
Tejun Heo936fd732007-08-06 18:36:23 +09001512 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1513 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Brett Russ20f733e2005-09-01 18:26:17 -04001514 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001515
1516 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1517
1518 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1519
1520 /*
1521 * all generations share these EDMA error cause bits
1522 */
1523
1524 if (edma_err_cause & EDMA_ERR_DEV)
1525 err_mask |= AC_ERR_DEV;
1526 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
Jeff Garzik6c1153e2007-07-13 15:20:15 -04001527 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001528 EDMA_ERR_INTRL_PAR)) {
1529 err_mask |= AC_ERR_ATA_BUS;
1530 action |= ATA_EH_HARDRESET;
Tejun Heob64bbc32007-07-16 14:29:39 +09001531 ata_ehi_push_desc(ehi, "parity error");
Brett Russafb0edd2005-10-05 17:08:42 -04001532 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001533 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1534 ata_ehi_hotplugged(ehi);
1535 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
Tejun Heob64bbc32007-07-16 14:29:39 +09001536 "dev disconnect" : "dev connect");
Mark Lord3606a382008-01-26 18:28:23 -05001537 action |= ATA_EH_HARDRESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001538 }
1539
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04001540 if (IS_GEN_I(hpriv)) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001541 eh_freeze_mask = EDMA_EH_FREEZE_5;
1542
1543 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1544 struct mv_port_priv *pp = ap->private_data;
1545 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001546 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001547 }
1548 } else {
1549 eh_freeze_mask = EDMA_EH_FREEZE;
1550
1551 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1552 struct mv_port_priv *pp = ap->private_data;
1553 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001554 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001555 }
1556
1557 if (edma_err_cause & EDMA_ERR_SERR) {
Tejun Heo936fd732007-08-06 18:36:23 +09001558 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1559 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001560 err_mask = AC_ERR_ATA_BUS;
1561 action |= ATA_EH_HARDRESET;
1562 }
1563 }
Brett Russ20f733e2005-09-01 18:26:17 -04001564
1565 /* Clear EDMA now that SERR cleanup done */
Mark Lord3606a382008-01-26 18:28:23 -05001566 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001567
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001568 if (!err_mask) {
1569 err_mask = AC_ERR_OTHER;
1570 action |= ATA_EH_HARDRESET;
1571 }
1572
1573 ehi->serror |= serr;
1574 ehi->action |= action;
1575
1576 if (qc)
1577 qc->err_mask |= err_mask;
1578 else
1579 ehi->err_mask |= err_mask;
1580
1581 if (edma_err_cause & eh_freeze_mask)
1582 ata_port_freeze(ap);
1583 else
1584 ata_port_abort(ap);
1585}
1586
1587static void mv_intr_pio(struct ata_port *ap)
1588{
1589 struct ata_queued_cmd *qc;
1590 u8 ata_status;
1591
1592 /* ignore spurious intr if drive still BUSY */
1593 ata_status = readb(ap->ioaddr.status_addr);
1594 if (unlikely(ata_status & ATA_BUSY))
1595 return;
1596
1597 /* get active ATA command */
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001598 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001599 if (unlikely(!qc)) /* no active tag */
1600 return;
1601 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1602 return;
1603
1604 /* and finally, complete the ATA command */
1605 qc->err_mask |= ac_err_mask(ata_status);
1606 ata_qc_complete(qc);
1607}
1608
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* drain completed CRPB entries until s/w catches up with h/w */
	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			/* error path takes over; out pointer not advanced */
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	/* single register write tells h/w how far we consumed */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1674
Brett Russ05b308e2005-10-05 17:08:53 -04001675/**
1676 * mv_host_intr - Handle all interrupts on the given host controller
Jeff Garzikcca39742006-08-24 03:19:22 -04001677 * @host: host specific structure
Brett Russ05b308e2005-10-05 17:08:53 -04001678 * @relevant: port error bits relevant to this host controller
1679 * @hc: which host controller we're to look at
1680 *
1681 * Read then write clear the HC interrupt status then walk each
1682 * port connected to the HC and see if it needs servicing. Port
1683 * success ints are reported in the HC interrupt status reg, the
1684 * port error ints are reported in the higher level main
1685 * interrupt status register and thus are passed in via the
1686 * 'relevant' argument.
1687 *
1688 * LOCKING:
1689 * Inherited from caller.
1690 */
Jeff Garzikcca39742006-08-24 03:19:22 -04001691static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
Brett Russ20f733e2005-09-01 18:26:17 -04001692{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001693 struct mv_host_priv *hpriv = host->private_data;
1694 void __iomem *mmio = hpriv->base;
Brett Russ20f733e2005-09-01 18:26:17 -04001695 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
Brett Russ20f733e2005-09-01 18:26:17 -04001696 u32 hc_irq_cause;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001697 int port, port0, last_port;
Brett Russ20f733e2005-09-01 18:26:17 -04001698
Jeff Garzik35177262007-02-24 21:26:42 -05001699 if (hc == 0)
Brett Russ20f733e2005-09-01 18:26:17 -04001700 port0 = 0;
Jeff Garzik35177262007-02-24 21:26:42 -05001701 else
Brett Russ20f733e2005-09-01 18:26:17 -04001702 port0 = MV_PORTS_PER_HC;
Brett Russ20f733e2005-09-01 18:26:17 -04001703
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001704 if (HAS_PCI(host))
1705 last_port = port0 + MV_PORTS_PER_HC;
1706 else
1707 last_port = port0 + hpriv->n_ports;
Brett Russ20f733e2005-09-01 18:26:17 -04001708 /* we'll need the HC success int register in most cases */
1709 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001710 if (!hc_irq_cause)
1711 return;
1712
1713 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001714
1715 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001716 hc, relevant, hc_irq_cause);
Brett Russ20f733e2005-09-01 18:26:17 -04001717
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001718 for (port = port0; port < port0 + last_port; port++) {
Jeff Garzikcca39742006-08-24 03:19:22 -04001719 struct ata_port *ap = host->ports[port];
Mark Lord63af2a52006-03-29 09:50:31 -05001720 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001721 int have_err_bits, hard_port, shift;
Jeff Garzik55d8ca42006-03-29 19:43:31 -05001722
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001723 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
Jeff Garzika2c91a82005-11-17 05:44:44 -05001724 continue;
1725
Brett Russ31961942005-09-30 01:36:00 -04001726 shift = port << 1; /* (port * 2) */
Brett Russ20f733e2005-09-01 18:26:17 -04001727 if (port >= MV_PORTS_PER_HC) {
1728 shift++; /* skip bit 8 in the HC Main IRQ reg */
1729 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001730 have_err_bits = ((PORT0_ERR << shift) & relevant);
1731
1732 if (unlikely(have_err_bits)) {
1733 struct ata_queued_cmd *qc;
1734
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001735 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001736 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1737 continue;
1738
1739 mv_err_intr(ap, qc);
1740 continue;
Brett Russ20f733e2005-09-01 18:26:17 -04001741 }
Jeff Garzik8b260242005-11-12 12:32:50 -05001742
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001743 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1744
1745 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1746 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1747 mv_intr_edma(ap);
1748 } else {
1749 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1750 mv_intr_pio(ap);
Brett Russ20f733e2005-09-01 18:26:17 -04001751 }
1752 }
1753 VPRINTK("EXIT\n");
1754}
1755
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001756static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1757{
Mark Lord02a121d2007-12-01 13:07:22 -05001758 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001759 struct ata_port *ap;
1760 struct ata_queued_cmd *qc;
1761 struct ata_eh_info *ehi;
1762 unsigned int i, err_mask, printed = 0;
1763 u32 err_cause;
1764
Mark Lord02a121d2007-12-01 13:07:22 -05001765 err_cause = readl(mmio + hpriv->irq_cause_ofs);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001766
1767 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1768 err_cause);
1769
1770 DPRINTK("All regs @ PCI error\n");
1771 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1772
Mark Lord02a121d2007-12-01 13:07:22 -05001773 writelfl(0, mmio + hpriv->irq_cause_ofs);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001774
1775 for (i = 0; i < host->n_ports; i++) {
1776 ap = host->ports[i];
Tejun Heo936fd732007-08-06 18:36:23 +09001777 if (!ata_link_offline(&ap->link)) {
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001778 ehi = &ap->link.eh_info;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001779 ata_ehi_clear_desc(ehi);
1780 if (!printed++)
1781 ata_ehi_push_desc(ehi,
1782 "PCI err cause 0x%08x", err_cause);
1783 err_mask = AC_ERR_HOST_BUS;
1784 ehi->action = ATA_EH_HARDRESET;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001785 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001786 if (qc)
1787 qc->err_mask |= err_mask;
1788 else
1789 ehi->err_mask |= err_mask;
1790
1791 ata_port_freeze(ap);
1792 }
1793 }
1794}
1795
Brett Russ05b308e2005-10-05 17:08:53 -04001796/**
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001797 * mv_interrupt - Main interrupt event handler
Brett Russ05b308e2005-10-05 17:08:53 -04001798 * @irq: unused
1799 * @dev_instance: private data; in this case the host structure
Brett Russ05b308e2005-10-05 17:08:53 -04001800 *
1801 * Read the read only register to determine if any host
1802 * controllers have pending interrupts. If so, call lower level
1803 * routine to handle. Also check for PCI errors which are only
1804 * reported here.
1805 *
Jeff Garzik8b260242005-11-12 12:32:50 -05001806 * LOCKING:
Jeff Garzikcca39742006-08-24 03:19:22 -04001807 * This routine holds the host lock while processing pending
Brett Russ05b308e2005-10-05 17:08:53 -04001808 * interrupts.
1809 */
David Howells7d12e782006-10-05 14:55:46 +01001810static irqreturn_t mv_interrupt(int irq, void *dev_instance)
Brett Russ20f733e2005-09-01 18:26:17 -04001811{
Jeff Garzikcca39742006-08-24 03:19:22 -04001812 struct ata_host *host = dev_instance;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001813 struct mv_host_priv *hpriv = host->private_data;
Brett Russ20f733e2005-09-01 18:26:17 -04001814 unsigned int hc, handled = 0, n_hcs;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001815 void __iomem *mmio = hpriv->base;
Mark Lord646a4da2008-01-26 18:30:37 -05001816 u32 irq_stat, irq_mask;
Brett Russ20f733e2005-09-01 18:26:17 -04001817
Mark Lord646a4da2008-01-26 18:30:37 -05001818 spin_lock(&host->lock);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001819
1820 irq_stat = readl(hpriv->main_cause_reg_addr);
1821 irq_mask = readl(hpriv->main_mask_reg_addr);
Brett Russ20f733e2005-09-01 18:26:17 -04001822
1823 /* check the cases where we either have nothing pending or have read
1824 * a bogus register value which can indicate HW removal or PCI fault
1825 */
Mark Lord646a4da2008-01-26 18:30:37 -05001826 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1827 goto out_unlock;
Brett Russ20f733e2005-09-01 18:26:17 -04001828
Jeff Garzikcca39742006-08-24 03:19:22 -04001829 n_hcs = mv_get_hc_count(host->ports[0]->flags);
Brett Russ20f733e2005-09-01 18:26:17 -04001830
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001831 if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001832 mv_pci_error(host, mmio);
1833 handled = 1;
1834 goto out_unlock; /* skip all other HC irq handling */
1835 }
1836
Brett Russ20f733e2005-09-01 18:26:17 -04001837 for (hc = 0; hc < n_hcs; hc++) {
1838 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1839 if (relevant) {
Jeff Garzikcca39742006-08-24 03:19:22 -04001840 mv_host_intr(host, relevant, hc);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001841 handled = 1;
Brett Russ20f733e2005-09-01 18:26:17 -04001842 }
1843 }
Mark Lord615ab952006-05-19 16:24:56 -04001844
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001845out_unlock:
Jeff Garzikcca39742006-08-24 03:19:22 -04001846 spin_unlock(&host->lock);
Brett Russ20f733e2005-09-01 18:26:17 -04001847
1848 return IRQ_RETVAL(handled);
1849}
1850
Jeff Garzikc9d39132005-11-13 17:47:51 -05001851static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1852{
1853 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1854 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1855
1856 return hc_mmio + ofs;
1857}
1858
1859static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1860{
1861 unsigned int ofs;
1862
1863 switch (sc_reg_in) {
1864 case SCR_STATUS:
1865 case SCR_ERROR:
1866 case SCR_CONTROL:
1867 ofs = sc_reg_in * sizeof(u32);
1868 break;
1869 default:
1870 ofs = 0xffffffffU;
1871 break;
1872 }
1873 return ofs;
1874}
1875
Tejun Heoda3dbb12007-07-16 14:29:40 +09001876static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001877{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001878 struct mv_host_priv *hpriv = ap->host->private_data;
1879 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001880 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001881 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1882
Tejun Heoda3dbb12007-07-16 14:29:40 +09001883 if (ofs != 0xffffffffU) {
1884 *val = readl(addr + ofs);
1885 return 0;
1886 } else
1887 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001888}
1889
Tejun Heoda3dbb12007-07-16 14:29:40 +09001890static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001891{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001892 struct mv_host_priv *hpriv = ap->host->private_data;
1893 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001894 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001895 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1896
Tejun Heoda3dbb12007-07-16 14:29:40 +09001897 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001898 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001899 return 0;
1900 } else
1901 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001902}
1903
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001904static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
Jeff Garzik522479f2005-11-12 22:14:02 -05001905{
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001906 struct pci_dev *pdev = to_pci_dev(host->dev);
Jeff Garzik522479f2005-11-12 22:14:02 -05001907 int early_5080;
1908
Auke Kok44c10132007-06-08 15:46:36 -07001909 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
Jeff Garzik522479f2005-11-12 22:14:02 -05001910
1911 if (!early_5080) {
1912 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1913 tmp |= (1 << 0);
1914 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1915 }
1916
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001917 mv_reset_pci_bus(host, mmio);
Jeff Garzik522479f2005-11-12 22:14:02 -05001918}
1919
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	/* NOTE(review): magic constant carried over from vendor code;
	 * individual bit meanings are not documented here.
	 */
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1924
Jeff Garzik47c2b672005-11-12 21:13:17 -05001925static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001926 void __iomem *mmio)
1927{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001928 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1929 u32 tmp;
1930
1931 tmp = readl(phy_mmio + MV5_PHY_MODE);
1932
1933 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1934 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001935}
1936
Jeff Garzik47c2b672005-11-12 21:13:17 -05001937static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001938{
Jeff Garzik522479f2005-11-12 22:14:02 -05001939 u32 tmp;
1940
1941 writel(0, mmio + MV_GPIO_PORT_CTL);
1942
1943 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1944
1945 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1946 tmp |= ~(1 << 0);
1947 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001948}
1949
Jeff Garzik2a47ce02005-11-12 23:05:14 -05001950static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1951 unsigned int port)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05001952{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001953 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1954 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1955 u32 tmp;
1956 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1957
1958 if (fix_apm_sq) {
1959 tmp = readl(phy_mmio + MV5_LT_MODE);
1960 tmp |= (1 << 19);
1961 writel(tmp, phy_mmio + MV5_LT_MODE);
1962
1963 tmp = readl(phy_mmio + MV5_PHY_CTL);
1964 tmp &= ~0x3;
1965 tmp |= 0x1;
1966 writel(tmp, phy_mmio + MV5_PHY_CTL);
1967 }
1968
1969 tmp = readl(phy_mmio + MV5_PHY_MODE);
1970 tmp &= ~mask;
1971 tmp |= hpriv->signal[port].pre;
1972 tmp |= hpriv->signal[port].amps;
1973 writel(tmp, phy_mmio + MV5_PHY_MODE);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05001974}
1975
Jeff Garzikc9d39132005-11-13 17:47:51 -05001976
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/* disable EDMA before resetting the channel */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	/* zero the per-port EDMA registers back to a known state */
	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
2003
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	/* zero four HC registers (0x0c..0x18); exact semantics follow
	 * the vendor driver and are not documented here
	 */
	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	/* NOTE(review): register 0x20 bit meanings are undocumented in
	 * this file; preserve the 0x1c1c1c1c bits and set 0x03030303
	 * per vendor-derived code.
	 */
	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
2022
2023static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2024 unsigned int n_hc)
2025{
2026 unsigned int hc, port;
2027
2028 for (hc = 0; hc < n_hc; hc++) {
2029 for (port = 0; port < MV_PORTS_PER_HC; port++)
2030 mv5_reset_hc_port(hpriv, mmio,
2031 (hc * MV_PORTS_PER_HC) + port);
2032
2033 mv5_reset_one_hc(hpriv, mmio, hc);
2034 }
2035
2036 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002037}
2038
Jeff Garzik101ffae2005-11-12 22:17:49 -05002039#undef ZERO
2040#define ZERO(reg) writel(0, mmio + (reg))
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002041static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
Jeff Garzik101ffae2005-11-12 22:17:49 -05002042{
Mark Lord02a121d2007-12-01 13:07:22 -05002043 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzik101ffae2005-11-12 22:17:49 -05002044 u32 tmp;
2045
2046 tmp = readl(mmio + MV_PCI_MODE);
2047 tmp &= 0xff00ffff;
2048 writel(tmp, mmio + MV_PCI_MODE);
2049
2050 ZERO(MV_PCI_DISC_TIMER);
2051 ZERO(MV_PCI_MSI_TRIGGER);
2052 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
2053 ZERO(HC_MAIN_IRQ_MASK_OFS);
2054 ZERO(MV_PCI_SERR_MASK);
Mark Lord02a121d2007-12-01 13:07:22 -05002055 ZERO(hpriv->irq_cause_ofs);
2056 ZERO(hpriv->irq_mask_ofs);
Jeff Garzik101ffae2005-11-12 22:17:49 -05002057 ZERO(MV_PCI_ERR_LOW_ADDRESS);
2058 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2059 ZERO(MV_PCI_ERR_ATTRIBUTE);
2060 ZERO(MV_PCI_ERR_COMMAND);
2061}
2062#undef ZERO
2063
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	/* Gen II shares the Gen I flash reset, plus a GPIO tweak */
	mv5_reset_flash(hpriv, mmio);

	/* keep only bits 1:0, then set bits 5 and 6 of GPIO port control;
	 * bit meanings follow vendor code and are not documented here
	 */
	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
2075
/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @hpriv: host private data (unused in this routine)
 *      @mmio: base address of the HBA
 *      @n_hc: number of host controllers (unused in this routine)
 *
 *      This routine only applies to 6xxx parts.
 *
 *      LOCKING:
 *      Inherited from caller.
 *
 *      Returns 0 on success, 1 if the PCI master would not flush or if
 *      the global soft reset bit could not be set/cleared.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* wait up to ~1ms for the PCI master to drain */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset (up to 5 write/readback attempts) */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
2139
Jeff Garzik47c2b672005-11-12 21:13:17 -05002140static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002141 void __iomem *mmio)
2142{
2143 void __iomem *port_mmio;
2144 u32 tmp;
2145
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002146 tmp = readl(mmio + MV_RESET_CFG);
2147 if ((tmp & (1 << 0)) == 0) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002148 hpriv->signal[idx].amps = 0x7 << 8;
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002149 hpriv->signal[idx].pre = 0x1 << 5;
2150 return;
2151 }
2152
2153 port_mmio = mv_port_base(mmio, idx);
2154 tmp = readl(port_mmio + PHY_MODE2);
2155
2156 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2157 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2158}
2159
/* Enable the board LEDs on 6xxx parts by setting bits 5 and 6 (0x60)
 * of the GPIO port control register (same bits mv6_reset_flash() sets).
 */
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
2164
/* Apply 60x1 PHY errata workarounds to one port, then restore the
 * pre-emphasis/amplitude values previously saved by mv6_read_preamp().
 * Statement order and the udelay()s follow the vendor workaround
 * sequence — do not reorder.
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	/* both fixups apply to the same errata set today; kept as two
	 * separate flags in case they ever diverge
	 */
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		/* pulse bit 31 with bit 16 cleared, then clear both */
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* 60X1B2 parts: register 0x310 must be preserved across
		 * the PHY_MODE4 write (saved here, restored below)
		 */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
2230
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002231/* TODO: use the generic LED interface to configure the SATA Presence */
2232/* & Acitivy LEDs on the board */
2233static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2234 void __iomem *mmio)
2235{
2236 return;
2237}
2238
2239static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2240 void __iomem *mmio)
2241{
2242 void __iomem *port_mmio;
2243 u32 tmp;
2244
2245 port_mmio = mv_port_base(mmio, idx);
2246 tmp = readl(port_mmio + PHY_MODE2);
2247
2248 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2249 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2250}
2251
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Reset one SATA port on an SoC host: disable EDMA first, pulse the
 * channel reset, then zero the port's EDMA queue/irq registers and
 * reprogram EDMA config and IORDY timeout.
 */
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/* stop EDMA before touching anything else */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
2277
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Zero three registers (offsets 0x00c/0x010/0x014) of host controller 0.
 * NOTE(review): offsets are undocumented here — presumably HC irq
 * coalescing/cause registers; confirm against the chip datasheet.
 * SoC hosts have a single HC, hence the hard-coded mv_hc_base(mmio, 0).
 */
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);

}

#undef ZERO
2293
2294static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2295 void __iomem *mmio, unsigned int n_hc)
2296{
2297 unsigned int port;
2298
2299 for (port = 0; port < hpriv->n_ports; port++)
2300 mv_soc_reset_hc_port(hpriv, mmio, port);
2301
2302 mv_soc_reset_one_hc(hpriv, mmio);
2303
2304 return 0;
2305}
2306
2307static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2308 void __iomem *mmio)
2309{
2310 return;
2311}
2312
2313static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2314{
2315 return;
2316}
2317
/* Hard-reset one channel: assert ATA_RST in the EDMA command register,
 * apply the Gen-II interface-control fixup while reset is held, wait
 * for propagation, release reset, then run the chip-specific PHY
 * errata hook.  Gen-I parts need an extra 1ms settle afterwards.
 */
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);	/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}
2344
/**
 *      mv_phy_reset - Perform eDMA reset followed by COMRESET
 *      @ap: ATA channel to manipulate
 *      @class: out: device class from signature (ATA_DEV_NONE if the
 *              link is offline)
 *      @deadline: jiffies value bounding the status polling loops
 *
 *      Part of this is taken from __sata_phy_reset.
 *
 *      NOTE(review): the historical comment claimed this is safe at
 *      interrupt level, but the body calls msleep() — it must only be
 *      called from a context that may sleep (it runs from EH via
 *      mv_hardreset()).
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			 unsigned long deadline)
{
	struct mv_port_priv *pp	= ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* Issue COMRESET via SControl: DET=1 (start reset), then DET=0 */
comreset_retry:
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
	msleep(1);

	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
	msleep(20);

	/* poll SStatus until DET settles to 0 (no device) or 3 (phy up),
	 * bounded by @deadline
	 */
	do {
		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata: Gen-II parts may need the whole COMRESET
	 * repeated (up to 5 times) until SStatus reads a known-good value
	 */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	if (ata_link_offline(&ap->link)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		msleep(500);
		if (retry-- <= 0)
			break;
		if (time_after(jiffies, deadline))
			break;
	}

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap->link.device, 1, NULL);

	/* clear any latched EDMA errors from the reset */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}
2448
Tejun Heocc0680a2007-08-06 18:36:23 +09002449static int mv_prereset(struct ata_link *link, unsigned long deadline)
Jeff Garzik22374672005-11-17 10:59:48 -05002450{
Tejun Heocc0680a2007-08-06 18:36:23 +09002451 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002452 struct mv_port_priv *pp = ap->private_data;
Tejun Heocc0680a2007-08-06 18:36:23 +09002453 struct ata_eh_context *ehc = &link->eh_context;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002454 int rc;
Jeff Garzik0ea9e172007-07-13 17:06:45 -04002455
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002456 rc = mv_stop_dma(ap);
2457 if (rc)
2458 ehc->i.action |= ATA_EH_HARDRESET;
2459
2460 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2461 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2462 ehc->i.action |= ATA_EH_HARDRESET;
2463 }
2464
2465 /* if we're about to do hardreset, nothing more to do */
2466 if (ehc->i.action & ATA_EH_HARDRESET)
2467 return 0;
2468
Tejun Heocc0680a2007-08-06 18:36:23 +09002469 if (ata_link_online(link))
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002470 rc = ata_wait_ready(ap, deadline);
2471 else
2472 rc = -ENODEV;
2473
2474 return rc;
Jeff Garzik22374672005-11-17 10:59:48 -05002475}
2476
/* EH hardreset hook: stop EDMA, hard-reset the channel, then COMRESET
 * the PHY and classify the device via mv_phy_reset().  Always returns 0.
 */
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	return 0;
}
2492
/* EH postreset hook: report link status, clear SError (write back the
 * bits just read — write-1-to-clear), and program the device control
 * register unless no device was found.
 */
static void mv_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serr;

	/* print link status */
	sata_print_link_status(link);

	/* clear SError */
	sata_scr_read(link, SCR_ERROR, &serr);
	sata_scr_write_flush(link, SCR_ERROR, serr);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
}
2514
/* libata error-handler entry point: run EH with the driver-specific
 * prereset/hardreset/postreset hooks and the standard softreset.
 */
static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}
2520
/* EH freeze hook: mask this port's two bits ("err" and "done") in the
 * main interrupt mask register.  Each port owns two consecutive bits at
 * (port_no * 2); ports on the second host controller (port_no > 3) are
 * shifted by one extra bit — mirrored exactly in mv_eh_thaw().
 */
static void mv_eh_freeze(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	u32 tmp, mask;
	unsigned int shift;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;

	mask = 0x3 << shift;

	/* disable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
}
2540
/* EH thaw hook: inverse of mv_eh_freeze().  Clears any EDMA errors and
 * pending CRPB-done/device interrupts latched while frozen, then
 * re-enables the port's "err" and "done" bits in the main mask (same
 * shift arithmetic as mv_eh_freeze()).
 */
static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;	/* port index within the second HC */
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp | mask, hpriv->main_mask_reg_addr);
}
2574
/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup: taskfile shadow registers live in a block
	 * of u32 slots starting at SHD_BLK_OFS, indexed by ATA_REG_*
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions:
	 * write SError's set bits back to itself (write-1-to-clear),
	 * then zero the EDMA error cause register
	 */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
2623
/**
 *      mv_chip_id - select hw ops and errata flags for the board type
 *      @host: ATA host being initialized
 *      @board_idx: board table index (chip_504x ... chip_soc)
 *
 *      Fills in hpriv->ops and hpriv->hp_flags from the board type and
 *      PCI revision, and selects PCI vs PCIe irq cause/mask offsets.
 *
 *      NOTE(review): pdev is obtained unconditionally via
 *      to_pci_dev(host->dev) but only dereferenced in the PCI board
 *      cases; chip_soc must never reach the pdev->revision switches.
 *
 *      Returns 0 on success, 1 on an invalid board index.
 */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* fallthrough: 7042 shares all of the 6042 handling below */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	/* PCIe parts latch interrupt cause/mask at different offsets */
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
2763
/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @host: ATA host to initialize
 *      @board_idx: controller index
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      LOCKING:
 *      Inherited from caller.
 *
 *      Returns 0 on success, nonzero from mv_chip_id() or the
 *      chip-specific reset_hc op on failure.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	/* main irq cause/mask registers live at different offsets on
	 * PCI-attached vs SoC hosts
	 */
	if (HAS_PCI(host)) {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_MASK_OFS;
	}
	/* global interrupt mask: everything off during init */
	writel(0, hpriv->main_mask_reg_addr);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* save per-port PHY settings before resetting anything */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			/* same Gen-II interface fixup as mv_channel_reset() */
			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);	/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (HAS_PCI(host)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (HAS_PCI(host)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
		if (IS_GEN_I(hpriv))
			writelfl(~HC_MAIN_MASKED_IRQS_5,
				 hpriv->main_mask_reg_addr);
		else
			writelfl(~HC_MAIN_MASKED_IRQS,
				 hpriv->main_mask_reg_addr);

		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
			"PCI int cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr),
			readl(mmio + hpriv->irq_cause_ofs),
			readl(mmio + hpriv->irq_mask_ofs));
	} else {
		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
			 hpriv->main_mask_reg_addr);
		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr));
	}
done:
	return rc;
}
2880
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002881/**
2882 * mv_platform_probe - handle a positive probe of an soc Marvell
2883 * host
2884 * @pdev: platform device found
2885 *
2886 * LOCKING:
2887 * Inherited from caller.
2888 */
2889static int mv_platform_probe(struct platform_device *pdev)
2890{
2891 static int printed_version;
2892 const struct mv_sata_platform_data *mv_platform_data;
2893 const struct ata_port_info *ppi[] =
2894 { &mv_port_info[chip_soc], NULL };
2895 struct ata_host *host;
2896 struct mv_host_priv *hpriv;
2897 struct resource *res;
2898 int n_ports, rc;
2899
2900 if (!printed_version++)
2901 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2902
2903 /*
2904 * Simple resource validation ..
2905 */
2906 if (unlikely(pdev->num_resources != 2)) {
2907 dev_err(&pdev->dev, "invalid number of resources\n");
2908 return -EINVAL;
2909 }
2910
2911 /*
2912 * Get the register base first
2913 */
2914 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2915 if (res == NULL)
2916 return -EINVAL;
2917
2918 /* allocate host */
2919 mv_platform_data = pdev->dev.platform_data;
2920 n_ports = mv_platform_data->n_ports;
2921
2922 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2923 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2924
2925 if (!host || !hpriv)
2926 return -ENOMEM;
2927 host->private_data = hpriv;
2928 hpriv->n_ports = n_ports;
2929
2930 host->iomap = NULL;
2931 hpriv->base = ioremap(res->start, res->end - res->start + 1);
2932 hpriv->base -= MV_SATAHC0_REG_BASE;
2933
2934 /* initialize adapter */
2935 rc = mv_init_host(host, chip_soc);
2936 if (rc)
2937 return rc;
2938
2939 dev_printk(KERN_INFO, &pdev->dev,
2940 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2941 host->n_ports);
2942
2943 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2944 IRQF_SHARED, &mv6_sht);
2945}
2946
2947/*
2948 *
2949 * mv_platform_remove - unplug a platform interface
2950 * @pdev: platform device
2951 *
2952 * A platform bus SATA device has been unplugged. Perform the needed
2953 * cleanup. Also called on module unload for any active devices.
2954 */
2955static int __devexit mv_platform_remove(struct platform_device *pdev)
2956{
2957 struct device *dev = &pdev->dev;
2958 struct ata_host *host = dev_get_drvdata(dev);
2959 struct mv_host_priv *hpriv = host->private_data;
2960 void __iomem *base = hpriv->base;
2961
2962 ata_host_detach(host);
2963 iounmap(base);
2964 return 0;
2965}
2966
/* Platform-bus (SoC) driver glue: probe/remove entry points for
 * memory-mapped Marvell SATA controllers (the PCI path uses
 * mv_pci_driver instead).
 */
static struct platform_driver mv_platform_driver = {
	.probe			= mv_platform_probe,
	.remove			= __devexit_p(mv_platform_remove),
	.driver			= {
				   .name = DRV_NAME,
				   .owner = THIS_MODULE,
				  },
};
2975
2976
#ifdef CONFIG_PCI
/* Forward declaration: the probe body lives further down in this
 * #ifdef CONFIG_PCI section, after the driver structure that uses it.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);


/* PCI driver glue for PCI/PCI-X Marvell SATA controllers. */
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
};

/*
 * module options
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
2994
2995/* move to PCI layer or libata core? */
2996static int pci_go_64(struct pci_dev *pdev)
2997{
2998 int rc;
2999
3000 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3001 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3002 if (rc) {
3003 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3004 if (rc) {
3005 dev_printk(KERN_ERR, &pdev->dev,
3006 "64-bit DMA enable failed\n");
3007 return rc;
3008 }
3009 }
3010 } else {
3011 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3012 if (rc) {
3013 dev_printk(KERN_ERR, &pdev->dev,
3014 "32-bit DMA enable failed\n");
3015 return rc;
3016 }
3017 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3018 if (rc) {
3019 dev_printk(KERN_ERR, &pdev->dev,
3020 "32-bit consistent DMA enable failed\n");
3021 return rc;
3022 }
3023 }
3024
3025 return rc;
3026}
3027
Brett Russ05b308e2005-10-05 17:08:53 -04003028/**
3029 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09003030 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04003031 *
3032 * FIXME: complete this.
3033 *
3034 * LOCKING:
3035 * Inherited from caller.
3036 */
Tejun Heo4447d352007-04-17 23:44:08 +09003037static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04003038{
Tejun Heo4447d352007-04-17 23:44:08 +09003039 struct pci_dev *pdev = to_pci_dev(host->dev);
3040 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07003041 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04003042 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04003043
3044 /* Use this to determine the HW stepping of the chip so we know
3045 * what errata to workaround
3046 */
Brett Russ31961942005-09-30 01:36:00 -04003047 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
3048 if (scc == 0)
3049 scc_s = "SCSI";
3050 else if (scc == 0x01)
3051 scc_s = "RAID";
3052 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04003053 scc_s = "?";
3054
3055 if (IS_GEN_I(hpriv))
3056 gen = "I";
3057 else if (IS_GEN_II(hpriv))
3058 gen = "II";
3059 else if (IS_GEN_IIE(hpriv))
3060 gen = "IIE";
3061 else
3062 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04003063
Jeff Garzika9524a72005-10-30 14:39:11 -05003064 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04003065 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
3066 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04003067 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
3068}
3069
Mark Lordda2fa9b2008-01-26 18:32:45 -05003070static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
3071{
3072 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
3073 MV_CRQB_Q_SZ, 0);
3074 if (!hpriv->crqb_pool)
3075 return -ENOMEM;
3076
3077 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
3078 MV_CRPB_Q_SZ, 0);
3079 if (!hpriv->crpb_pool)
3080 return -ENOMEM;
3081
3082 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
3083 MV_SG_TBL_SZ, 0);
3084 if (!hpriv->sg_tbl_pool)
3085 return -ENOMEM;
3086
3087 return 0;
3088}
3089
/**
 *      mv_pci_init_one - handle a positive probe of a PCI Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      Ordering matters here: enable the device, map BAR0, configure the
 *      DMA masks, create the DMA pools, then program the adapter before
 *      finally requesting the IRQ.  All resources are devres-managed
 *      (pcim_*/devm_*), so early error returns need no explicit cleanup.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	/* driver_data selects the chip-specific entry in mv_port_info[] */
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		/* BAR busy but device enabled: keep it pinned for the owner */
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	/* prefer 64-bit DMA, falling back to 32-bit as needed */
	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts: if MSI was requested but could not be
	 * enabled, fall back to re-enabling legacy INTx.
	 */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003159#endif
Brett Russ20f733e2005-09-01 18:26:17 -04003160
/* NOTE(review): these prototypes appear after both definitions above and
 * look redundant — confirm nothing between here and the definitions
 * depends on them before removing.
 */
static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);
3163
/*
 * Module init: register the PCI driver (when built with PCI support)
 * and the platform driver.  Registration is all-or-nothing — if the
 * platform driver fails after the PCI driver registered, the PCI
 * driver is unregistered again so the module loads cleanly or not at
 * all.
 */
static int __init mv_init(void)
{
	int rc = -ENODEV;	/* returned as-is if no bus support compiled in */
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	/* roll back the PCI registration on platform-driver failure */
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif
	return rc;
}
3180
/* Module exit: unregister both drivers, mirroring mv_init(). */
static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}
3188
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

#ifdef CONFIG_PCI
/* msi= is only meaningful for the PCI-attached controllers */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);