blob: ee6ca97c454511044d628542c536c1f2a84e4665 [file] [log] [blame]
Brett Russ20f733e2005-09-01 18:26:17 -04001/*
2 * sata_mv.c - Marvell SATA support
3 *
Mark Lorde12bef52008-03-31 19:33:56 -04004 * Copyright 2008: Marvell Corporation, all rights reserved.
Jeff Garzik8b260242005-11-12 12:32:50 -05005 * Copyright 2005: EMC Corporation, all rights reserved.
Jeff Garzike2b1be52005-11-18 14:04:23 -05006 * Copyright 2005 Red Hat, Inc. All rights reserved.
Brett Russ20f733e2005-09-01 18:26:17 -04007 *
8 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2 of the License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 */
24
Jeff Garzik4a05e202007-05-24 23:40:15 -040025/*
26 sata_mv TODO list:
27
28 1) Needs a full errata audit for all chipsets. I implemented most
29 of the errata workarounds found in the Marvell vendor driver, but
30 I distinctly remember a couple workarounds (one related to PCI-X)
31 are still needed.
32
Mark Lord1fd2e1c2008-01-26 18:33:59 -050033 2) Improve/fix IRQ and error handling sequences.
34
35 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
36
37 4) Think about TCQ support here, and for libata in general
38 with controllers that suppport it via host-queuing hardware
39 (a software-only implementation could be a nightmare).
Jeff Garzik4a05e202007-05-24 23:40:15 -040040
41 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
42
43 6) Add port multiplier support (intermediate)
44
Jeff Garzik4a05e202007-05-24 23:40:15 -040045 8) Develop a low-power-consumption strategy, and implement it.
46
47 9) [Experiment, low priority] See if ATAPI can be supported using
48 "unknown FIS" or "vendor-specific FIS" support, or something creative
49 like that.
50
51 10) [Experiment, low priority] Investigate interrupt coalescing.
52 Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not
54 worth the latency cost.
55
56 11) [Experiment, Marvell value added] Is it possible to use target
57 mode to cross-connect two Linux boxes with Marvell cards? If so,
58 creating LibATA target mode support would be very interesting.
59
60 Target mode, for those without docs, is the ability to directly
61 connect two SATA controllers.
62
Jeff Garzik4a05e202007-05-24 23:40:15 -040063*/
64
Brett Russ20f733e2005-09-01 18:26:17 -040065#include <linux/kernel.h>
66#include <linux/module.h>
67#include <linux/pci.h>
68#include <linux/init.h>
69#include <linux/blkdev.h>
70#include <linux/delay.h>
71#include <linux/interrupt.h>
Andrew Morton8d8b6002008-02-04 23:43:44 -080072#include <linux/dmapool.h>
Brett Russ20f733e2005-09-01 18:26:17 -040073#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050074#include <linux/device.h>
Saeed Bisharaf351b2d2008-02-01 18:08:03 -050075#include <linux/platform_device.h>
76#include <linux/ata_platform.h>
Brett Russ20f733e2005-09-01 18:26:17 -040077#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050078#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040079#include <scsi/scsi_device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040080#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040081
82#define DRV_NAME "sata_mv"
Mark Lord1fd2e1c2008-01-26 18:33:59 -050083#define DRV_VERSION "1.20"
Brett Russ20f733e2005-09-01 18:26:17 -040084
/*
 * Register offsets, register bit definitions, and driver-wide constants
 * for all supported chip generations (Gen-I 50xx, Gen-II 60xx,
 * Gen-IIE 6042/7042, and SoC variants).
 */
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	/* SoC integrated controllers, no PCI interface */
	MV_FLAG_SOC		= (1 << 28),

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	/* NOTE(review): (1 << 31) overflows a signed int (formally UB in ISO C);
	 * the kernel relies on the compiler's two's-complement behavior here.
	 */
	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),	/* bits 31-9, 7-6 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),
	HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
	LTMODE_OFS		= 0x30c,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	SATA_IFCTL_OFS		= 0x344,
	SATA_IFSTAT_OFS		= 0x34c,
	VENDOR_UNIQUE_FIS_OFS	= 0x35c,
	FIS_CFG_OFS		= 0x360,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CFG	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	/* EDMA error bits that freeze the port for error handling (Gen-II/IIE) */
	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	/* ditto, for Gen-I (50xx) chips */
	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
};
357
/* Chip-generation tests on hp_flags, and PCI-vs-SoC test on the ata_host */
#define IS_GEN_I(hpriv)		((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define HAS_PCI(host)		(!((host)->ports[0]->flags & MV_FLAG_SOC))
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500362
/* DMA-related constants kept unsigned (hence a separate enum). */
enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
377
/* Chip variants; used as the index into mv_port_info[] below. */
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};
388
/* Command ReQuest Block: 32B.  Hardware-accessed, little-endian layout. */
struct mv_crqb {
	__le32			sg_addr;	/* s/g (ePRD) table bus address, low 32 bits */
	__le32			sg_addr_hi;	/* s/g table bus address, high 32 bits */
	__le16			ctrl_flags;	/* CRQB_* flags/shifts from the enum above */
	__le16			ata_cmd[11];
};
396
/* Command ReQuest Block, Gen-IIE (6042/7042) format; built by mv_qc_prep_iie(). */
struct mv_crqb_iie {
	__le32			addr;		/* low 32 bits */
	__le32			addr_hi;	/* high 32 bits */
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};
404
/* Command ResPonse Block: 8B.  Hardware-accessed, little-endian layout. */
struct mv_crpb {
	__le16			id;
	__le16			flags;		/* status lives at CRPB_FLAG_STATUS_SHIFT */
	__le32			tmstmp;
};
411
/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;		/* buffer bus address, low 32 bits */
	__le32			flags_size;	/* length + EPRD_FLAG_END_OF_TBL */
	__le32			addr_hi;	/* buffer bus address, high 32 bits */
	__le32			reserved;
};
419
/* Per-port private state: the hardware queue rings and their bus addresses. */
struct mv_port_priv {
	struct mv_crqb		*crqb;		/* request ring (from hpriv->crqb_pool) */
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;		/* response ring (from hpriv->crpb_pool) */
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];	/* one s/g table per queue tag */
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;	/* request ring index */
	unsigned int		resp_idx;	/* response ring index */

	u32			pp_flags;	/* MV_PP_FLAG_* */
};
433
/* Per-port PHY signal values; filled in via mv_hw_ops->read_preamp.
 * NOTE(review): presumably amplitude and pre-emphasis settings — confirm
 * against the chip datasheet / the read_preamp implementations.
 */
struct mv_port_signal {
	u32			amps;
	u32			pre;
};
438
/* Controller-wide private state, hung off the ata_host. */
struct mv_host_priv {
	u32			hp_flags;	/* MV_HP_* flags */
	struct mv_port_signal	signal[8];	/* one entry per possible port */
	const struct mv_hw_ops	*ops;		/* chip-generation-specific ops */
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_cause_reg_addr;	/* main IRQ cause (PCI or SoC) */
	void __iomem		*main_mask_reg_addr;	/* main IRQ mask (PCI or SoC) */
	u32			irq_cause_ofs;	/* PCI vs PCIe register offsets */
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};
459
/* Low-level operations that differ per chip generation (mv5/mv6/SoC). */
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};
471
/* Forward declarations: libata callbacks and per-generation helpers. */
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_prereset(struct ata_link *link, unsigned long deadline);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_postreset(struct ata_link *link, unsigned int *classes);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

/* Gen-I (50xx) mv_hw_ops implementations */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

/* Gen-II/IIE (60xx/6042/7042) and SoC mv_hw_ops implementations */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(struct ata_port *ap);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq);
/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
/* SCSI host template for Gen-I (50xx) chips: no NCQ. */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};
532
/* SCSI host template for Gen-II/IIE chips: NCQ-capable, queue depth 31. */
static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};
539
/* libata port operations for Gen-I (50xx) chips. */
static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.prereset		= mv_prereset,
	.hardreset		= mv_hardreset,
	.postreset		= mv_postreset,
	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
	.post_internal_cmd	= ATA_OP_NULL,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
560
/* Gen-II (60xx) ops: inherit Gen-I, add NCQ defer/dev_config, own SCR access. */
static struct ata_port_operations mv6_ops = {
	.inherits		= &mv5_ops,
	.qc_defer		= ata_std_qc_defer,
	.dev_config		= mv6_dev_config,
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,
};
568
/* Gen-IIE (6042/7042/SoC) ops: like Gen-II but uses the IIE CRQB format. */
static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};
574
/* Per-chip-type port configuration; indexed by enum chip_type. */
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
629
/* PCI IDs handled by this driver, each mapped to its chip_* board type */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
659
/* Chip-family hardware hooks for the 50xx (mv5) controllers */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
668
/* Chip-family hardware hooks for the 6xxx/7xxx (mv6) controllers;
 * bus reset goes through the generic PCI path.
 */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
677
/* Chip-family hardware hooks for SoC-integrated controllers;
 * reuses the mv6 PHY errata handling, everything else is SoC-specific.
 */
static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};
686
Brett Russ20f733e2005-09-01 18:26:17 -0400687/*
688 * Functions
689 */
690
/* Write a register, then read it back so the posted PCI write is flushed
 * to the device before we proceed.
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
696
Brett Russ20f733e2005-09-01 18:26:17 -0400697static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
698{
699 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
700}
701
/* Map a global port number to the index of its host controller. */
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}
706
/* Map a global port number to its index within its host controller. */
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
711
/* Return the host-controller register base that owns global @port. */
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}
717
Brett Russ20f733e2005-09-01 18:26:17 -0400718static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
719{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500720 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500721 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500722 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400723}
724
Mark Lorde12bef52008-03-31 19:33:56 -0400725static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
726{
727 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
728 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
729
730 return hc_mmio + ofs;
731}
732
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500733static inline void __iomem *mv_host_base(struct ata_host *host)
734{
735 struct mv_host_priv *hpriv = host->private_data;
736 return hpriv->base;
737}
738
/* Return the per-port register base for ATA port @ap. */
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}
743
Jeff Garzikcca39742006-08-24 03:19:22 -0400744static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400745{
Jeff Garzikcca39742006-08-24 03:19:22 -0400746 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400747}
748
/**
 * mv_set_edma_ptrs - Program EDMA queue bases and in/out pointers
 * @port_mmio: per-port register base
 * @hpriv: host private data (consulted for errata flags)
 * @pp: port private data holding queue DMA addresses and soft indices
 *
 * Loads the hardware request/response queue registers from the
 * driver's cached queue addresses and indices.  On chips with the
 * XX42A0 erratum, the "out" pointer registers also carry the low
 * queue base address.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);	/* request queue must be 1KB aligned */
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);	/* response queue must be 256B aligned */
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
788
/**
 * mv_start_dma - Enable eDMA engine
 * @ap: ATA channel to manipulate
 * @port_mmio: per-port register base
 * @pp: port private data
 * @protocol: taskfile protocol of the command about to be issued
 *
 * If the engine is already running but in the wrong NCQ/non-NCQ mode
 * for @protocol, it is stopped first and then reconfigured.
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		/* queueing mode change requires an engine restart */
		if (want_ncq != using_ncq)
			mv_stop_edma_engine(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
					mv_host_base(ap->host), hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			(CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			/* only ack this port's bits; leave siblings alone */
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(ap, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}
841
/**
 * mv_stop_edma_engine - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Requests engine shutdown and polls until the hardware reports the
 * engine disabled.  Verify the local cache of the eDMA state is
 * accurate with a WARN_ON.
 *
 * Returns 0 on success, -EIO if the engine refuses to stop.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_stop_edma_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active. The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);	/* up to ~100ms total before giving up */
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
884
/* Stop the port's eDMA engine while holding the host lock.
 * Returns the result of mv_stop_edma_engine() (0 or -EIO).
 */
static int mv_stop_edma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = mv_stop_edma_engine(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}
896
#ifdef ATA_DEBUG
/* Hex-dump @bytes of MMIO space starting at @start, four words per row. */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int off = 0;

	while (off < bytes) {
		int col;

		DPRINTK("%p: ", start + off);
		for (col = 0; col < 4 && off < bytes; col++) {
			printk("%08x ", readl(start + off));
			off += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
911
/* Hex-dump @bytes of @pdev's PCI config space, four dwords per row.
 * Compiles to an empty function unless ATA_DEBUG is defined.
 */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int off = 0;

	while (off < bytes) {
		int col;

		DPRINTK("%02x: ", off);
		for (col = 0; col < 4 && off < bytes; col++) {
			u32 dw;

			(void) pci_read_config_dword(pdev, off, &dw);
			printk("%08x ", dw);
			off += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
/* Debug dump of PCI config space, global PCI regs, HC regs, and per-port
 * EDMA/SATA regs.  A negative @port means "all ports"; @pdev may be NULL
 * to skip the PCI config dump.  No-op unless ATA_DEBUG is defined.
 */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
972
Brett Russ20f733e2005-09-01 18:26:17 -0400973static unsigned int mv_scr_offset(unsigned int sc_reg_in)
974{
975 unsigned int ofs;
976
977 switch (sc_reg_in) {
978 case SCR_STATUS:
979 case SCR_CONTROL:
980 case SCR_ERROR:
981 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
982 break;
983 case SCR_ACTIVE:
984 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
985 break;
986 default:
987 ofs = 0xffffffffU;
988 break;
989 }
990 return ofs;
991}
992
Tejun Heoda3dbb12007-07-16 14:29:40 +0900993static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -0400994{
995 unsigned int ofs = mv_scr_offset(sc_reg_in);
996
Tejun Heoda3dbb12007-07-16 14:29:40 +0900997 if (ofs != 0xffffffffU) {
998 *val = readl(mv_ap_base(ap) + ofs);
999 return 0;
1000 } else
1001 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001002}
1003
Tejun Heoda3dbb12007-07-16 14:29:40 +09001004static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -04001005{
1006 unsigned int ofs = mv_scr_offset(sc_reg_in);
1007
Tejun Heoda3dbb12007-07-16 14:29:40 +09001008 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -04001009 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001010 return 0;
1011 } else
1012 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001013}
1014
Mark Lordf2738272008-01-26 18:32:29 -05001015static void mv6_dev_config(struct ata_device *adev)
1016{
1017 /*
1018 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1019 * See mv_qc_prep() for more info.
1020 */
1021 if (adev->flags & ATA_DFLAG_NCQ)
1022 if (adev->max_sectors > ATA_MAX_SECTORS)
1023 adev->max_sectors = ATA_MAX_SECTORS;
1024}
1025
/* Program the port's EDMA configuration register for the chip generation
 * and the requested queueing mode, and cache the NCQ state in pp_flags.
 */
static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
{
	u32 cfg;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1057
/* Return all of @ap's EDMA queue and SG-table DMA memory to the pools
 * and clear the cached pointers.
 */
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			/* on GEN_I, tags > 0 alias sg_tbl[0]: free only
			 * the real allocation, but clear every pointer
			 */
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}
1086
/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.  On any allocation failure, everything allocated
 * so far is released via mv_port_free_dma_mem().
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;
	int tag;

	/* devm-managed: freed automatically when the device goes away */
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			/* GEN_I: every tag shares the tag-0 table */
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only. Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
1154
/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	/* quiesce the engine before releasing its queue memory */
	mv_stop_edma(ap);
	mv_port_free_dma_mem(ap);
}
1169
/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.  DMA-mapped segments
 * are split so that no single ePRD entry crosses a 64KB boundary
 * (each entry's length field is capped at 0x10000 - offset).
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	/* each NCQ tag owns its own SG table */
	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			/* trim entry so it does not span a 64KB boundary */
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	/* flag the final entry so the hardware knows where the table ends */
	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
1213
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001214static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001215{
Mark Lord559eeda2006-05-19 16:40:15 -04001216 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001217 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001218 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001219}
1220
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA. Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* only DMA and NCQ protocols go through the EDMA request queue */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* point this CRQB at the SG table belonging to this tag */
	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accomodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command. So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ. NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux. If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	/* remaining registers, in the order the hardware expects them */
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1310
/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* Only DMA and NCQ protocols go through EDMA; everything else
	 * (PIO, ATAPI) takes the generic libata path and needs no CRQB.
	 */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	/* double 16-bit shift == ">> 32" without a warning on 32-bit builds */
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	/* Pack the shadow-register image into the four CRQB command words,
	 * in the layout the Gen IIE EDMA engine expects.
	 */
	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1378
/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_edma_engine(ap);
		return ata_qc_issue_prot(qc);
	}

	/* (Re)start EDMA in whichever mode (DMA vs NCQ) this command needs */
	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	/* advance our software producer index; CRQB was filled by qc_prep */
	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1420
/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @qc: affected command (may be NULL); receives the error mask when
 *	non-NULL, otherwise the mask goes to the port's eh_info
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which also performs a COMRESET.
 * The SERR case requires a clear of pending errors in the SATA
 * SERROR register.  Finally, if the port disabled DMA,
 * update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */

	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	/* Gen I (50xx) and Gen II/IIE use different freeze masks and
	 * different "EDMA self-disabled" cause bits.
	 */
	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			pp = ap->private_data;
			/* hardware turned EDMA off; sync our cached flag */
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp = ap->private_data;
			/* hardware turned EDMA off; sync our cached flag */
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	/* freeze the port for fatal causes; otherwise just abort commands */
	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
1525
1526static void mv_intr_pio(struct ata_port *ap)
1527{
1528 struct ata_queued_cmd *qc;
1529 u8 ata_status;
1530
1531 /* ignore spurious intr if drive still BUSY */
1532 ata_status = readb(ap->ioaddr.status_addr);
1533 if (unlikely(ata_status & ATA_BUSY))
1534 return;
1535
1536 /* get active ATA command */
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001537 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001538 if (unlikely(!qc)) /* no active tag */
1539 return;
1540 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1541 return;
1542
1543 /* and finally, complete the ATA command */
1544 qc->err_mask |= ac_err_mask(ata_status);
1545 ata_qc_complete(qc);
1546}
1547
/* Drain the EDMA response queue: complete every finished command between
 * our software out-pointer and the hardware in-pointer, then tell the
 * hardware how far we got.  Bails out to mv_err_intr() on the first
 * entry reporting an error in non-NCQ mode.
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			/* note: out-pointer is NOT advanced on the error path */
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	/* single out-pointer write covers all entries consumed above */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1613
/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0, last_port;

	/* each HC serves MV_PORTS_PER_HC consecutive ports */
	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* SoC (non-PCI) variants may expose fewer ports than a full HC */
	if (HAS_PCI(host))
		last_port = port0 + MV_PORTS_PER_HC;
	else
		last_port = port0 + hpriv->n_ports;
	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	/* write-1-to-clear the cause bits we just read */
	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < last_port; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		pp = ap->private_data;

		/* each port owns two bits in the main cause register */
		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC)
			shift++;	/* skip bit 8 in the HC Main IRQ reg */

		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			/* polled commands are handled by their issuer */
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}
1696
/* Handle a chip-level PCI error: log and clear the cause register, then
 * freeze every online port (marking the active command, if any, with
 * AC_ERR_HOST_BUS) so libata EH can recover them.
 */
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	/* acknowledge the PCI error */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			/* record the cause only on the first affected port */
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
1736
/**
 * mv_interrupt - Main interrupt event handler
 * @irq: unused
 * @dev_instance: private data; in this case the host structure
 *
 * Read the read only register to determine if any host
 * controllers have pending interrupts.  If so, call lower level
 * routine to handle.  Also check for PCI errors which are only
 * reported here.
 *
 * LOCKING:
 * This routine holds the host lock while processing pending
 * interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = hpriv->base;
	u32 irq_stat, irq_mask;

	/* Note to self: &host->lock == &ap->host->lock == ap->lock */
	spin_lock(&host->lock);

	irq_stat = readl(hpriv->main_cause_reg_addr);
	irq_mask = readl(hpriv->main_mask_reg_addr);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	/* PCI errors are reported only via the main cause register */
	if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	/* dispatch each host controller's slice of the cause register */
	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1792
Jeff Garzikc9d39132005-11-13 17:47:51 -05001793static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1794{
1795 unsigned int ofs;
1796
1797 switch (sc_reg_in) {
1798 case SCR_STATUS:
1799 case SCR_ERROR:
1800 case SCR_CONTROL:
1801 ofs = sc_reg_in * sizeof(u32);
1802 break;
1803 default:
1804 ofs = 0xffffffffU;
1805 break;
1806 }
1807 return ofs;
1808}
1809
Tejun Heoda3dbb12007-07-16 14:29:40 +09001810static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001811{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001812 struct mv_host_priv *hpriv = ap->host->private_data;
1813 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001814 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001815 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1816
Tejun Heoda3dbb12007-07-16 14:29:40 +09001817 if (ofs != 0xffffffffU) {
1818 *val = readl(addr + ofs);
1819 return 0;
1820 } else
1821 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001822}
1823
Tejun Heoda3dbb12007-07-16 14:29:40 +09001824static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001825{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001826 struct mv_host_priv *hpriv = ap->host->private_data;
1827 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001828 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001829 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1830
Tejun Heoda3dbb12007-07-16 14:29:40 +09001831 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001832 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001833 return 0;
1834 } else
1835 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001836}
1837
/* Gen-I (50xx) bus reset: enable the expansion-ROM BAR control bit
 * (except on early 5080 rev-0 parts, where this is skipped) and then
 * perform the common PCI bus reset.
 */
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}
1853
/* Gen-I: program the flash controller to its post-reset default value. */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1858
/* Gen-I: capture the board's PHY pre-emphasis and amplitude settings
 * from PHY_MODE so they can be re-applied after channel resets.
 */
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}
1870
/* Gen-I: configure GPIO/LED behavior and the expansion-ROM BAR control. */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): "|= ~(1 << 0)" sets every bit EXCEPT bit 0, which is
	 * the opposite of mv5_reset_bus()'s "|= (1 << 0)".  Possibly this was
	 * meant to be "&= ~(1 << 0)" — verify against Marvell 50xx docs
	 * before changing; hardware has shipped with this behavior.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1883
/* Gen-I PHY errata workarounds for one port: optionally fix APM
 * squelch (errata 50XXB0), then re-apply the saved pre-emphasis and
 * amplitude values into PHY_MODE.
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* clear the pre/amp field and install the board-specific values */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1909
Jeff Garzikc9d39132005-11-13 17:47:51 -05001910
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Gen-I: bring one port's EDMA engine to a clean post-reset state:
 * disable EDMA, reset the channel, then zero all queue pointers and
 * error registers and restore default config/timeout values.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1937
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Gen-I: reset one host controller's shared registers: clear interrupt
 * cause/mask registers and re-program the HC configuration at 0x20.
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
1956
1957static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1958 unsigned int n_hc)
1959{
1960 unsigned int hc, port;
1961
1962 for (hc = 0; hc < n_hc; hc++) {
1963 for (port = 0; port < MV_PORTS_PER_HC; port++)
1964 mv5_reset_hc_port(hpriv, mmio,
1965 (hc * MV_PORTS_PER_HC) + port);
1966
1967 mv5_reset_one_hc(hpriv, mmio, hc);
1968 }
1969
1970 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05001971}
1972
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
/* Common PCI-side reset: restore PCI mode defaults, disable/clear the
 * main IRQ mask and all PCI error reporting registers.
 */
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	/* cause/mask offsets are per-chip; taken from host private data */
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
1997
/* Gen-II: flash reset = Gen-I flash reset plus GPIO port control setup. */
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
2009
/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @mmio: base address of the HBA
 * @n_hc: number of host controllers (unused here)
 *
 * This routine only applies to 6xxx parts.  Returns 0 on success,
 * 1 on failure (PCI master would not flush, or the global soft
 * reset bit could not be set/cleared).
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* wait up to ~1ms for pending PCI master transactions to drain */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
2073
/* Gen-II: capture the PHY pre-emphasis and amplitude settings.  If
 * RESET_CFG bit 0 is clear the strap values are unavailable, so fall
 * back to fixed defaults; otherwise read them from the port's PHY_MODE2.
 */
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		/* strap values unavailable: use fixed defaults */
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
2093
/* Enable board LEDs via the GPIO port control register.
 * NOTE(review): 0x60 is taken as-is from the original driver; the bit
 * semantics are not documented here — confirm against the chip spec.
 */
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
2098
/* Apply 60x1 PHY errata workarounds for one port, then restore the
 * amplitude/pre-emphasis values previously saved by mv6_read_preamp().
 * The read/write/udelay ordering below is deliberate; do not reorder.
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		/* pulse bit 31 with bit 16 cleared, then clear both */
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* 60X1B2 only: save PHY_MODE3, which the PHY_MODE4
		 * update below can clobber on that revision
		 */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + PHY_MODE3);

		/* workaround for errata FEr SATA#10 (part 1) */
		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		/* 60X1B2 only: restore the saved PHY_MODE3 value */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + PHY_MODE3);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
2165
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002166/* TODO: use the generic LED interface to configure the SATA Presence */
2167/* & Acitivy LEDs on the board */
2168static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2169 void __iomem *mmio)
2170{
2171 return;
2172}
2173
2174static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2175 void __iomem *mmio)
2176{
2177 void __iomem *port_mmio;
2178 u32 tmp;
2179
2180 port_mmio = mv_port_base(mmio, idx);
2181 tmp = readl(port_mmio + PHY_MODE2);
2182
2183 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2184 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2185}
2186
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Quiesce and reinitialize one SoC SATA channel: disable EDMA, reset
 * the channel, then zero/reprogram the per-port EDMA registers.
 */
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
2212
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Zero the shared host-controller interrupt registers on the single
 * SoC host controller (HC 0).
 */
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);

}

#undef ZERO
2228
2229static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2230 void __iomem *mmio, unsigned int n_hc)
2231{
2232 unsigned int port;
2233
2234 for (port = 0; port < hpriv->n_ports; port++)
2235 mv_soc_reset_hc_port(hpriv, mmio, port);
2236
2237 mv_soc_reset_one_hc(hpriv, mmio);
2238
2239 return 0;
2240}
2241
2242static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2243 void __iomem *mmio)
2244{
2245 return;
2246}
2247
2248static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2249{
2250 return;
2251}
2252
/* Hard-reset one SATA channel via the EDMA ATA_RST bit, applying the
 * Gen-II interface-config fixup while reset is asserted, then run the
 * per-chip PHY errata workarounds.  Sequencing and delays are
 * deliberate; do not reorder.
 */
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);
		ifctl |= (1 << 7);	/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}
2279
/**
 * mv_phy_reset - Perform eDMA reset followed by COMRESET
 * @ap: ATA channel to manipulate
 * @class: out-parameter for the classified device type
 * @deadline: jiffies value after which to stop retrying/polling
 *
 * Issues COMRESET via SControl, waits for the link to come up, then
 * classifies the attached device.  Retries the COMRESET a few times
 * on Gen-II chips to work around an errata.
 *
 * NOTE(review): the historical comment claimed this is safe at
 * interrupt level, but the function calls msleep() throughout, so it
 * must NOT be called from atomic context.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			 unsigned long deadline)
{
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
	msleep(1);

	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
	msleep(20);

	/* poll SStatus until DET settles (0 = no device, 3 = link up) */
	do {
		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	if (ata_link_offline(&ap->link)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		msleep(500);
		if (retry-- <= 0)
			break;
		if (time_after(jiffies, deadline))
			break;
	}

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap->link.device, 1, NULL);

	/* clear any latched EDMA error causes from the reset */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}
2383
Tejun Heocc0680a2007-08-06 18:36:23 +09002384static int mv_prereset(struct ata_link *link, unsigned long deadline)
Jeff Garzik22374672005-11-17 10:59:48 -05002385{
Mark Lorde12bef52008-03-31 19:33:56 -04002386 mv_stop_edma(link->ap);
Tejun Heocf480622008-01-24 00:05:14 +09002387 return 0;
Jeff Garzik22374672005-11-17 10:59:48 -05002388}
2389
Tejun Heocc0680a2007-08-06 18:36:23 +09002390static int mv_hardreset(struct ata_link *link, unsigned int *class,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002391 unsigned long deadline)
2392{
Tejun Heocc0680a2007-08-06 18:36:23 +09002393 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002394 struct mv_host_priv *hpriv = ap->host->private_data;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002395 void __iomem *mmio = hpriv->base;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002396
Mark Lorde12bef52008-03-31 19:33:56 -04002397 mv_stop_edma(ap);
2398 mv_reset_channel(hpriv, mmio, ap->port_no);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002399 mv_phy_reset(ap, class, deadline);
2400
2401 return 0;
2402}
2403
Tejun Heocc0680a2007-08-06 18:36:23 +09002404static void mv_postreset(struct ata_link *link, unsigned int *classes)
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002405{
Tejun Heocc0680a2007-08-06 18:36:23 +09002406 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002407 u32 serr;
2408
2409 /* print link status */
Tejun Heocc0680a2007-08-06 18:36:23 +09002410 sata_print_link_status(link);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002411
2412 /* clear SError */
Tejun Heocc0680a2007-08-06 18:36:23 +09002413 sata_scr_read(link, SCR_ERROR, &serr);
2414 sata_scr_write_flush(link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002415
2416 /* bail out if no device is present */
2417 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2418 DPRINTK("EXIT, no device\n");
2419 return;
2420 }
2421
2422 /* set up device control */
2423 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2424}
2425
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002426static void mv_eh_freeze(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04002427{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002428 struct mv_host_priv *hpriv = ap->host->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002429 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2430 u32 tmp, mask;
2431 unsigned int shift;
Brett Russ31961942005-09-30 01:36:00 -04002432
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002433 /* FIXME: handle coalescing completion events properly */
Brett Russ31961942005-09-30 01:36:00 -04002434
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002435 shift = ap->port_no * 2;
2436 if (hc > 0)
2437 shift++;
Brett Russ31961942005-09-30 01:36:00 -04002438
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002439 mask = 0x3 << shift;
Brett Russ31961942005-09-30 01:36:00 -04002440
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002441 /* disable assertion of portN err, done events */
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002442 tmp = readl(hpriv->main_mask_reg_addr);
2443 writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002444}
2445
/* libata thaw hook: clear any EDMA errors and pending HC interrupt
 * events latched while frozen, then re-enable this port's err/done
 * bits in the main interrupt mask.  Causes must be cleared before the
 * mask is re-opened, or stale events would fire immediately.
 */
static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;	/* HC-relative port number */
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);		/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8));	/* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp | mask, hpriv->main_mask_reg_addr);
}
2479
/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
2528
/* Identify the controller variant and set up hpriv accordingly:
 * chip-generation flag, per-revision errata flags, the hardware-ops
 * vtable, and the PCI vs PCIe interrupt register offsets.
 * Returns 0 on success, 1 for an unknown board index.
 *
 * NOTE(review): pdev is computed unconditionally but only dereferenced
 * on the PCI chip branches; the chip_soc case never touches it.
 */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* fall through: 7042 shares the 6042 setup below */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs  = PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs  = PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
2668
/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 * @board_idx: controller index
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 * Sequence: chip id -> global mask off -> preamp save -> global reset
 * -> per-port errata/init -> clear causes -> unmask interrupts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	/* main IRQ cause/mask registers live at different offsets on
	 * PCI hosts vs. SoC-integrated hosts
	 */
	if (HAS_PCI(host)) {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_MASK_OFS;
	}
	/* global interrupt mask */
	writel(0, hpriv->main_mask_reg_addr);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* save PHY settings before the reset below clobbers them */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (HAS_PCI(host)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (HAS_PCI(host)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
		if (IS_GEN_I(hpriv))
			writelfl(~HC_MAIN_MASKED_IRQS_5,
				 hpriv->main_mask_reg_addr);
		else
			writelfl(~HC_MAIN_MASKED_IRQS,
				 hpriv->main_mask_reg_addr);

		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
			"PCI int cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr),
			readl(mmio + hpriv->irq_cause_ofs),
			readl(mmio + hpriv->irq_mask_ofs));
	} else {
		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
			 hpriv->main_mask_reg_addr);
		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr));
	}
done:
	return rc;
}
2785
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002786static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2787{
2788 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2789 MV_CRQB_Q_SZ, 0);
2790 if (!hpriv->crqb_pool)
2791 return -ENOMEM;
2792
2793 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2794 MV_CRPB_Q_SZ, 0);
2795 if (!hpriv->crpb_pool)
2796 return -ENOMEM;
2797
2798 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2799 MV_SG_TBL_SZ, 0);
2800 if (!hpriv->sg_tbl_pool)
2801 return -ENOMEM;
2802
2803 return 0;
2804}
2805
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002806/**
2807 * mv_platform_probe - handle a positive probe of an soc Marvell
2808 * host
2809 * @pdev: platform device found
2810 *
2811 * LOCKING:
2812 * Inherited from caller.
2813 */
2814static int mv_platform_probe(struct platform_device *pdev)
2815{
2816 static int printed_version;
2817 const struct mv_sata_platform_data *mv_platform_data;
2818 const struct ata_port_info *ppi[] =
2819 { &mv_port_info[chip_soc], NULL };
2820 struct ata_host *host;
2821 struct mv_host_priv *hpriv;
2822 struct resource *res;
2823 int n_ports, rc;
2824
2825 if (!printed_version++)
2826 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2827
2828 /*
2829 * Simple resource validation ..
2830 */
2831 if (unlikely(pdev->num_resources != 2)) {
2832 dev_err(&pdev->dev, "invalid number of resources\n");
2833 return -EINVAL;
2834 }
2835
2836 /*
2837 * Get the register base first
2838 */
2839 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2840 if (res == NULL)
2841 return -EINVAL;
2842
2843 /* allocate host */
2844 mv_platform_data = pdev->dev.platform_data;
2845 n_ports = mv_platform_data->n_ports;
2846
2847 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2848 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2849
2850 if (!host || !hpriv)
2851 return -ENOMEM;
2852 host->private_data = hpriv;
2853 hpriv->n_ports = n_ports;
2854
2855 host->iomap = NULL;
Saeed Bisharaf1cb0ea2008-02-18 07:42:28 -11002856 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2857 res->end - res->start + 1);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002858 hpriv->base -= MV_SATAHC0_REG_BASE;
2859
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002860 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2861 if (rc)
2862 return rc;
2863
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002864 /* initialize adapter */
2865 rc = mv_init_host(host, chip_soc);
2866 if (rc)
2867 return rc;
2868
2869 dev_printk(KERN_INFO, &pdev->dev,
2870 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2871 host->n_ports);
2872
2873 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2874 IRQF_SHARED, &mv6_sht);
2875}
2876
2877/*
2878 *
2879 * mv_platform_remove - unplug a platform interface
2880 * @pdev: platform device
2881 *
2882 * A platform bus SATA device has been unplugged. Perform the needed
2883 * cleanup. Also called on module unload for any active devices.
2884 */
2885static int __devexit mv_platform_remove(struct platform_device *pdev)
2886{
2887 struct device *dev = &pdev->dev;
2888 struct ata_host *host = dev_get_drvdata(dev);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002889
2890 ata_host_detach(host);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002891 return 0;
2892}
2893
/* Platform (SoC) bus glue; the PCI flavour is registered separately below */
static struct platform_driver mv_platform_driver = {
	.probe			= mv_platform_probe,
	.remove			= __devexit_p(mv_platform_remove),
	.driver			= {
				   .name = DRV_NAME,
				   .owner = THIS_MODULE,
				  },
};
2902
2903
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002904#ifdef CONFIG_PCI
/* forward declaration: defined below, but referenced by mv_pci_driver */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);
2907
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002908
/* PCI bus glue; removal is handled by the generic ata_pci_remove_one() */
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
};
2915
/*
 * module options
 */
/* consulted once at probe time in mv_pci_init_one(); read-only via sysfs */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
2920
2921
2922/* move to PCI layer or libata core? */
2923static int pci_go_64(struct pci_dev *pdev)
2924{
2925 int rc;
2926
2927 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2928 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2929 if (rc) {
2930 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2931 if (rc) {
2932 dev_printk(KERN_ERR, &pdev->dev,
2933 "64-bit DMA enable failed\n");
2934 return rc;
2935 }
2936 }
2937 } else {
2938 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2939 if (rc) {
2940 dev_printk(KERN_ERR, &pdev->dev,
2941 "32-bit DMA enable failed\n");
2942 return rc;
2943 }
2944 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2945 if (rc) {
2946 dev_printk(KERN_ERR, &pdev->dev,
2947 "32-bit consistent DMA enable failed\n");
2948 return rc;
2949 }
2950 }
2951
2952 return rc;
2953}
2954
Brett Russ05b308e2005-10-05 17:08:53 -04002955/**
2956 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002957 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002958 *
2959 * FIXME: complete this.
2960 *
2961 * LOCKING:
2962 * Inherited from caller.
2963 */
Tejun Heo4447d352007-04-17 23:44:08 +09002964static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002965{
Tejun Heo4447d352007-04-17 23:44:08 +09002966 struct pci_dev *pdev = to_pci_dev(host->dev);
2967 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07002968 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002969 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002970
2971 /* Use this to determine the HW stepping of the chip so we know
2972 * what errata to workaround
2973 */
Brett Russ31961942005-09-30 01:36:00 -04002974 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2975 if (scc == 0)
2976 scc_s = "SCSI";
2977 else if (scc == 0x01)
2978 scc_s = "RAID";
2979 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002980 scc_s = "?";
2981
2982 if (IS_GEN_I(hpriv))
2983 gen = "I";
2984 else if (IS_GEN_II(hpriv))
2985 gen = "II";
2986 else if (IS_GEN_IIE(hpriv))
2987 gen = "IIE";
2988 else
2989 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002990
Jeff Garzika9524a72005-10-30 14:39:11 -05002991 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002992 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2993 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002994 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2995}
2996
/**
 *      mv_pci_init_one - handle a positive probe of a PCI Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      Allocates the ATA host, acquires managed (devm/pcim) PCI
 *      resources, configures DMA, creates the DMA pools, initializes
 *      the adapter and activates the host.  The steps below are
 *      order-dependent: resources must be mapped before mv_init_host()
 *      touches registers, and interrupts are enabled last.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	/* driver_data selects the per-chip port_info/flags table entry */
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	/* both allocations are devm-managed: no explicit free on error */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		/* pin so a held BAR is reported rather than silently reused */
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	/* 64-bit DMA masks with 32-bit fallback */
	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts: fall back to legacy INTx only when MSI was
	 * requested via the module parameter but could not be enabled */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);	/* best-effort; failure is harmless */
	/* Gen-I chips use the reduced mv5 sht; later generations use mv6 */
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003066#endif
Brett Russ20f733e2005-09-01 18:26:17 -04003067
/* redundant forward declarations (definitions appear earlier in the file);
 * kept for safety when the surrounding #ifdef blocks are reshuffled */
static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);
3070
/*
 * mv_init - module load: register the PCI driver (when configured),
 * then the platform driver.  If platform registration fails, the PCI
 * registration is unwound so the module load fails cleanly.
 */
static int __init mv_init(void)
{
	/* defensive initializer; overwritten by whichever register
	 * call below runs first */
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	/* don't leave the PCI driver registered on platform failure */
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif
	return rc;
}
3087
/* mv_exit - module unload: unregister both bus drivers (reverse of mv_init) */
static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}
3095
/* module metadata, device tables, parameters and entry points */
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:sata_mv");

#ifdef CONFIG_PCI
/* the msi knob only exists when the PCI flavour is built */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);