blob: 5b9f937d0f87e2acce89ca88b894429edec83493 [file] [log] [blame]
Brett Russ20f733e2005-09-01 18:26:17 -04001/*
2 * sata_mv.c - Marvell SATA support
3 *
Mark Lorde12bef52008-03-31 19:33:56 -04004 * Copyright 2008: Marvell Corporation, all rights reserved.
Jeff Garzik8b260242005-11-12 12:32:50 -05005 * Copyright 2005: EMC Corporation, all rights reserved.
Jeff Garzike2b1be52005-11-18 14:04:23 -05006 * Copyright 2005 Red Hat, Inc. All rights reserved.
Brett Russ20f733e2005-09-01 18:26:17 -04007 *
8 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2 of the License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 */
24
Jeff Garzik4a05e202007-05-24 23:40:15 -040025/*
26 sata_mv TODO list:
27
28 1) Needs a full errata audit for all chipsets. I implemented most
29 of the errata workarounds found in the Marvell vendor driver, but
30 I distinctly remember a couple workarounds (one related to PCI-X)
31 are still needed.
32
Mark Lord1fd2e1c2008-01-26 18:33:59 -050033 2) Improve/fix IRQ and error handling sequences.
34
35 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
36
37 4) Think about TCQ support here, and for libata in general
 38 with controllers that support it via host-queuing hardware
39 (a software-only implementation could be a nightmare).
Jeff Garzik4a05e202007-05-24 23:40:15 -040040
41 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
42
43 6) Add port multiplier support (intermediate)
44
Jeff Garzik4a05e202007-05-24 23:40:15 -040045 8) Develop a low-power-consumption strategy, and implement it.
46
47 9) [Experiment, low priority] See if ATAPI can be supported using
48 "unknown FIS" or "vendor-specific FIS" support, or something creative
49 like that.
50
51 10) [Experiment, low priority] Investigate interrupt coalescing.
52 Quite often, especially with PCI Message Signalled Interrupts (MSI),
 53 the overhead reduced by interrupt mitigation is often not
54 worth the latency cost.
55
56 11) [Experiment, Marvell value added] Is it possible to use target
57 mode to cross-connect two Linux boxes with Marvell cards? If so,
58 creating LibATA target mode support would be very interesting.
59
60 Target mode, for those without docs, is the ability to directly
61 connect two SATA controllers.
62
Jeff Garzik4a05e202007-05-24 23:40:15 -040063*/
64
Brett Russ20f733e2005-09-01 18:26:17 -040065#include <linux/kernel.h>
66#include <linux/module.h>
67#include <linux/pci.h>
68#include <linux/init.h>
69#include <linux/blkdev.h>
70#include <linux/delay.h>
71#include <linux/interrupt.h>
Andrew Morton8d8b6002008-02-04 23:43:44 -080072#include <linux/dmapool.h>
Brett Russ20f733e2005-09-01 18:26:17 -040073#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050074#include <linux/device.h>
Saeed Bisharaf351b2d2008-02-01 18:08:03 -050075#include <linux/platform_device.h>
76#include <linux/ata_platform.h>
Brett Russ20f733e2005-09-01 18:26:17 -040077#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050078#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040079#include <scsi/scsi_device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040080#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040081
82#define DRV_NAME "sata_mv"
Mark Lord1fd2e1c2008-01-26 18:33:59 -050083#define DRV_VERSION "1.20"
Brett Russ20f733e2005-09-01 18:26:17 -040084
/*
 * Register offsets and bit definitions for the Marvell SATA controllers.
 * All offsets are relative to the chip's MMIO BAR unless a comment says
 * otherwise; values are taken verbatim from the existing register map.
 */
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	/* SoC integrated controllers, no PCI interface */
	MV_FLAG_SOC		= (1 << 28),

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	/* CRQB (Command ReQuest Block) field layout */
	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	/* CRPB (Command ResPonse Block) field layout */
	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	/* Host controller main interrupt cause/mask registers.
	 * The _SOC variants are used when there is no PCI interface.
	 */
	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),	/* bits 31-9, 7-6 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),
	HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
	LTMODE_OFS		= 0x30c,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	SATA_IFCTL_OFS		= 0x344,
	SATA_IFSTAT_OFS		= 0x34c,
	VENDOR_UNIQUE_FIS_OFS	= 0x35c,
	FIS_CFG_OFS		= 0x360,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CFG	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */

	/* EDMA error interrupt cause/mask bits.  NOTE(review): bit 8 is
	 * triply defined below; which meaning applies depends on the chip
	 * generation (Gen I vs Gen IIE), per the per-bit comments.
	 */
	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	/* Errors which are transient and need not freeze the port */
	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	/* Error bits that cause an EH freeze, Gen II/IIE */
	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	/* Error bits that cause an EH freeze, Gen I (50xx) */
	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	/* EDMA request/response queue pointer registers */
	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
};
357
/* Chip-generation tests on struct mv_host_priv::hp_flags */
#define IS_GEN_I(hpriv)		((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)
/* True when the host has a PCI interface (i.e. is not an SoC controller) */
#define HAS_PCI(host)		(!((host)->ports[0]->flags & MV_FLAG_SOC))
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500362
/* DMA-related constants kept out of the main register-map enum */
enum {
	/* s/g splitting on /length/ in mv_fill_sg() requires a
	 * 0xffff DMA segment boundary.
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* register bits holding the low 32 bits of the EDMA
	 * request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* same, for the response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
377
/* Supported controller families; used as indices into mv_port_info[] */
enum chip_type {
	chip_504x	= 0,
	chip_508x	= 1,
	chip_5080	= 2,
	chip_604x	= 3,
	chip_608x	= 4,
	chip_6042	= 5,
	chip_7042	= 6,
	chip_soc	= 7,	/* SoC-integrated, no PCI interface */
};
388
/* Command ReQuest Block: 32B.  Hardware-defined layout (little-endian
 * fields); member order and sizes must not change.
 */
struct mv_crqb {
	__le32			sg_addr;	/* low 32 bits of s/g table address */
	__le32			sg_addr_hi;	/* high 32 bits of s/g table address */
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};
396
/* Gen-IIE variant of the Command ReQuest Block.  Hardware-defined layout;
 * member order and sizes must not change.
 */
struct mv_crqb_iie {
	__le32			addr;		/* low 32 bits of s/g table address */
	__le32			addr_hi;	/* high 32 bits of s/g table address */
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};
404
/* Command ResPonse Block: 8B.  Hardware-defined layout; member order and
 * sizes must not change.
 */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};
411
/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG.  16B hardware-defined
 * layout; member order and sizes must not change.
 */
struct mv_sg {
	__le32			addr;		/* low 32 bits of segment address */
	__le32			flags_size;	/* flags + segment length */
	__le32			addr_hi;	/* high 32 bits of segment address */
	__le32			reserved;
};
419
/* Per-port private data: EDMA queue memory and bookkeeping */
struct mv_port_priv {
	struct mv_crqb		*crqb;		/* request queue (CPU view) */
	dma_addr_t		crqb_dma;	/* request queue (device view) */
	struct mv_crpb		*crpb;		/* response queue (CPU view) */
	dma_addr_t		crpb_dma;	/* response queue (device view) */
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];	/* one s/g table per queue slot */
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;	/* next request queue slot */
	unsigned int		resp_idx;	/* next response queue slot */

	u32			pp_flags;	/* MV_PP_FLAG_* */
};
433
/* Per-port PHY signal parameters read from the chip's preamp registers */
struct mv_port_signal {
	u32			amps;
	u32			pre;
};
438
/* Per-host private data shared by all ports of one controller */
struct mv_host_priv {
	u32			hp_flags;	/* MV_HP_* host flags/errata */
	struct mv_port_signal	signal[8];	/* per-port PHY preamp values */
	const struct mv_hw_ops	*ops;		/* chip-family hooks */
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_cause_reg_addr;
	void __iomem		*main_mask_reg_addr;
	u32			irq_cause_ofs;	/* PCI vs PCIe register offsets */
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};
459
/* Chip-family hooks; one implementation set per generation
 * (mv5_*, mv6_*, mv_soc_* below).
 */
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};
471
/* libata callbacks and generic helpers */
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_prereset(struct ata_link *link, unsigned long deadline);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_postreset(struct ata_link *link, unsigned int *classes);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

/* Gen I (50xx) mv_hw_ops implementations */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

/* Gen II/IIE (60xx/6042/7042) mv_hw_ops implementations */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);

/* SoC-integrated controller mv_hw_ops implementations */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);

/* Channel/EDMA control helpers */
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500522
/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
/* SCSI host template for Gen I (50xx) controllers: no NCQ */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};
532
/* SCSI host template for Gen II/IIE controllers: NCQ-capable */
static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};
539
/* libata port operations for Gen I (50xx); base for the other op tables */
static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.prereset		= mv_prereset,
	.hardreset		= mv_hardreset,
	.postreset		= mv_postreset,
	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
	.post_internal_cmd	= ATA_OP_NULL,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
560
/* Gen II (60xx) ops: inherit from mv5_ops, add NCQ defer and dev_config */
static struct ata_port_operations mv6_ops = {
	.inherits		= &mv5_ops,
	.qc_defer		= ata_std_qc_defer,
	.dev_config		= mv6_dev_config,
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,
};
568
/* Gen IIE (6042/7042/SoC) ops: IIE-format CRQBs, no dev_config override */
static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};
574
/* Per-family port configuration, indexed by enum chip_type */
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
629
/*
 * PCI devices handled by this driver.  The second initializer is the
 * board index (chip_*) used to look up per-chip data for the device.
 */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
659
/* Low-level hardware access methods for Gen-I (50xx) chips. */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
668
/* Low-level hardware access methods for Gen-II/IIE (60xx/7042) chips. */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
677
/*
 * Low-level hardware access methods for system-on-chip (SoC) variants.
 * PHY errata handling is shared with Gen-II; everything bus/LED related
 * has SoC-specific implementations (no PCI behind these ports).
 */
static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};
686
Brett Russ20f733e2005-09-01 18:26:17 -0400687/*
688 * Functions
689 */
690
/*
 * writelfl - MMIO write followed by a read-back of the same register.
 * The read-back forces any posted PCI write to complete before we
 * proceed, so use this wherever the chip must see the write immediately.
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
696
Brett Russ20f733e2005-09-01 18:26:17 -0400697static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
698{
699 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
700}
701
/* Map a host port number to the index of the host controller owning it. */
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}
706
/* Map a host port number to its hardware port index within its HC. */
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
711
/* MMIO base of the host controller that owns @port. */
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}
717
Brett Russ20f733e2005-09-01 18:26:17 -0400718static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
719{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500720 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500721 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500722 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400723}
724
Mark Lorde12bef52008-03-31 19:33:56 -0400725static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
726{
727 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
728 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
729
730 return hc_mmio + ofs;
731}
732
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500733static inline void __iomem *mv_host_base(struct ata_host *host)
734{
735 struct mv_host_priv *hpriv = host->private_data;
736 return hpriv->base;
737}
738
Brett Russ20f733e2005-09-01 18:26:17 -0400739static inline void __iomem *mv_ap_base(struct ata_port *ap)
740{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500741 return mv_port_base(mv_host_base(ap->host), ap->port_no);
Brett Russ20f733e2005-09-01 18:26:17 -0400742}
743
Jeff Garzikcca39742006-08-24 03:19:22 -0400744static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400745{
Jeff Garzikcca39742006-08-24 03:19:22 -0400746 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400747}
748
/*
 * mv_set_edma_ptrs - program EDMA request/response queue base and pointers
 * @port_mmio: per-port register base
 * @hpriv: host private data (for errata flags)
 * @pp: port private data holding queue DMA addresses and soft indices
 *
 * Writes the CRQB/CRPB queue DMA base addresses and (re)loads the
 * hardware in/out pointers from the driver's cached indices.  On chips
 * with the XX42A0 errata the out/in pointer registers also need the low
 * base address bits rewritten.  Caller must ensure EDMA is not running.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* the CRQB ring must be 1KB-aligned per the hardware spec */
	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	/* the CRPB ring must be 256-byte aligned */
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
788
/**
 *      mv_start_dma - Enable eDMA engine
 *      @ap: ATA channel to manipulate
 *      @port_mmio: per-port register base
 *      @pp: port private data
 *      @protocol: taskfile protocol of the command about to be issued
 *
 *      If the engine is already running but in the wrong mode
 *      (NCQ vs. non-NCQ), stop it first.  If (re)starting, clear any
 *      stale EDMA/host-controller interrupt causes, reconfigure the
 *      EDMA mode, reload the queue pointers, then set EDMA_EN.
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		/* mode switch requires a full stop/reconfigure/restart */
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
					mv_host_base(ap->host), hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			(CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(ap, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}
841
/**
 *      mv_stop_edma_engine - Disable eDMA engine
 *      @port_mmio: io base address
 *
 *      Requests eDMA disable and polls until the chip confirms the
 *      engine is off (up to 10000 * 10us = ~100ms).
 *
 *      Returns 0 on success, -EIO if the engine never shut down.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}
865
/*
 * mv_stop_edma - stop the port's eDMA engine if it is running
 * @ap: ATA channel to manipulate
 *
 * Clears the driver's EDMA-enabled flag and shuts the engine down.
 * Returns 0 if the engine was already off or stopped cleanly,
 * -EIO (with an error message) if the hardware failed to stop.
 */
static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		return -EIO;
	}
	return 0;
}
880
#ifdef ATA_DEBUG
/* Debug helper: hex-dump @bytes of MMIO space starting at @start,
 * four 32-bit words per line. */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
895
/* Debug helper: hex-dump the first @bytes of @pdev's PCI config space.
 * Compiles to an empty function unless ATA_DEBUG is defined. */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
/*
 * Debug helper: dump PCI config space, chip-global registers, and the
 * HC/EDMA/SATA register blocks.  Pass a negative @port to dump every
 * port on the chip; pass NULL @pdev to skip the PCI config dump.
 * Compiles to an empty function unless ATA_DEBUG is defined.
 */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		/* dump everything */
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
956
Brett Russ20f733e2005-09-01 18:26:17 -0400957static unsigned int mv_scr_offset(unsigned int sc_reg_in)
958{
959 unsigned int ofs;
960
961 switch (sc_reg_in) {
962 case SCR_STATUS:
963 case SCR_CONTROL:
964 case SCR_ERROR:
965 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
966 break;
967 case SCR_ACTIVE:
968 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
969 break;
970 default:
971 ofs = 0xffffffffU;
972 break;
973 }
974 return ofs;
975}
976
Tejun Heoda3dbb12007-07-16 14:29:40 +0900977static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -0400978{
979 unsigned int ofs = mv_scr_offset(sc_reg_in);
980
Tejun Heoda3dbb12007-07-16 14:29:40 +0900981 if (ofs != 0xffffffffU) {
982 *val = readl(mv_ap_base(ap) + ofs);
983 return 0;
984 } else
985 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -0400986}
987
Tejun Heoda3dbb12007-07-16 14:29:40 +0900988static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -0400989{
990 unsigned int ofs = mv_scr_offset(sc_reg_in);
991
Tejun Heoda3dbb12007-07-16 14:29:40 +0900992 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -0400993 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +0900994 return 0;
995 } else
996 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -0400997}
998
Mark Lordf2738272008-01-26 18:32:29 -0500999static void mv6_dev_config(struct ata_device *adev)
1000{
1001 /*
1002 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1003 * See mv_qc_prep() for more info.
1004 */
1005 if (adev->flags & ATA_DFLAG_NCQ)
1006 if (adev->max_sectors > ATA_MAX_SECTORS)
1007 adev->max_sectors = ATA_MAX_SECTORS;
1008}
1009
/*
 * mv_edma_cfg - program the EDMA configuration register for this port
 * @ap: ATA channel to manipulate
 * @want_ncq: nonzero to configure the engine for NCQ operation
 *
 * Builds the chip-generation-specific EDMA_CFG value, records the
 * resulting NCQ state in pp->pp_flags, and writes the register.
 * Caller must ensure EDMA is stopped before reconfiguring.
 */
static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
{
	u32 cfg;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1041
/*
 * mv_port_free_dma_mem - release all per-port DMA pool allocations
 * @ap: ATA channel whose buffers to free
 *
 * Frees the CRQB/CRPB rings and the scatter/gather tables, NULLing
 * each pointer so the function is safe against partially-completed
 * allocations (it is the error-unwind path for mv_port_start()).
 */
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 * (On Gen-I, tags 1..n alias tag 0's table, so free only tag 0.)
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}
1070
/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      Returns 0 on success, -ENOMEM if any allocation fails (all
 *      partial allocations are released via mv_port_free_dma_mem()).
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			/* Gen-I: all tags share tag 0's table */
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
1138
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 *      NOTE(review): mv_stop_edma() itself takes no lock here; the
 *      host lock is presumably held or irrelevant at teardown - verify.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_port_free_dma_mem(ap);
}
1153
/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.  Each DMA-mapped
 *      segment is split so no ePRD entry crosses a 64KB boundary (the
 *      length field is only 16 bits wide).
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			/* clamp so this entry stops at the 64KB boundary */
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	/* flag the final entry as end-of-table for the EDMA engine */
	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
1197
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001198static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001199{
Mark Lord559eeda2006-05-19 16:40:15 -04001200 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001201 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001202 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001203}
1204
/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* non-EDMA protocols are handled by the generic libata path */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1294
/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.  Gen-IIE chips use a different CRQB layout
 *      (struct mv_crqb_iie) that carries the whole taskfile in four
 *      packed 32-bit words instead of per-register command words.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* non-EDMA protocols are handled by the generic libata path */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1362
Brett Russ05b308e2005-10-05 17:08:53 -04001363/**
1364 * mv_qc_issue - Initiate a command to the host
1365 * @qc: queued command to start
1366 *
1367 * This routine simply redirects to the general purpose routine
1368 * if command is not DMA. Else, it sanity checks our local
1369 * caches of the request producer/consumer indices then enables
1370 * DMA and bumps the request producer index.
1371 *
1372 * LOCKING:
1373 * Inherited from caller.
1374 */
Tejun Heo9a3d9eb2006-01-23 13:09:36 +09001375static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
Brett Russ31961942005-09-30 01:36:00 -04001376{
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001377 struct ata_port *ap = qc->ap;
1378 void __iomem *port_mmio = mv_ap_base(ap);
1379 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001380 u32 in_index;
Brett Russ31961942005-09-30 01:36:00 -04001381
Mark Lord138bfdd2008-01-26 18:33:18 -05001382 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1383 (qc->tf.protocol != ATA_PROT_NCQ)) {
Brett Russ31961942005-09-30 01:36:00 -04001384 /* We're about to send a non-EDMA capable command to the
1385 * port. Turn off EDMA so there won't be problems accessing
1386 * shadow block, etc registers.
1387 */
Mark Lordb5624682008-03-31 19:34:40 -04001388 mv_stop_edma(ap);
Brett Russ31961942005-09-30 01:36:00 -04001389 return ata_qc_issue_prot(qc);
1390 }
1391
Mark Lord72109162008-01-26 18:31:33 -05001392 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001393
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001394 pp->req_idx++;
Brett Russ31961942005-09-30 01:36:00 -04001395
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001396 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
Brett Russ31961942005-09-30 01:36:00 -04001397
1398 /* and write the request in pointer to kick the EDMA to life */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001399 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1400 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
Brett Russ31961942005-09-30 01:36:00 -04001401
1402 return 0;
1403}
1404
Brett Russ05b308e2005-10-05 17:08:53 -04001405/**
Brett Russ05b308e2005-10-05 17:08:53 -04001406 * mv_err_intr - Handle error interrupts on the port
1407 * @ap: ATA channel to manipulate
Mark Lord9b358e32006-05-19 16:21:03 -04001408 * @reset_allowed: bool: 0 == don't trigger from reset here
Brett Russ05b308e2005-10-05 17:08:53 -04001409 *
1410 * In most cases, just clear the interrupt and move on. However,
Mark Lorde12bef52008-03-31 19:33:56 -04001411 * some cases require an eDMA reset, which also performs a COMRESET.
1412 * The SERR case requires a clear of pending errors in the SATA
1413 * SERROR register. Finally, if the port disabled DMA,
1414 * update our cached copy to match.
Brett Russ05b308e2005-10-05 17:08:53 -04001415 *
1416 * LOCKING:
1417 * Inherited from caller.
1418 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001419static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
Brett Russ20f733e2005-09-01 18:26:17 -04001420{
Brett Russ31961942005-09-30 01:36:00 -04001421 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001422 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1423 struct mv_port_priv *pp = ap->private_data;
1424 struct mv_host_priv *hpriv = ap->host->private_data;
1425 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1426 unsigned int action = 0, err_mask = 0;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001427 struct ata_eh_info *ehi = &ap->link.eh_info;
Brett Russ20f733e2005-09-01 18:26:17 -04001428
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001429 ata_ehi_clear_desc(ehi);
Brett Russ20f733e2005-09-01 18:26:17 -04001430
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001431 if (!edma_enabled) {
1432 /* just a guess: do we need to do this? should we
1433 * expand this, and do it in all cases?
1434 */
Tejun Heo936fd732007-08-06 18:36:23 +09001435 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1436 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Brett Russ20f733e2005-09-01 18:26:17 -04001437 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001438
1439 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1440
1441 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1442
1443 /*
1444 * all generations share these EDMA error cause bits
1445 */
1446
1447 if (edma_err_cause & EDMA_ERR_DEV)
1448 err_mask |= AC_ERR_DEV;
1449 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
Jeff Garzik6c1153e2007-07-13 15:20:15 -04001450 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001451 EDMA_ERR_INTRL_PAR)) {
1452 err_mask |= AC_ERR_ATA_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09001453 action |= ATA_EH_RESET;
Tejun Heob64bbc32007-07-16 14:29:39 +09001454 ata_ehi_push_desc(ehi, "parity error");
Brett Russafb0edd2005-10-05 17:08:42 -04001455 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001456 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1457 ata_ehi_hotplugged(ehi);
1458 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
Tejun Heob64bbc32007-07-16 14:29:39 +09001459 "dev disconnect" : "dev connect");
Tejun Heocf480622008-01-24 00:05:14 +09001460 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001461 }
1462
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04001463 if (IS_GEN_I(hpriv)) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001464 eh_freeze_mask = EDMA_EH_FREEZE_5;
1465
1466 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
Harvey Harrison5ab063e2008-02-13 21:14:14 -08001467 pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001468 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001469 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001470 }
1471 } else {
1472 eh_freeze_mask = EDMA_EH_FREEZE;
1473
1474 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
Harvey Harrison5ab063e2008-02-13 21:14:14 -08001475 pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001476 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001477 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001478 }
1479
1480 if (edma_err_cause & EDMA_ERR_SERR) {
Tejun Heo936fd732007-08-06 18:36:23 +09001481 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1482 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001483 err_mask = AC_ERR_ATA_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09001484 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001485 }
1486 }
Brett Russ20f733e2005-09-01 18:26:17 -04001487
1488 /* Clear EDMA now that SERR cleanup done */
Mark Lord3606a382008-01-26 18:28:23 -05001489 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001490
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001491 if (!err_mask) {
1492 err_mask = AC_ERR_OTHER;
Tejun Heocf480622008-01-24 00:05:14 +09001493 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001494 }
1495
1496 ehi->serror |= serr;
1497 ehi->action |= action;
1498
1499 if (qc)
1500 qc->err_mask |= err_mask;
1501 else
1502 ehi->err_mask |= err_mask;
1503
1504 if (edma_err_cause & eh_freeze_mask)
1505 ata_port_freeze(ap);
1506 else
1507 ata_port_abort(ap);
1508}
1509
1510static void mv_intr_pio(struct ata_port *ap)
1511{
1512 struct ata_queued_cmd *qc;
1513 u8 ata_status;
1514
1515 /* ignore spurious intr if drive still BUSY */
1516 ata_status = readb(ap->ioaddr.status_addr);
1517 if (unlikely(ata_status & ATA_BUSY))
1518 return;
1519
1520 /* get active ATA command */
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001521 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001522 if (unlikely(!qc)) /* no active tag */
1523 return;
1524 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1525 return;
1526
1527 /* and finally, complete the ATA command */
1528 qc->err_mask |= ac_err_mask(ata_status);
1529 ata_qc_complete(qc);
1530}
1531
/* Drain the EDMA response queue: complete every CRPB the hardware has
 * produced since our last visit, then publish the new consumer index.
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			/* error path; consumer index deliberately not bumped */
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	/* single posted write covers all entries consumed above */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1597
Brett Russ05b308e2005-10-05 17:08:53 -04001598/**
1599 * mv_host_intr - Handle all interrupts on the given host controller
Jeff Garzikcca39742006-08-24 03:19:22 -04001600 * @host: host specific structure
Brett Russ05b308e2005-10-05 17:08:53 -04001601 * @relevant: port error bits relevant to this host controller
1602 * @hc: which host controller we're to look at
1603 *
1604 * Read then write clear the HC interrupt status then walk each
1605 * port connected to the HC and see if it needs servicing. Port
1606 * success ints are reported in the HC interrupt status reg, the
1607 * port error ints are reported in the higher level main
1608 * interrupt status register and thus are passed in via the
1609 * 'relevant' argument.
1610 *
1611 * LOCKING:
1612 * Inherited from caller.
1613 */
Jeff Garzikcca39742006-08-24 03:19:22 -04001614static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
Brett Russ20f733e2005-09-01 18:26:17 -04001615{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001616 struct mv_host_priv *hpriv = host->private_data;
1617 void __iomem *mmio = hpriv->base;
Brett Russ20f733e2005-09-01 18:26:17 -04001618 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
Brett Russ20f733e2005-09-01 18:26:17 -04001619 u32 hc_irq_cause;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001620 int port, port0, last_port;
Brett Russ20f733e2005-09-01 18:26:17 -04001621
Jeff Garzik35177262007-02-24 21:26:42 -05001622 if (hc == 0)
Brett Russ20f733e2005-09-01 18:26:17 -04001623 port0 = 0;
Jeff Garzik35177262007-02-24 21:26:42 -05001624 else
Brett Russ20f733e2005-09-01 18:26:17 -04001625 port0 = MV_PORTS_PER_HC;
Brett Russ20f733e2005-09-01 18:26:17 -04001626
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001627 if (HAS_PCI(host))
1628 last_port = port0 + MV_PORTS_PER_HC;
1629 else
1630 last_port = port0 + hpriv->n_ports;
Brett Russ20f733e2005-09-01 18:26:17 -04001631 /* we'll need the HC success int register in most cases */
1632 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001633 if (!hc_irq_cause)
1634 return;
1635
1636 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001637
1638 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001639 hc, relevant, hc_irq_cause);
Brett Russ20f733e2005-09-01 18:26:17 -04001640
Yinghai Lu8f71efe2008-02-07 15:06:17 -08001641 for (port = port0; port < last_port; port++) {
Jeff Garzikcca39742006-08-24 03:19:22 -04001642 struct ata_port *ap = host->ports[port];
Yinghai Lu8f71efe2008-02-07 15:06:17 -08001643 struct mv_port_priv *pp;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001644 int have_err_bits, hard_port, shift;
Jeff Garzik55d8ca42006-03-29 19:43:31 -05001645
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001646 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
Jeff Garzika2c91a82005-11-17 05:44:44 -05001647 continue;
1648
Yinghai Lu8f71efe2008-02-07 15:06:17 -08001649 pp = ap->private_data;
1650
Brett Russ31961942005-09-30 01:36:00 -04001651 shift = port << 1; /* (port * 2) */
Mark Lorde12bef52008-03-31 19:33:56 -04001652 if (port >= MV_PORTS_PER_HC)
Brett Russ20f733e2005-09-01 18:26:17 -04001653 shift++; /* skip bit 8 in the HC Main IRQ reg */
Mark Lorde12bef52008-03-31 19:33:56 -04001654
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001655 have_err_bits = ((PORT0_ERR << shift) & relevant);
1656
1657 if (unlikely(have_err_bits)) {
1658 struct ata_queued_cmd *qc;
1659
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001660 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001661 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1662 continue;
1663
1664 mv_err_intr(ap, qc);
1665 continue;
Brett Russ20f733e2005-09-01 18:26:17 -04001666 }
Jeff Garzik8b260242005-11-12 12:32:50 -05001667
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001668 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1669
1670 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1671 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1672 mv_intr_edma(ap);
1673 } else {
1674 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1675 mv_intr_pio(ap);
Brett Russ20f733e2005-09-01 18:26:17 -04001676 }
1677 }
1678 VPRINTK("EXIT\n");
1679}
1680
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001681static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1682{
Mark Lord02a121d2007-12-01 13:07:22 -05001683 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001684 struct ata_port *ap;
1685 struct ata_queued_cmd *qc;
1686 struct ata_eh_info *ehi;
1687 unsigned int i, err_mask, printed = 0;
1688 u32 err_cause;
1689
Mark Lord02a121d2007-12-01 13:07:22 -05001690 err_cause = readl(mmio + hpriv->irq_cause_ofs);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001691
1692 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1693 err_cause);
1694
1695 DPRINTK("All regs @ PCI error\n");
1696 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1697
Mark Lord02a121d2007-12-01 13:07:22 -05001698 writelfl(0, mmio + hpriv->irq_cause_ofs);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001699
1700 for (i = 0; i < host->n_ports; i++) {
1701 ap = host->ports[i];
Tejun Heo936fd732007-08-06 18:36:23 +09001702 if (!ata_link_offline(&ap->link)) {
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001703 ehi = &ap->link.eh_info;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001704 ata_ehi_clear_desc(ehi);
1705 if (!printed++)
1706 ata_ehi_push_desc(ehi,
1707 "PCI err cause 0x%08x", err_cause);
1708 err_mask = AC_ERR_HOST_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09001709 ehi->action = ATA_EH_RESET;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001710 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001711 if (qc)
1712 qc->err_mask |= err_mask;
1713 else
1714 ehi->err_mask |= err_mask;
1715
1716 ata_port_freeze(ap);
1717 }
1718 }
1719}
1720
Brett Russ05b308e2005-10-05 17:08:53 -04001721/**
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001722 * mv_interrupt - Main interrupt event handler
Brett Russ05b308e2005-10-05 17:08:53 -04001723 * @irq: unused
1724 * @dev_instance: private data; in this case the host structure
Brett Russ05b308e2005-10-05 17:08:53 -04001725 *
1726 * Read the read only register to determine if any host
1727 * controllers have pending interrupts. If so, call lower level
1728 * routine to handle. Also check for PCI errors which are only
1729 * reported here.
1730 *
Jeff Garzik8b260242005-11-12 12:32:50 -05001731 * LOCKING:
Jeff Garzikcca39742006-08-24 03:19:22 -04001732 * This routine holds the host lock while processing pending
Brett Russ05b308e2005-10-05 17:08:53 -04001733 * interrupts.
1734 */
David Howells7d12e782006-10-05 14:55:46 +01001735static irqreturn_t mv_interrupt(int irq, void *dev_instance)
Brett Russ20f733e2005-09-01 18:26:17 -04001736{
Jeff Garzikcca39742006-08-24 03:19:22 -04001737 struct ata_host *host = dev_instance;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001738 struct mv_host_priv *hpriv = host->private_data;
Brett Russ20f733e2005-09-01 18:26:17 -04001739 unsigned int hc, handled = 0, n_hcs;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001740 void __iomem *mmio = hpriv->base;
Mark Lord646a4da2008-01-26 18:30:37 -05001741 u32 irq_stat, irq_mask;
Brett Russ20f733e2005-09-01 18:26:17 -04001742
Mark Lorde12bef52008-03-31 19:33:56 -04001743 /* Note to self: &host->lock == &ap->host->lock == ap->lock */
Mark Lord646a4da2008-01-26 18:30:37 -05001744 spin_lock(&host->lock);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001745
1746 irq_stat = readl(hpriv->main_cause_reg_addr);
1747 irq_mask = readl(hpriv->main_mask_reg_addr);
Brett Russ20f733e2005-09-01 18:26:17 -04001748
1749 /* check the cases where we either have nothing pending or have read
1750 * a bogus register value which can indicate HW removal or PCI fault
1751 */
Mark Lord646a4da2008-01-26 18:30:37 -05001752 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1753 goto out_unlock;
Brett Russ20f733e2005-09-01 18:26:17 -04001754
Jeff Garzikcca39742006-08-24 03:19:22 -04001755 n_hcs = mv_get_hc_count(host->ports[0]->flags);
Brett Russ20f733e2005-09-01 18:26:17 -04001756
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001757 if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001758 mv_pci_error(host, mmio);
1759 handled = 1;
1760 goto out_unlock; /* skip all other HC irq handling */
1761 }
1762
Brett Russ20f733e2005-09-01 18:26:17 -04001763 for (hc = 0; hc < n_hcs; hc++) {
1764 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1765 if (relevant) {
Jeff Garzikcca39742006-08-24 03:19:22 -04001766 mv_host_intr(host, relevant, hc);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001767 handled = 1;
Brett Russ20f733e2005-09-01 18:26:17 -04001768 }
1769 }
Mark Lord615ab952006-05-19 16:24:56 -04001770
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001771out_unlock:
Jeff Garzikcca39742006-08-24 03:19:22 -04001772 spin_unlock(&host->lock);
Brett Russ20f733e2005-09-01 18:26:17 -04001773
1774 return IRQ_RETVAL(handled);
1775}
1776
Jeff Garzikc9d39132005-11-13 17:47:51 -05001777static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1778{
1779 unsigned int ofs;
1780
1781 switch (sc_reg_in) {
1782 case SCR_STATUS:
1783 case SCR_ERROR:
1784 case SCR_CONTROL:
1785 ofs = sc_reg_in * sizeof(u32);
1786 break;
1787 default:
1788 ofs = 0xffffffffU;
1789 break;
1790 }
1791 return ofs;
1792}
1793
Tejun Heoda3dbb12007-07-16 14:29:40 +09001794static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001795{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001796 struct mv_host_priv *hpriv = ap->host->private_data;
1797 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001798 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001799 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1800
Tejun Heoda3dbb12007-07-16 14:29:40 +09001801 if (ofs != 0xffffffffU) {
1802 *val = readl(addr + ofs);
1803 return 0;
1804 } else
1805 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001806}
1807
Tejun Heoda3dbb12007-07-16 14:29:40 +09001808static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001809{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001810 struct mv_host_priv *hpriv = ap->host->private_data;
1811 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001812 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001813 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1814
Tejun Heoda3dbb12007-07-16 14:29:40 +09001815 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001816 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001817 return 0;
1818 } else
1819 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001820}
1821
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001822static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
Jeff Garzik522479f2005-11-12 22:14:02 -05001823{
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001824 struct pci_dev *pdev = to_pci_dev(host->dev);
Jeff Garzik522479f2005-11-12 22:14:02 -05001825 int early_5080;
1826
Auke Kok44c10132007-06-08 15:46:36 -07001827 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
Jeff Garzik522479f2005-11-12 22:14:02 -05001828
1829 if (!early_5080) {
1830 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1831 tmp |= (1 << 0);
1832 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1833 }
1834
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001835 mv_reset_pci_bus(host, mmio);
Jeff Garzik522479f2005-11-12 22:14:02 -05001836}
1837
/* Put the 50xx flash controller into a known state (magic per vendor
 * driver; exact bit meanings are not documented here).
 */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1842
Jeff Garzik47c2b672005-11-12 21:13:17 -05001843static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001844 void __iomem *mmio)
1845{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001846 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1847 u32 tmp;
1848
1849 tmp = readl(phy_mmio + MV5_PHY_MODE);
1850
1851 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1852 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001853}
1854
Jeff Garzik47c2b672005-11-12 21:13:17 -05001855static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001856{
Jeff Garzik522479f2005-11-12 22:14:02 -05001857 u32 tmp;
1858
1859 writel(0, mmio + MV_GPIO_PORT_CTL);
1860
1861 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1862
1863 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1864 tmp |= ~(1 << 0);
1865 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001866}
1867
Jeff Garzik2a47ce02005-11-12 23:05:14 -05001868static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1869 unsigned int port)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05001870{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001871 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1872 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1873 u32 tmp;
1874 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1875
1876 if (fix_apm_sq) {
1877 tmp = readl(phy_mmio + MV5_LT_MODE);
1878 tmp |= (1 << 19);
1879 writel(tmp, phy_mmio + MV5_LT_MODE);
1880
1881 tmp = readl(phy_mmio + MV5_PHY_CTL);
1882 tmp &= ~0x3;
1883 tmp |= 0x1;
1884 writel(tmp, phy_mmio + MV5_PHY_CTL);
1885 }
1886
1887 tmp = readl(phy_mmio + MV5_PHY_MODE);
1888 tmp &= ~mask;
1889 tmp |= hpriv->signal[port].pre;
1890 tmp |= hpriv->signal[port].amps;
1891 writel(tmp, phy_mmio + MV5_PHY_MODE);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05001892}
1893
Jeff Garzikc9d39132005-11-13 17:47:51 -05001894
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Hard-reset one 50xx port: stop EDMA, reset the channel, then bring
 * every EDMA register back to its power-on value.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_stop_edma_engine(port_mmio);
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1925
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Reset the shared (per-HC) registers of one 50xx host controller. */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	/* mask/merge per vendor driver; bit meanings undocumented here */
	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
1944
1945static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1946 unsigned int n_hc)
1947{
1948 unsigned int hc, port;
1949
1950 for (hc = 0; hc < n_hc; hc++) {
1951 for (port = 0; port < MV_PORTS_PER_HC; port++)
1952 mv5_reset_hc_port(hpriv, mmio,
1953 (hc * MV_PORTS_PER_HC) + port);
1954
1955 mv5_reset_one_hc(hpriv, mmio, hc);
1956 }
1957
1958 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05001959}
1960
Jeff Garzik101ffae2005-11-12 22:17:49 -05001961#undef ZERO
1962#define ZERO(reg) writel(0, mmio + (reg))
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001963static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
Jeff Garzik101ffae2005-11-12 22:17:49 -05001964{
Mark Lord02a121d2007-12-01 13:07:22 -05001965 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzik101ffae2005-11-12 22:17:49 -05001966 u32 tmp;
1967
1968 tmp = readl(mmio + MV_PCI_MODE);
1969 tmp &= 0xff00ffff;
1970 writel(tmp, mmio + MV_PCI_MODE);
1971
1972 ZERO(MV_PCI_DISC_TIMER);
1973 ZERO(MV_PCI_MSI_TRIGGER);
1974 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1975 ZERO(HC_MAIN_IRQ_MASK_OFS);
1976 ZERO(MV_PCI_SERR_MASK);
Mark Lord02a121d2007-12-01 13:07:22 -05001977 ZERO(hpriv->irq_cause_ofs);
1978 ZERO(hpriv->irq_mask_ofs);
Jeff Garzik101ffae2005-11-12 22:17:49 -05001979 ZERO(MV_PCI_ERR_LOW_ADDRESS);
1980 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1981 ZERO(MV_PCI_ERR_ATTRIBUTE);
1982 ZERO(MV_PCI_ERR_COMMAND);
1983}
1984#undef ZERO
1985
1986static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1987{
1988 u32 tmp;
1989
1990 mv5_reset_flash(hpriv, mmio);
1991
1992 tmp = readl(mmio + MV_GPIO_PORT_CTL);
1993 tmp &= 0x3;
1994 tmp |= (1 << 5) | (1 << 6);
1995 writel(tmp, mmio + MV_GPIO_PORT_CTL);
1996}
1997
1998/**
1999 * mv6_reset_hc - Perform the 6xxx global soft reset
2000 * @mmio: base address of the HBA
2001 *
2002 * This routine only applies to 6xxx parts.
2003 *
2004 * LOCKING:
2005 * Inherited from caller.
2006 */
Jeff Garzikc9d39132005-11-13 17:47:51 -05002007static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2008 unsigned int n_hc)
Jeff Garzik101ffae2005-11-12 22:17:49 -05002009{
2010 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2011 int i, rc = 0;
2012 u32 t;
2013
2014 /* Following procedure defined in PCI "main command and status
2015 * register" table.
2016 */
2017 t = readl(reg);
2018 writel(t | STOP_PCI_MASTER, reg);
2019
2020 for (i = 0; i < 1000; i++) {
2021 udelay(1);
2022 t = readl(reg);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04002023 if (PCI_MASTER_EMPTY & t)
Jeff Garzik101ffae2005-11-12 22:17:49 -05002024 break;
Jeff Garzik101ffae2005-11-12 22:17:49 -05002025 }
2026 if (!(PCI_MASTER_EMPTY & t)) {
2027 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2028 rc = 1;
2029 goto done;
2030 }
2031
2032 /* set reset */
2033 i = 5;
2034 do {
2035 writel(t | GLOB_SFT_RST, reg);
2036 t = readl(reg);
2037 udelay(1);
2038 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2039
2040 if (!(GLOB_SFT_RST & t)) {
2041 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2042 rc = 1;
2043 goto done;
2044 }
2045
2046 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2047 i = 5;
2048 do {
2049 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2050 t = readl(reg);
2051 udelay(1);
2052 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2053
2054 if (GLOB_SFT_RST & t) {
2055 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2056 rc = 1;
2057 }
2058done:
2059 return rc;
2060}
2061
Jeff Garzik47c2b672005-11-12 21:13:17 -05002062static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002063 void __iomem *mmio)
2064{
2065 void __iomem *port_mmio;
2066 u32 tmp;
2067
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002068 tmp = readl(mmio + MV_RESET_CFG);
2069 if ((tmp & (1 << 0)) == 0) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002070 hpriv->signal[idx].amps = 0x7 << 8;
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002071 hpriv->signal[idx].pre = 0x1 << 5;
2072 return;
2073 }
2074
2075 port_mmio = mv_port_base(mmio, idx);
2076 tmp = readl(port_mmio + PHY_MODE2);
2077
2078 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2079 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2080}
2081
Jeff Garzik47c2b672005-11-12 21:13:17 -05002082static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002083{
Jeff Garzik47c2b672005-11-12 21:13:17 -05002084 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002085}
2086
Jeff Garzikc9d39132005-11-13 17:47:51 -05002087static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002088 unsigned int port)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002089{
Jeff Garzikc9d39132005-11-13 17:47:51 -05002090 void __iomem *port_mmio = mv_port_base(mmio, port);
2091
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002092 u32 hp_flags = hpriv->hp_flags;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002093 int fix_phy_mode2 =
2094 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002095 int fix_phy_mode4 =
Jeff Garzik47c2b672005-11-12 21:13:17 -05002096 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2097 u32 m2, tmp;
2098
2099 if (fix_phy_mode2) {
2100 m2 = readl(port_mmio + PHY_MODE2);
2101 m2 &= ~(1 << 16);
2102 m2 |= (1 << 31);
2103 writel(m2, port_mmio + PHY_MODE2);
2104
2105 udelay(200);
2106
2107 m2 = readl(port_mmio + PHY_MODE2);
2108 m2 &= ~((1 << 16) | (1 << 31));
2109 writel(m2, port_mmio + PHY_MODE2);
2110
2111 udelay(200);
2112 }
2113
2114 /* who knows what this magic does */
2115 tmp = readl(port_mmio + PHY_MODE3);
2116 tmp &= ~0x7F800000;
2117 tmp |= 0x2A800000;
2118 writel(tmp, port_mmio + PHY_MODE3);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002119
2120 if (fix_phy_mode4) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002121 u32 m4;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002122
2123 m4 = readl(port_mmio + PHY_MODE4);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002124
2125 if (hp_flags & MV_HP_ERRATA_60X1B2)
Mark Lorde12bef52008-03-31 19:33:56 -04002126 tmp = readl(port_mmio + PHY_MODE3);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002127
Mark Lorde12bef52008-03-31 19:33:56 -04002128 /* workaround for errata FEr SATA#10 (part 1) */
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002129 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2130
2131 writel(m4, port_mmio + PHY_MODE4);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002132
2133 if (hp_flags & MV_HP_ERRATA_60X1B2)
Mark Lorde12bef52008-03-31 19:33:56 -04002134 writel(tmp, port_mmio + PHY_MODE3);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002135 }
2136
2137 /* Revert values of pre-emphasis and signal amps to the saved ones */
2138 m2 = readl(port_mmio + PHY_MODE2);
2139
2140 m2 &= ~MV_M2_PREAMP_MASK;
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002141 m2 |= hpriv->signal[port].amps;
2142 m2 |= hpriv->signal[port].pre;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002143 m2 &= ~(1 << 16);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002144
Jeff Garzike4e7b892006-01-31 12:18:41 -05002145 /* according to mvSata 3.6.1, some IIE values are fixed */
2146 if (IS_GEN_IIE(hpriv)) {
2147 m2 &= ~0xC30FF01F;
2148 m2 |= 0x0000900F;
2149 }
2150
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002151 writel(m2, port_mmio + PHY_MODE2);
2152}
2153
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002154/* TODO: use the generic LED interface to configure the SATA Presence */
2155/* & Acitivy LEDs on the board */
2156static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2157 void __iomem *mmio)
2158{
2159 return;
2160}
2161
2162static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2163 void __iomem *mmio)
2164{
2165 void __iomem *port_mmio;
2166 u32 tmp;
2167
2168 port_mmio = mv_port_base(mmio, idx);
2169 tmp = readl(port_mmio + PHY_MODE2);
2170
2171 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2172 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2173}
2174
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/*
 * Quiesce and reset a single SoC SATA port, then re-initialize its
 * EDMA request/response queue registers to a clean state.
 */
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_stop_edma_engine(port_mmio);
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	/* NOTE(review): 0x101f is a magic EDMA config value — verify vs datasheet */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
2204
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
/*
 * Zero a few registers of the (single) SoC host controller block.
 * NOTE(review): offsets 0x00c/0x010/0x014 are undocumented here —
 * presumably HC interrupt/coalescing registers; verify vs datasheet.
 */
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				       void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);

}
2218
2219#undef ZERO
2220
2221static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2222 void __iomem *mmio, unsigned int n_hc)
2223{
2224 unsigned int port;
2225
2226 for (port = 0; port < hpriv->n_ports; port++)
2227 mv_soc_reset_hc_port(hpriv, mmio, port);
2228
2229 mv_soc_reset_one_hc(hpriv, mmio);
2230
2231 return 0;
2232}
2233
2234static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2235 void __iomem *mmio)
2236{
2237 return;
2238}
2239
2240static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2241{
2242 return;
2243}
2244
Mark Lordb5624682008-03-31 19:34:40 -04002245/*
2246 * Caller must ensure that EDMA is not active,
2247 * by first doing mv_stop_edma() where needed.
2248 */
Mark Lorde12bef52008-03-31 19:33:56 -04002249static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
Jeff Garzikc9d39132005-11-13 17:47:51 -05002250 unsigned int port_no)
Brett Russ20f733e2005-09-01 18:26:17 -04002251{
Jeff Garzikc9d39132005-11-13 17:47:51 -05002252 void __iomem *port_mmio = mv_port_base(mmio, port_no);
Brett Russ20f733e2005-09-01 18:26:17 -04002253
Brett Russ31961942005-09-30 01:36:00 -04002254 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002255
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002256 if (IS_GEN_II(hpriv)) {
Mark Lorde12bef52008-03-31 19:33:56 -04002257 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);
Mark Lordeb46d682006-05-19 16:29:21 -04002258 ifctl |= (1 << 7); /* enable gen2i speed */
2259 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
Mark Lorde12bef52008-03-31 19:33:56 -04002260 writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002261 }
2262
Brett Russ20f733e2005-09-01 18:26:17 -04002263 udelay(25); /* allow reset propagation */
2264
2265 /* Spec never mentions clearing the bit. Marvell's driver does
2266 * clear the bit, however.
2267 */
Brett Russ31961942005-09-30 01:36:00 -04002268 writelfl(0, port_mmio + EDMA_CMD_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04002269
Jeff Garzikc9d39132005-11-13 17:47:51 -05002270 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2271
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002272 if (IS_GEN_I(hpriv))
Jeff Garzikc9d39132005-11-13 17:47:51 -05002273 mdelay(1);
2274}
2275
Jeff Garzikc9d39132005-11-13 17:47:51 -05002276/**
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002277 * mv_phy_reset - Perform eDMA reset followed by COMRESET
Jeff Garzikc9d39132005-11-13 17:47:51 -05002278 * @ap: ATA channel to manipulate
2279 *
2280 * Part of this is taken from __sata_phy_reset and modified to
2281 * not sleep since this routine gets called from interrupt level.
2282 *
2283 * LOCKING:
2284 * Inherited from caller. This is coded to safe to call at
2285 * interrupt level, i.e. it does not sleep.
2286 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002287static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2288 unsigned long deadline)
Jeff Garzikc9d39132005-11-13 17:47:51 -05002289{
2290 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikcca39742006-08-24 03:19:22 -04002291 struct mv_host_priv *hpriv = ap->host->private_data;
Jeff Garzikc9d39132005-11-13 17:47:51 -05002292 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzik22374672005-11-17 10:59:48 -05002293 int retry = 5;
2294 u32 sstatus;
Jeff Garzikc9d39132005-11-13 17:47:51 -05002295
2296 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002297
Tejun Heoda3dbb12007-07-16 14:29:40 +09002298#ifdef DEBUG
2299 {
2300 u32 sstatus, serror, scontrol;
2301
2302 mv_scr_read(ap, SCR_STATUS, &sstatus);
2303 mv_scr_read(ap, SCR_ERROR, &serror);
2304 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2305 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
Saeed Bishara2d79ab82007-11-27 17:26:08 +02002306 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
Tejun Heoda3dbb12007-07-16 14:29:40 +09002307 }
2308#endif
Brett Russ20f733e2005-09-01 18:26:17 -04002309
Jeff Garzik22374672005-11-17 10:59:48 -05002310 /* Issue COMRESET via SControl */
2311comreset_retry:
Tejun Heo936fd732007-08-06 18:36:23 +09002312 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002313 msleep(1);
Jeff Garzik22374672005-11-17 10:59:48 -05002314
Tejun Heo936fd732007-08-06 18:36:23 +09002315 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002316 msleep(20);
Jeff Garzik22374672005-11-17 10:59:48 -05002317
Brett Russ31961942005-09-30 01:36:00 -04002318 do {
Tejun Heo936fd732007-08-06 18:36:23 +09002319 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
Andres Salomon62f1d0e2006-09-11 08:51:05 -04002320 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
Brett Russ31961942005-09-30 01:36:00 -04002321 break;
Jeff Garzik22374672005-11-17 10:59:48 -05002322
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002323 msleep(1);
Jeff Garzikc5d3e452007-07-11 18:30:50 -04002324 } while (time_before(jiffies, deadline));
Brett Russ20f733e2005-09-01 18:26:17 -04002325
Jeff Garzik22374672005-11-17 10:59:48 -05002326 /* work around errata */
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002327 if (IS_GEN_II(hpriv) &&
Jeff Garzik22374672005-11-17 10:59:48 -05002328 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2329 (retry-- > 0))
2330 goto comreset_retry;
Jeff Garzik095fec82005-11-12 09:50:49 -05002331
Tejun Heoda3dbb12007-07-16 14:29:40 +09002332#ifdef DEBUG
2333 {
2334 u32 sstatus, serror, scontrol;
2335
2336 mv_scr_read(ap, SCR_STATUS, &sstatus);
2337 mv_scr_read(ap, SCR_ERROR, &serror);
2338 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2339 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2340 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2341 }
2342#endif
Brett Russ31961942005-09-30 01:36:00 -04002343
Tejun Heo936fd732007-08-06 18:36:23 +09002344 if (ata_link_offline(&ap->link)) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002345 *class = ATA_DEV_NONE;
Brett Russ20f733e2005-09-01 18:26:17 -04002346 return;
2347 }
2348
Jeff Garzik22374672005-11-17 10:59:48 -05002349 /* even after SStatus reflects that device is ready,
2350 * it seems to take a while for link to be fully
2351 * established (and thus Status no longer 0x80/0x7F),
2352 * so we poll a bit for that, here.
2353 */
2354 retry = 20;
2355 while (1) {
2356 u8 drv_stat = ata_check_status(ap);
2357 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2358 break;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002359 msleep(500);
Jeff Garzik22374672005-11-17 10:59:48 -05002360 if (retry-- <= 0)
2361 break;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002362 if (time_after(jiffies, deadline))
2363 break;
Jeff Garzik22374672005-11-17 10:59:48 -05002364 }
2365
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002366 /* FIXME: if we passed the deadline, the following
2367 * code probably produces an invalid result
2368 */
Brett Russ20f733e2005-09-01 18:26:17 -04002369
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002370 /* finally, read device signature from TF registers */
Tejun Heo3f198592007-09-02 23:23:57 +09002371 *class = ata_dev_try_classify(ap->link.device, 1, NULL);
Jeff Garzik095fec82005-11-12 09:50:49 -05002372
2373 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2374
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002375 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
Jeff Garzik095fec82005-11-12 09:50:49 -05002376
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002377 VPRINTK("EXIT\n");
Brett Russ20f733e2005-09-01 18:26:17 -04002378}
2379
Tejun Heocc0680a2007-08-06 18:36:23 +09002380static int mv_prereset(struct ata_link *link, unsigned long deadline)
Jeff Garzik22374672005-11-17 10:59:48 -05002381{
Mark Lorde12bef52008-03-31 19:33:56 -04002382 mv_stop_edma(link->ap);
Tejun Heocf480622008-01-24 00:05:14 +09002383 return 0;
Jeff Garzik22374672005-11-17 10:59:48 -05002384}
2385
Tejun Heocc0680a2007-08-06 18:36:23 +09002386static int mv_hardreset(struct ata_link *link, unsigned int *class,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002387 unsigned long deadline)
2388{
Tejun Heocc0680a2007-08-06 18:36:23 +09002389 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002390 struct mv_host_priv *hpriv = ap->host->private_data;
Mark Lordb5624682008-03-31 19:34:40 -04002391 struct mv_port_priv *pp = ap->private_data;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002392 void __iomem *mmio = hpriv->base;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002393
Mark Lorde12bef52008-03-31 19:33:56 -04002394 mv_reset_channel(hpriv, mmio, ap->port_no);
Mark Lordb5624682008-03-31 19:34:40 -04002395 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002396 mv_phy_reset(ap, class, deadline);
2397
2398 return 0;
2399}
2400
Tejun Heocc0680a2007-08-06 18:36:23 +09002401static void mv_postreset(struct ata_link *link, unsigned int *classes)
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002402{
Tejun Heocc0680a2007-08-06 18:36:23 +09002403 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002404 u32 serr;
2405
2406 /* print link status */
Tejun Heocc0680a2007-08-06 18:36:23 +09002407 sata_print_link_status(link);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002408
2409 /* clear SError */
Tejun Heocc0680a2007-08-06 18:36:23 +09002410 sata_scr_read(link, SCR_ERROR, &serr);
2411 sata_scr_write_flush(link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002412
2413 /* bail out if no device is present */
2414 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2415 DPRINTK("EXIT, no device\n");
2416 return;
2417 }
2418
2419 /* set up device control */
2420 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2421}
2422
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002423static void mv_eh_freeze(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04002424{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002425 struct mv_host_priv *hpriv = ap->host->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002426 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2427 u32 tmp, mask;
2428 unsigned int shift;
Brett Russ31961942005-09-30 01:36:00 -04002429
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002430 /* FIXME: handle coalescing completion events properly */
Brett Russ31961942005-09-30 01:36:00 -04002431
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002432 shift = ap->port_no * 2;
2433 if (hc > 0)
2434 shift++;
Brett Russ31961942005-09-30 01:36:00 -04002435
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002436 mask = 0x3 << shift;
Brett Russ31961942005-09-30 01:36:00 -04002437
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002438 /* disable assertion of portN err, done events */
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002439 tmp = readl(hpriv->main_mask_reg_addr);
2440 writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002441}
2442
/*
 * Re-enable interrupt delivery for a port after error handling:
 * clear latched EDMA errors and pending HC interrupt events for
 * this port, then unmask its err/done bits in the main mask register.
 */
static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	/* two mask bits per port; ports on HC1 are shifted one extra bit */
	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;	/* port index relative to its HC */
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);		/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8));	/* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp | mask, hpriv->main_mask_reg_addr);
}
2476
Brett Russ05b308e2005-10-05 17:08:53 -04002477/**
2478 * mv_port_init - Perform some early initialization on a single port.
2479 * @port: libata data structure storing shadow register addresses
2480 * @port_mmio: base address of the port
2481 *
2482 * Initialize shadow register mmio addresses, clear outstanding
2483 * interrupts on the port, and unmask interrupts for the future
2484 * start of the port.
2485 *
2486 * LOCKING:
2487 * Inherited from caller.
2488 */
Brett Russ31961942005-09-30 01:36:00 -04002489static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2490{
Tejun Heo0d5ff562007-02-01 15:06:36 +09002491 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
Brett Russ31961942005-09-30 01:36:00 -04002492 unsigned serr_ofs;
2493
Jeff Garzik8b260242005-11-12 12:32:50 -05002494 /* PIO related setup
Brett Russ31961942005-09-30 01:36:00 -04002495 */
2496 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
Jeff Garzik8b260242005-11-12 12:32:50 -05002497 port->error_addr =
Brett Russ31961942005-09-30 01:36:00 -04002498 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2499 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2500 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2501 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2502 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2503 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
Jeff Garzik8b260242005-11-12 12:32:50 -05002504 port->status_addr =
Brett Russ31961942005-09-30 01:36:00 -04002505 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2506 /* special case: control/altstatus doesn't have ATA_REG_ address */
2507 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2508
2509 /* unused: */
Randy Dunlap8d9db2d2007-02-16 01:40:06 -08002510 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
Brett Russ20f733e2005-09-01 18:26:17 -04002511
Brett Russ31961942005-09-30 01:36:00 -04002512 /* Clear any currently outstanding port interrupt conditions */
2513 serr_ofs = mv_scr_offset(SCR_ERROR);
2514 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2515 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2516
Mark Lord646a4da2008-01-26 18:30:37 -05002517 /* unmask all non-transient EDMA error interrupts */
2518 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04002519
Jeff Garzik8b260242005-11-12 12:32:50 -05002520 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
Brett Russ31961942005-09-30 01:36:00 -04002521 readl(port_mmio + EDMA_CFG_OFS),
2522 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2523 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
Brett Russ20f733e2005-09-01 18:26:17 -04002524}
2525
Tejun Heo4447d352007-04-17 23:44:08 +09002526static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002527{
Tejun Heo4447d352007-04-17 23:44:08 +09002528 struct pci_dev *pdev = to_pci_dev(host->dev);
2529 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002530 u32 hp_flags = hpriv->hp_flags;
2531
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002532 switch (board_idx) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002533 case chip_5080:
2534 hpriv->ops = &mv5xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002535 hp_flags |= MV_HP_GEN_I;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002536
Auke Kok44c10132007-06-08 15:46:36 -07002537 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002538 case 0x1:
2539 hp_flags |= MV_HP_ERRATA_50XXB0;
2540 break;
2541 case 0x3:
2542 hp_flags |= MV_HP_ERRATA_50XXB2;
2543 break;
2544 default:
2545 dev_printk(KERN_WARNING, &pdev->dev,
2546 "Applying 50XXB2 workarounds to unknown rev\n");
2547 hp_flags |= MV_HP_ERRATA_50XXB2;
2548 break;
2549 }
2550 break;
2551
2552 case chip_504x:
2553 case chip_508x:
2554 hpriv->ops = &mv5xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002555 hp_flags |= MV_HP_GEN_I;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002556
Auke Kok44c10132007-06-08 15:46:36 -07002557 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002558 case 0x0:
2559 hp_flags |= MV_HP_ERRATA_50XXB0;
2560 break;
2561 case 0x3:
2562 hp_flags |= MV_HP_ERRATA_50XXB2;
2563 break;
2564 default:
2565 dev_printk(KERN_WARNING, &pdev->dev,
2566 "Applying B2 workarounds to unknown rev\n");
2567 hp_flags |= MV_HP_ERRATA_50XXB2;
2568 break;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002569 }
2570 break;
2571
2572 case chip_604x:
2573 case chip_608x:
Jeff Garzik47c2b672005-11-12 21:13:17 -05002574 hpriv->ops = &mv6xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002575 hp_flags |= MV_HP_GEN_II;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002576
Auke Kok44c10132007-06-08 15:46:36 -07002577 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002578 case 0x7:
2579 hp_flags |= MV_HP_ERRATA_60X1B2;
2580 break;
2581 case 0x9:
2582 hp_flags |= MV_HP_ERRATA_60X1C0;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002583 break;
2584 default:
2585 dev_printk(KERN_WARNING, &pdev->dev,
Jeff Garzik47c2b672005-11-12 21:13:17 -05002586 "Applying B2 workarounds to unknown rev\n");
2587 hp_flags |= MV_HP_ERRATA_60X1B2;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002588 break;
2589 }
2590 break;
2591
Jeff Garzike4e7b892006-01-31 12:18:41 -05002592 case chip_7042:
Mark Lord02a121d2007-12-01 13:07:22 -05002593 hp_flags |= MV_HP_PCIE;
Mark Lord306b30f2007-12-04 14:07:52 -05002594 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2595 (pdev->device == 0x2300 || pdev->device == 0x2310))
2596 {
Mark Lord4e520032007-12-11 12:58:05 -05002597 /*
2598 * Highpoint RocketRAID PCIe 23xx series cards:
2599 *
2600 * Unconfigured drives are treated as "Legacy"
2601 * by the BIOS, and it overwrites sector 8 with
2602 * a "Lgcy" metadata block prior to Linux boot.
2603 *
2604 * Configured drives (RAID or JBOD) leave sector 8
2605 * alone, but instead overwrite a high numbered
2606 * sector for the RAID metadata. This sector can
2607 * be determined exactly, by truncating the physical
2608 * drive capacity to a nice even GB value.
2609 *
2610 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2611 *
2612 * Warn the user, lest they think we're just buggy.
2613 */
2614 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2615 " BIOS CORRUPTS DATA on all attached drives,"
2616 " regardless of if/how they are configured."
2617 " BEWARE!\n");
2618 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2619 " use sectors 8-9 on \"Legacy\" drives,"
2620 " and avoid the final two gigabytes on"
2621 " all RocketRAID BIOS initialized drives.\n");
Mark Lord306b30f2007-12-04 14:07:52 -05002622 }
Jeff Garzike4e7b892006-01-31 12:18:41 -05002623 case chip_6042:
2624 hpriv->ops = &mv6xxx_ops;
Jeff Garzike4e7b892006-01-31 12:18:41 -05002625 hp_flags |= MV_HP_GEN_IIE;
2626
Auke Kok44c10132007-06-08 15:46:36 -07002627 switch (pdev->revision) {
Jeff Garzike4e7b892006-01-31 12:18:41 -05002628 case 0x0:
2629 hp_flags |= MV_HP_ERRATA_XX42A0;
2630 break;
2631 case 0x1:
2632 hp_flags |= MV_HP_ERRATA_60X1C0;
2633 break;
2634 default:
2635 dev_printk(KERN_WARNING, &pdev->dev,
2636 "Applying 60X1C0 workarounds to unknown rev\n");
2637 hp_flags |= MV_HP_ERRATA_60X1C0;
2638 break;
2639 }
2640 break;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002641 case chip_soc:
2642 hpriv->ops = &mv_soc_ops;
2643 hp_flags |= MV_HP_ERRATA_60X1C0;
2644 break;
Jeff Garzike4e7b892006-01-31 12:18:41 -05002645
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002646 default:
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002647 dev_printk(KERN_ERR, host->dev,
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002648 "BUG: invalid board index %u\n", board_idx);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002649 return 1;
2650 }
2651
2652 hpriv->hp_flags = hp_flags;
Mark Lord02a121d2007-12-01 13:07:22 -05002653 if (hp_flags & MV_HP_PCIE) {
2654 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2655 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2656 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2657 } else {
2658 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2659 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2660 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2661 }
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002662
2663 return 0;
2664}
2665
Brett Russ05b308e2005-10-05 17:08:53 -04002666/**
Jeff Garzik47c2b672005-11-12 21:13:17 -05002667 * mv_init_host - Perform some early initialization of the host.
Tejun Heo4447d352007-04-17 23:44:08 +09002668 * @host: ATA host to initialize
2669 * @board_idx: controller index
Brett Russ05b308e2005-10-05 17:08:53 -04002670 *
2671 * If possible, do an early global reset of the host. Then do
2672 * our port init and clear/unmask all/relevant host interrupts.
2673 *
2674 * LOCKING:
2675 * Inherited from caller.
2676 */
Tejun Heo4447d352007-04-17 23:44:08 +09002677static int mv_init_host(struct ata_host *host, unsigned int board_idx)
Brett Russ20f733e2005-09-01 18:26:17 -04002678{
2679 int rc = 0, n_hc, port, hc;
Tejun Heo4447d352007-04-17 23:44:08 +09002680 struct mv_host_priv *hpriv = host->private_data;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002681 void __iomem *mmio = hpriv->base;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002682
Tejun Heo4447d352007-04-17 23:44:08 +09002683 rc = mv_chip_id(host, board_idx);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002684 if (rc)
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002685 goto done;
2686
2687 if (HAS_PCI(host)) {
2688 hpriv->main_cause_reg_addr = hpriv->base +
2689 HC_MAIN_IRQ_CAUSE_OFS;
2690 hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
2691 } else {
2692 hpriv->main_cause_reg_addr = hpriv->base +
2693 HC_SOC_MAIN_IRQ_CAUSE_OFS;
2694 hpriv->main_mask_reg_addr = hpriv->base +
2695 HC_SOC_MAIN_IRQ_MASK_OFS;
2696 }
2697 /* global interrupt mask */
2698 writel(0, hpriv->main_mask_reg_addr);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002699
Tejun Heo4447d352007-04-17 23:44:08 +09002700 n_hc = mv_get_hc_count(host->ports[0]->flags);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002701
Tejun Heo4447d352007-04-17 23:44:08 +09002702 for (port = 0; port < host->n_ports; port++)
Jeff Garzik47c2b672005-11-12 21:13:17 -05002703 hpriv->ops->read_preamp(hpriv, port, mmio);
Brett Russ20f733e2005-09-01 18:26:17 -04002704
Jeff Garzikc9d39132005-11-13 17:47:51 -05002705 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002706 if (rc)
Brett Russ20f733e2005-09-01 18:26:17 -04002707 goto done;
Brett Russ20f733e2005-09-01 18:26:17 -04002708
Jeff Garzik522479f2005-11-12 22:14:02 -05002709 hpriv->ops->reset_flash(hpriv, mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002710 hpriv->ops->reset_bus(host, mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002711 hpriv->ops->enable_leds(hpriv, mmio);
Brett Russ20f733e2005-09-01 18:26:17 -04002712
Tejun Heo4447d352007-04-17 23:44:08 +09002713 for (port = 0; port < host->n_ports; port++) {
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002714 if (IS_GEN_II(hpriv)) {
Jeff Garzikc9d39132005-11-13 17:47:51 -05002715 void __iomem *port_mmio = mv_port_base(mmio, port);
2716
Mark Lorde12bef52008-03-31 19:33:56 -04002717 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);
Mark Lordeb46d682006-05-19 16:29:21 -04002718 ifctl |= (1 << 7); /* enable gen2i speed */
2719 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
Mark Lorde12bef52008-03-31 19:33:56 -04002720 writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002721 }
2722
Jeff Garzikc9d39132005-11-13 17:47:51 -05002723 hpriv->ops->phy_errata(hpriv, mmio, port);
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002724 }
2725
Tejun Heo4447d352007-04-17 23:44:08 +09002726 for (port = 0; port < host->n_ports; port++) {
Tejun Heocbcdd872007-08-18 13:14:55 +09002727 struct ata_port *ap = host->ports[port];
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002728 void __iomem *port_mmio = mv_port_base(mmio, port);
Tejun Heocbcdd872007-08-18 13:14:55 +09002729
2730 mv_port_init(&ap->ioaddr, port_mmio);
2731
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002732#ifdef CONFIG_PCI
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002733 if (HAS_PCI(host)) {
2734 unsigned int offset = port_mmio - mmio;
2735 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2736 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2737 }
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002738#endif
Brett Russ20f733e2005-09-01 18:26:17 -04002739 }
2740
2741 for (hc = 0; hc < n_hc; hc++) {
Brett Russ31961942005-09-30 01:36:00 -04002742 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2743
2744 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2745 "(before clear)=0x%08x\n", hc,
2746 readl(hc_mmio + HC_CFG_OFS),
2747 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2748
2749 /* Clear any currently outstanding hc interrupt conditions */
2750 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04002751 }
2752
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002753 if (HAS_PCI(host)) {
2754 /* Clear any currently outstanding host interrupt conditions */
2755 writelfl(0, mmio + hpriv->irq_cause_ofs);
Brett Russ31961942005-09-30 01:36:00 -04002756
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002757 /* and unmask interrupt generation for host regs */
2758 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2759 if (IS_GEN_I(hpriv))
2760 writelfl(~HC_MAIN_MASKED_IRQS_5,
2761 hpriv->main_mask_reg_addr);
2762 else
2763 writelfl(~HC_MAIN_MASKED_IRQS,
2764 hpriv->main_mask_reg_addr);
Jeff Garzikfb621e22007-02-25 04:19:45 -05002765
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002766 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2767 "PCI int cause/mask=0x%08x/0x%08x\n",
2768 readl(hpriv->main_cause_reg_addr),
2769 readl(hpriv->main_mask_reg_addr),
2770 readl(mmio + hpriv->irq_cause_ofs),
2771 readl(mmio + hpriv->irq_mask_ofs));
2772 } else {
2773 writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2774 hpriv->main_mask_reg_addr);
2775 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2776 readl(hpriv->main_cause_reg_addr),
2777 readl(hpriv->main_mask_reg_addr));
2778 }
Brett Russ31961942005-09-30 01:36:00 -04002779done:
Brett Russ20f733e2005-09-01 18:26:17 -04002780 return rc;
2781}
2782
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002783static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2784{
2785 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2786 MV_CRQB_Q_SZ, 0);
2787 if (!hpriv->crqb_pool)
2788 return -ENOMEM;
2789
2790 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2791 MV_CRPB_Q_SZ, 0);
2792 if (!hpriv->crpb_pool)
2793 return -ENOMEM;
2794
2795 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2796 MV_SG_TBL_SZ, 0);
2797 if (!hpriv->sg_tbl_pool)
2798 return -ENOMEM;
2799
2800 return 0;
2801}
2802
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002803/**
2804 * mv_platform_probe - handle a positive probe of an soc Marvell
2805 * host
2806 * @pdev: platform device found
2807 *
2808 * LOCKING:
2809 * Inherited from caller.
2810 */
2811static int mv_platform_probe(struct platform_device *pdev)
2812{
2813 static int printed_version;
2814 const struct mv_sata_platform_data *mv_platform_data;
2815 const struct ata_port_info *ppi[] =
2816 { &mv_port_info[chip_soc], NULL };
2817 struct ata_host *host;
2818 struct mv_host_priv *hpriv;
2819 struct resource *res;
2820 int n_ports, rc;
2821
2822 if (!printed_version++)
2823 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2824
2825 /*
2826 * Simple resource validation ..
2827 */
2828 if (unlikely(pdev->num_resources != 2)) {
2829 dev_err(&pdev->dev, "invalid number of resources\n");
2830 return -EINVAL;
2831 }
2832
2833 /*
2834 * Get the register base first
2835 */
2836 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2837 if (res == NULL)
2838 return -EINVAL;
2839
2840 /* allocate host */
2841 mv_platform_data = pdev->dev.platform_data;
2842 n_ports = mv_platform_data->n_ports;
2843
2844 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2845 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2846
2847 if (!host || !hpriv)
2848 return -ENOMEM;
2849 host->private_data = hpriv;
2850 hpriv->n_ports = n_ports;
2851
2852 host->iomap = NULL;
Saeed Bisharaf1cb0ea2008-02-18 07:42:28 -11002853 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2854 res->end - res->start + 1);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002855 hpriv->base -= MV_SATAHC0_REG_BASE;
2856
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002857 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2858 if (rc)
2859 return rc;
2860
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002861 /* initialize adapter */
2862 rc = mv_init_host(host, chip_soc);
2863 if (rc)
2864 return rc;
2865
2866 dev_printk(KERN_INFO, &pdev->dev,
2867 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2868 host->n_ports);
2869
2870 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2871 IRQF_SHARED, &mv6_sht);
2872}
2873
2874/*
2875 *
2876 * mv_platform_remove - unplug a platform interface
2877 * @pdev: platform device
2878 *
2879 * A platform bus SATA device has been unplugged. Perform the needed
2880 * cleanup. Also called on module unload for any active devices.
2881 */
2882static int __devexit mv_platform_remove(struct platform_device *pdev)
2883{
2884 struct device *dev = &pdev->dev;
2885 struct ata_host *host = dev_get_drvdata(dev);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002886
2887 ata_host_detach(host);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002888 return 0;
2889}
2890
/* Platform-bus glue for SoC-integrated (non-PCI) Marvell SATA controllers */
static struct platform_driver mv_platform_driver = {
	.probe			= mv_platform_probe,
	.remove			= __devexit_p(mv_platform_remove),
	.driver			= {
				   .name = DRV_NAME,
				   .owner = THIS_MODULE,
				  },
};
2899
2900
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002901#ifdef CONFIG_PCI
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002902static int mv_pci_init_one(struct pci_dev *pdev,
2903 const struct pci_device_id *ent);
2904
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002905
2906static struct pci_driver mv_pci_driver = {
2907 .name = DRV_NAME,
2908 .id_table = mv_pci_tbl,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002909 .probe = mv_pci_init_one,
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002910 .remove = ata_pci_remove_one,
2911};
2912
2913/*
2914 * module options
2915 */
2916static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
2917
2918
2919/* move to PCI layer or libata core? */
2920static int pci_go_64(struct pci_dev *pdev)
2921{
2922 int rc;
2923
2924 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2925 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2926 if (rc) {
2927 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2928 if (rc) {
2929 dev_printk(KERN_ERR, &pdev->dev,
2930 "64-bit DMA enable failed\n");
2931 return rc;
2932 }
2933 }
2934 } else {
2935 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2936 if (rc) {
2937 dev_printk(KERN_ERR, &pdev->dev,
2938 "32-bit DMA enable failed\n");
2939 return rc;
2940 }
2941 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2942 if (rc) {
2943 dev_printk(KERN_ERR, &pdev->dev,
2944 "32-bit consistent DMA enable failed\n");
2945 return rc;
2946 }
2947 }
2948
2949 return rc;
2950}
2951
Brett Russ05b308e2005-10-05 17:08:53 -04002952/**
2953 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002954 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002955 *
2956 * FIXME: complete this.
2957 *
2958 * LOCKING:
2959 * Inherited from caller.
2960 */
Tejun Heo4447d352007-04-17 23:44:08 +09002961static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002962{
Tejun Heo4447d352007-04-17 23:44:08 +09002963 struct pci_dev *pdev = to_pci_dev(host->dev);
2964 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07002965 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002966 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002967
2968 /* Use this to determine the HW stepping of the chip so we know
2969 * what errata to workaround
2970 */
Brett Russ31961942005-09-30 01:36:00 -04002971 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2972 if (scc == 0)
2973 scc_s = "SCSI";
2974 else if (scc == 0x01)
2975 scc_s = "RAID";
2976 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002977 scc_s = "?";
2978
2979 if (IS_GEN_I(hpriv))
2980 gen = "I";
2981 else if (IS_GEN_II(hpriv))
2982 gen = "II";
2983 else if (IS_GEN_IIE(hpriv))
2984 gen = "IIE";
2985 else
2986 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002987
Jeff Garzika9524a72005-10-30 14:39:11 -05002988 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002989 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2990 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002991 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2992}
2993
Brett Russ05b308e2005-10-05 17:08:53 -04002994/**
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002995 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
Brett Russ05b308e2005-10-05 17:08:53 -04002996 * @pdev: PCI device found
2997 * @ent: PCI device ID entry for the matched host
2998 *
2999 * LOCKING:
3000 * Inherited from caller.
3001 */
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003002static int mv_pci_init_one(struct pci_dev *pdev,
3003 const struct pci_device_id *ent)
Brett Russ20f733e2005-09-01 18:26:17 -04003004{
Jeff Garzik2dcb4072007-10-19 06:42:56 -04003005 static int printed_version;
Brett Russ20f733e2005-09-01 18:26:17 -04003006 unsigned int board_idx = (unsigned int)ent->driver_data;
Tejun Heo4447d352007-04-17 23:44:08 +09003007 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
3008 struct ata_host *host;
3009 struct mv_host_priv *hpriv;
3010 int n_ports, rc;
Brett Russ20f733e2005-09-01 18:26:17 -04003011
Jeff Garzika9524a72005-10-30 14:39:11 -05003012 if (!printed_version++)
3013 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
Brett Russ20f733e2005-09-01 18:26:17 -04003014
Tejun Heo4447d352007-04-17 23:44:08 +09003015 /* allocate host */
3016 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
3017
3018 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3019 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3020 if (!host || !hpriv)
3021 return -ENOMEM;
3022 host->private_data = hpriv;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003023 hpriv->n_ports = n_ports;
Tejun Heo4447d352007-04-17 23:44:08 +09003024
3025 /* acquire resources */
Tejun Heo24dc5f32007-01-20 16:00:28 +09003026 rc = pcim_enable_device(pdev);
3027 if (rc)
Brett Russ20f733e2005-09-01 18:26:17 -04003028 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04003029
Tejun Heo0d5ff562007-02-01 15:06:36 +09003030 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
3031 if (rc == -EBUSY)
Tejun Heo24dc5f32007-01-20 16:00:28 +09003032 pcim_pin_device(pdev);
Tejun Heo0d5ff562007-02-01 15:06:36 +09003033 if (rc)
Tejun Heo24dc5f32007-01-20 16:00:28 +09003034 return rc;
Tejun Heo4447d352007-04-17 23:44:08 +09003035 host->iomap = pcim_iomap_table(pdev);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003036 hpriv->base = host->iomap[MV_PRIMARY_BAR];
Brett Russ20f733e2005-09-01 18:26:17 -04003037
Jeff Garzikd88184f2007-02-26 01:26:06 -05003038 rc = pci_go_64(pdev);
3039 if (rc)
3040 return rc;
3041
Mark Lordda2fa9b2008-01-26 18:32:45 -05003042 rc = mv_create_dma_pools(hpriv, &pdev->dev);
3043 if (rc)
3044 return rc;
3045
Brett Russ20f733e2005-09-01 18:26:17 -04003046 /* initialize adapter */
Tejun Heo4447d352007-04-17 23:44:08 +09003047 rc = mv_init_host(host, board_idx);
Tejun Heo24dc5f32007-01-20 16:00:28 +09003048 if (rc)
3049 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04003050
Brett Russ31961942005-09-30 01:36:00 -04003051 /* Enable interrupts */
Tejun Heo6a59dcf2007-02-24 15:12:31 +09003052 if (msi && pci_enable_msi(pdev))
Brett Russ31961942005-09-30 01:36:00 -04003053 pci_intx(pdev, 1);
Brett Russ20f733e2005-09-01 18:26:17 -04003054
Brett Russ31961942005-09-30 01:36:00 -04003055 mv_dump_pci_cfg(pdev, 0x68);
Tejun Heo4447d352007-04-17 23:44:08 +09003056 mv_print_info(host);
Brett Russ20f733e2005-09-01 18:26:17 -04003057
Tejun Heo4447d352007-04-17 23:44:08 +09003058 pci_set_master(pdev);
Jeff Garzikea8b4db2007-07-17 02:21:50 -04003059 pci_try_set_mwi(pdev);
Tejun Heo4447d352007-04-17 23:44:08 +09003060 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
Jeff Garzikc5d3e452007-07-11 18:30:50 -04003061 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
Brett Russ20f733e2005-09-01 18:26:17 -04003062}
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003063#endif
Brett Russ20f733e2005-09-01 18:26:17 -04003064
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003065static int mv_platform_probe(struct platform_device *pdev);
3066static int __devexit mv_platform_remove(struct platform_device *pdev);
3067
Brett Russ20f733e2005-09-01 18:26:17 -04003068static int __init mv_init(void)
3069{
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003070 int rc = -ENODEV;
3071#ifdef CONFIG_PCI
3072 rc = pci_register_driver(&mv_pci_driver);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003073 if (rc < 0)
3074 return rc;
3075#endif
3076 rc = platform_driver_register(&mv_platform_driver);
3077
3078#ifdef CONFIG_PCI
3079 if (rc < 0)
3080 pci_unregister_driver(&mv_pci_driver);
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003081#endif
3082 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04003083}
3084
/* Module exit: unregister both bus drivers registered by mv_init() */
static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}
3092
3093MODULE_AUTHOR("Brett Russ");
3094MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
3095MODULE_LICENSE("GPL");
3096MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
3097MODULE_VERSION(DRV_VERSION);
Martin Michlmayr2e7e1212008-02-16 18:15:27 +01003098MODULE_ALIAS("platform:sata_mv");
Brett Russ20f733e2005-09-01 18:26:17 -04003099
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003100#ifdef CONFIG_PCI
Jeff Garzikddef9bb2006-02-02 16:17:06 -05003101module_param(msi, int, 0444);
3102MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003103#endif
Jeff Garzikddef9bb2006-02-02 16:17:06 -05003104
Brett Russ20f733e2005-09-01 18:26:17 -04003105module_init(mv_init);
3106module_exit(mv_exit);