blob: 89a798221a6cea9c0731cd1bf20e0d3cec44c1bc [file] [log] [blame]
Brett Russ20f733e2005-09-01 18:26:17 -04001/*
2 * sata_mv.c - Marvell SATA support
3 *
Mark Lorde12bef52008-03-31 19:33:56 -04004 * Copyright 2008: Marvell Corporation, all rights reserved.
Jeff Garzik8b260242005-11-12 12:32:50 -05005 * Copyright 2005: EMC Corporation, all rights reserved.
Jeff Garzike2b1be52005-11-18 14:04:23 -05006 * Copyright 2005 Red Hat, Inc. All rights reserved.
Brett Russ20f733e2005-09-01 18:26:17 -04007 *
8 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2 of the License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 */
24
Jeff Garzik4a05e202007-05-24 23:40:15 -040025/*
26 sata_mv TODO list:
27
28 1) Needs a full errata audit for all chipsets. I implemented most
29 of the errata workarounds found in the Marvell vendor driver, but
30 I distinctly remember a couple workarounds (one related to PCI-X)
31 are still needed.
32
Mark Lord1fd2e1c2008-01-26 18:33:59 -050033 2) Improve/fix IRQ and error handling sequences.
34
35 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
36
37 4) Think about TCQ support here, and for libata in general
38 with controllers that suppport it via host-queuing hardware
39 (a software-only implementation could be a nightmare).
Jeff Garzik4a05e202007-05-24 23:40:15 -040040
41 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
42
43 6) Add port multiplier support (intermediate)
44
Jeff Garzik4a05e202007-05-24 23:40:15 -040045 8) Develop a low-power-consumption strategy, and implement it.
46
47 9) [Experiment, low priority] See if ATAPI can be supported using
48 "unknown FIS" or "vendor-specific FIS" support, or something creative
49 like that.
50
51 10) [Experiment, low priority] Investigate interrupt coalescing.
52 Quite often, especially with PCI Message Signalled Interrupts (MSI),
53 the overhead reduced by interrupt mitigation is quite often not
54 worth the latency cost.
55
56 11) [Experiment, Marvell value added] Is it possible to use target
57 mode to cross-connect two Linux boxes with Marvell cards? If so,
58 creating LibATA target mode support would be very interesting.
59
60 Target mode, for those without docs, is the ability to directly
61 connect two SATA controllers.
62
Jeff Garzik4a05e202007-05-24 23:40:15 -040063*/
64
Brett Russ20f733e2005-09-01 18:26:17 -040065#include <linux/kernel.h>
66#include <linux/module.h>
67#include <linux/pci.h>
68#include <linux/init.h>
69#include <linux/blkdev.h>
70#include <linux/delay.h>
71#include <linux/interrupt.h>
Andrew Morton8d8b6002008-02-04 23:43:44 -080072#include <linux/dmapool.h>
Brett Russ20f733e2005-09-01 18:26:17 -040073#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050074#include <linux/device.h>
Saeed Bisharaf351b2d2008-02-01 18:08:03 -050075#include <linux/platform_device.h>
76#include <linux/ata_platform.h>
Brett Russ20f733e2005-09-01 18:26:17 -040077#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050078#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040079#include <scsi/scsi_device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040080#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040081
82#define DRV_NAME "sata_mv"
Mark Lord1fd2e1c2008-01-26 18:33:59 -050083#define DRV_VERSION "1.20"
Brett Russ20f733e2005-09-01 18:26:17 -040084
85enum {
86 /* BAR's are enumerated in terms of pci_resource_start() terms */
87 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
88 MV_IO_BAR = 2, /* offset 0x18: IO space */
89 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
90
91 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
92 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
93
94 MV_PCI_REG_BASE = 0,
95 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
Mark Lord615ab952006-05-19 16:24:56 -040096 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
97 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
98 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
99 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
100 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
101
Brett Russ20f733e2005-09-01 18:26:17 -0400102 MV_SATAHC0_REG_BASE = 0x20000,
Jeff Garzik522479f2005-11-12 22:14:02 -0500103 MV_FLASH_CTL = 0x1046c,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500104 MV_GPIO_PORT_CTL = 0x104f0,
105 MV_RESET_CFG = 0x180d8,
Brett Russ20f733e2005-09-01 18:26:17 -0400106
107 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
108 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
109 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
110 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
111
Brett Russ31961942005-09-30 01:36:00 -0400112 MV_MAX_Q_DEPTH = 32,
113 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
114
115 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
116 * CRPB needs alignment on a 256B boundary. Size == 256B
Brett Russ31961942005-09-30 01:36:00 -0400117 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
118 */
119 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
120 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
Mark Lordda2fa9b2008-01-26 18:32:45 -0500121 MV_MAX_SG_CT = 256,
Brett Russ31961942005-09-30 01:36:00 -0400122 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
Brett Russ31961942005-09-30 01:36:00 -0400123
Brett Russ20f733e2005-09-01 18:26:17 -0400124 MV_PORTS_PER_HC = 4,
125 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
126 MV_PORT_HC_SHIFT = 2,
Brett Russ31961942005-09-30 01:36:00 -0400127 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
Brett Russ20f733e2005-09-01 18:26:17 -0400128 MV_PORT_MASK = 3,
129
130 /* Host Flags */
131 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
132 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100133 /* SoC integrated controllers, no PCI interface */
Mark Lorde12bef52008-03-31 19:33:56 -0400134 MV_FLAG_SOC = (1 << 28),
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100135
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400136 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400137 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
138 ATA_FLAG_PIO_POLLING,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500139 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
Brett Russ20f733e2005-09-01 18:26:17 -0400140
Brett Russ31961942005-09-30 01:36:00 -0400141 CRQB_FLAG_READ = (1 << 0),
142 CRQB_TAG_SHIFT = 1,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400143 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
Mark Lorde12bef52008-03-31 19:33:56 -0400144 CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400145 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
Brett Russ31961942005-09-30 01:36:00 -0400146 CRQB_CMD_ADDR_SHIFT = 8,
147 CRQB_CMD_CS = (0x2 << 11),
148 CRQB_CMD_LAST = (1 << 15),
149
150 CRPB_FLAG_STATUS_SHIFT = 8,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400151 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
152 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
Brett Russ31961942005-09-30 01:36:00 -0400153
154 EPRD_FLAG_END_OF_TBL = (1 << 31),
155
Brett Russ20f733e2005-09-01 18:26:17 -0400156 /* PCI interface registers */
157
Brett Russ31961942005-09-30 01:36:00 -0400158 PCI_COMMAND_OFS = 0xc00,
159
Brett Russ20f733e2005-09-01 18:26:17 -0400160 PCI_MAIN_CMD_STS_OFS = 0xd30,
161 STOP_PCI_MASTER = (1 << 2),
162 PCI_MASTER_EMPTY = (1 << 3),
163 GLOB_SFT_RST = (1 << 4),
164
Jeff Garzik522479f2005-11-12 22:14:02 -0500165 MV_PCI_MODE = 0xd00,
166 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
167 MV_PCI_DISC_TIMER = 0xd04,
168 MV_PCI_MSI_TRIGGER = 0xc38,
169 MV_PCI_SERR_MASK = 0xc28,
170 MV_PCI_XBAR_TMOUT = 0x1d04,
171 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
172 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
173 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
174 MV_PCI_ERR_COMMAND = 0x1d50,
175
Mark Lord02a121d2007-12-01 13:07:22 -0500176 PCI_IRQ_CAUSE_OFS = 0x1d58,
177 PCI_IRQ_MASK_OFS = 0x1d5c,
Brett Russ20f733e2005-09-01 18:26:17 -0400178 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
179
Mark Lord02a121d2007-12-01 13:07:22 -0500180 PCIE_IRQ_CAUSE_OFS = 0x1900,
181 PCIE_IRQ_MASK_OFS = 0x1910,
Mark Lord646a4da2008-01-26 18:30:37 -0500182 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
Mark Lord02a121d2007-12-01 13:07:22 -0500183
Brett Russ20f733e2005-09-01 18:26:17 -0400184 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
185 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500186 HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
187 HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
Brett Russ20f733e2005-09-01 18:26:17 -0400188 PORT0_ERR = (1 << 0), /* shift by port # */
189 PORT0_DONE = (1 << 1), /* shift by port # */
190 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
191 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
192 PCI_ERR = (1 << 18),
193 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
194 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500195 PORTS_0_3_COAL_DONE = (1 << 8),
196 PORTS_4_7_COAL_DONE = (1 << 17),
Brett Russ20f733e2005-09-01 18:26:17 -0400197 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
198 GPIO_INT = (1 << 22),
199 SELF_INT = (1 << 23),
200 TWSI_INT = (1 << 24),
201 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500202 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
Mark Lorde12bef52008-03-31 19:33:56 -0400203 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
Jeff Garzik8b260242005-11-12 12:32:50 -0500204 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
Brett Russ20f733e2005-09-01 18:26:17 -0400205 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
206 HC_MAIN_RSVD),
Jeff Garzikfb621e22007-02-25 04:19:45 -0500207 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
208 HC_MAIN_RSVD_5),
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500209 HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
Brett Russ20f733e2005-09-01 18:26:17 -0400210
211 /* SATAHC registers */
212 HC_CFG_OFS = 0,
213
214 HC_IRQ_CAUSE_OFS = 0x14,
Brett Russ31961942005-09-30 01:36:00 -0400215 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
Brett Russ20f733e2005-09-01 18:26:17 -0400216 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
217 DEV_IRQ = (1 << 8), /* shift by port # */
218
219 /* Shadow block registers */
Brett Russ31961942005-09-30 01:36:00 -0400220 SHD_BLK_OFS = 0x100,
221 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
Brett Russ20f733e2005-09-01 18:26:17 -0400222
223 /* SATA registers */
224 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
225 SATA_ACTIVE_OFS = 0x350,
Mark Lord0c589122008-01-26 18:31:16 -0500226 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
Mark Lord17c5aab2008-04-16 14:56:51 -0400227
Mark Lorde12bef52008-03-31 19:33:56 -0400228 LTMODE_OFS = 0x30c,
Mark Lord17c5aab2008-04-16 14:56:51 -0400229 LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */
230
Jeff Garzik47c2b672005-11-12 21:13:17 -0500231 PHY_MODE3 = 0x310,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500232 PHY_MODE4 = 0x314,
233 PHY_MODE2 = 0x330,
Mark Lorde12bef52008-03-31 19:33:56 -0400234 SATA_IFCTL_OFS = 0x344,
235 SATA_IFSTAT_OFS = 0x34c,
236 VENDOR_UNIQUE_FIS_OFS = 0x35c,
Mark Lord17c5aab2008-04-16 14:56:51 -0400237
Mark Lorde12bef52008-03-31 19:33:56 -0400238 FIS_CFG_OFS = 0x360,
Mark Lord17c5aab2008-04-16 14:56:51 -0400239 FIS_CFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */
240
Jeff Garzikc9d39132005-11-13 17:47:51 -0500241 MV5_PHY_MODE = 0x74,
242 MV5_LT_MODE = 0x30,
243 MV5_PHY_CTL = 0x0C,
Mark Lorde12bef52008-03-31 19:33:56 -0400244 SATA_INTERFACE_CFG = 0x050,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500245
246 MV_M2_PREAMP_MASK = 0x7e0,
Brett Russ20f733e2005-09-01 18:26:17 -0400247
248 /* Port registers */
249 EDMA_CFG_OFS = 0,
Mark Lord0c589122008-01-26 18:31:16 -0500250 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
251 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
252 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
253 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
254 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
Mark Lorde12bef52008-03-31 19:33:56 -0400255 EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */
256 EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */
Brett Russ20f733e2005-09-01 18:26:17 -0400257
258 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
259 EDMA_ERR_IRQ_MASK_OFS = 0xc,
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400260 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
261 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
262 EDMA_ERR_DEV = (1 << 2), /* device error */
263 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
264 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
265 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400266 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
267 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400268 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400269 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400270 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
271 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
272 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
273 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
Mark Lord646a4da2008-01-26 18:30:37 -0500274
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400275 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500276 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
277 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
278 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
279 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
280
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400281 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500282
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400283 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500284 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
285 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
286 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
287 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
288 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
289
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400290 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500291
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400292 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400293 EDMA_ERR_OVERRUN_5 = (1 << 5),
294 EDMA_ERR_UNDERRUN_5 = (1 << 6),
Mark Lord646a4da2008-01-26 18:30:37 -0500295
296 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
297 EDMA_ERR_LNK_CTRL_RX_1 |
298 EDMA_ERR_LNK_CTRL_RX_3 |
299 EDMA_ERR_LNK_CTRL_TX,
300
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400301 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
302 EDMA_ERR_PRD_PAR |
303 EDMA_ERR_DEV_DCON |
304 EDMA_ERR_DEV_CON |
305 EDMA_ERR_SERR |
306 EDMA_ERR_SELF_DIS |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400307 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400308 EDMA_ERR_CRPB_PAR |
309 EDMA_ERR_INTRL_PAR |
310 EDMA_ERR_IORDY |
311 EDMA_ERR_LNK_CTRL_RX_2 |
312 EDMA_ERR_LNK_DATA_RX |
313 EDMA_ERR_LNK_DATA_TX |
314 EDMA_ERR_TRANS_PROTO,
Mark Lorde12bef52008-03-31 19:33:56 -0400315
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400316 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
317 EDMA_ERR_PRD_PAR |
318 EDMA_ERR_DEV_DCON |
319 EDMA_ERR_DEV_CON |
320 EDMA_ERR_OVERRUN_5 |
321 EDMA_ERR_UNDERRUN_5 |
322 EDMA_ERR_SELF_DIS_5 |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400323 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400324 EDMA_ERR_CRPB_PAR |
325 EDMA_ERR_INTRL_PAR |
326 EDMA_ERR_IORDY,
Brett Russ20f733e2005-09-01 18:26:17 -0400327
Brett Russ31961942005-09-30 01:36:00 -0400328 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
329 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400330
331 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
332 EDMA_REQ_Q_PTR_SHIFT = 5,
333
334 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
335 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
336 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400337 EDMA_RSP_Q_PTR_SHIFT = 3,
338
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400339 EDMA_CMD_OFS = 0x28, /* EDMA command register */
340 EDMA_EN = (1 << 0), /* enable EDMA */
341 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
342 ATA_RST = (1 << 2), /* reset trans/link/phy */
Brett Russ20f733e2005-09-01 18:26:17 -0400343
Jeff Garzikc9d39132005-11-13 17:47:51 -0500344 EDMA_IORDY_TMOUT = 0x34,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500345 EDMA_ARB_CFG = 0x38,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500346
Brett Russ31961942005-09-30 01:36:00 -0400347 /* Host private flags (hp_flags) */
348 MV_HP_FLAG_MSI = (1 << 0),
Jeff Garzik47c2b672005-11-12 21:13:17 -0500349 MV_HP_ERRATA_50XXB0 = (1 << 1),
350 MV_HP_ERRATA_50XXB2 = (1 << 2),
351 MV_HP_ERRATA_60X1B2 = (1 << 3),
352 MV_HP_ERRATA_60X1C0 = (1 << 4),
Jeff Garzike4e7b892006-01-31 12:18:41 -0500353 MV_HP_ERRATA_XX42A0 = (1 << 5),
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400354 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
355 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
356 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
Mark Lord02a121d2007-12-01 13:07:22 -0500357 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
Brett Russ20f733e2005-09-01 18:26:17 -0400358
Brett Russ31961942005-09-30 01:36:00 -0400359 /* Port private flags (pp_flags) */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400360 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
Mark Lord72109162008-01-26 18:31:33 -0500361 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
Brett Russ31961942005-09-30 01:36:00 -0400362};
363
Jeff Garzikee9ccdf2007-07-12 15:51:22 -0400364#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
365#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
Jeff Garzike4e7b892006-01-31 12:18:41 -0500366#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100367#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500368
Jeff Garzik095fec82005-11-12 09:50:49 -0500369enum {
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400370 /* DMA boundary 0xffff is required by the s/g splitting
371 * we need on /length/ in mv_fill-sg().
372 */
373 MV_DMA_BOUNDARY = 0xffffU,
Jeff Garzik095fec82005-11-12 09:50:49 -0500374
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400375 /* mask of register bits containing lower 32 bits
376 * of EDMA request queue DMA address
377 */
Jeff Garzik095fec82005-11-12 09:50:49 -0500378 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
379
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400380 /* ditto, for response queue */
Jeff Garzik095fec82005-11-12 09:50:49 -0500381 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
382};
383
Jeff Garzik522479f2005-11-12 22:14:02 -0500384enum chip_type {
385 chip_504x,
386 chip_508x,
387 chip_5080,
388 chip_604x,
389 chip_608x,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500390 chip_6042,
391 chip_7042,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500392 chip_soc,
Jeff Garzik522479f2005-11-12 22:14:02 -0500393};
394
Brett Russ31961942005-09-30 01:36:00 -0400395/* Command ReQuest Block: 32B */
396struct mv_crqb {
Mark Lorde1469872006-05-22 19:02:03 -0400397 __le32 sg_addr;
398 __le32 sg_addr_hi;
399 __le16 ctrl_flags;
400 __le16 ata_cmd[11];
Brett Russ31961942005-09-30 01:36:00 -0400401};
402
Jeff Garzike4e7b892006-01-31 12:18:41 -0500403struct mv_crqb_iie {
Mark Lorde1469872006-05-22 19:02:03 -0400404 __le32 addr;
405 __le32 addr_hi;
406 __le32 flags;
407 __le32 len;
408 __le32 ata_cmd[4];
Jeff Garzike4e7b892006-01-31 12:18:41 -0500409};
410
Brett Russ31961942005-09-30 01:36:00 -0400411/* Command ResPonse Block: 8B */
412struct mv_crpb {
Mark Lorde1469872006-05-22 19:02:03 -0400413 __le16 id;
414 __le16 flags;
415 __le32 tmstmp;
Brett Russ31961942005-09-30 01:36:00 -0400416};
417
418/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
419struct mv_sg {
Mark Lorde1469872006-05-22 19:02:03 -0400420 __le32 addr;
421 __le32 flags_size;
422 __le32 addr_hi;
423 __le32 reserved;
Brett Russ20f733e2005-09-01 18:26:17 -0400424};
425
426struct mv_port_priv {
Brett Russ31961942005-09-30 01:36:00 -0400427 struct mv_crqb *crqb;
428 dma_addr_t crqb_dma;
429 struct mv_crpb *crpb;
430 dma_addr_t crpb_dma;
Mark Lordeb73d552008-01-29 13:24:00 -0500431 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
432 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400433
434 unsigned int req_idx;
435 unsigned int resp_idx;
436
Brett Russ31961942005-09-30 01:36:00 -0400437 u32 pp_flags;
Brett Russ20f733e2005-09-01 18:26:17 -0400438};
439
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500440struct mv_port_signal {
441 u32 amps;
442 u32 pre;
443};
444
Mark Lord02a121d2007-12-01 13:07:22 -0500445struct mv_host_priv {
446 u32 hp_flags;
447 struct mv_port_signal signal[8];
448 const struct mv_hw_ops *ops;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500449 int n_ports;
450 void __iomem *base;
451 void __iomem *main_cause_reg_addr;
452 void __iomem *main_mask_reg_addr;
Mark Lord02a121d2007-12-01 13:07:22 -0500453 u32 irq_cause_ofs;
454 u32 irq_mask_ofs;
455 u32 unmask_all_irqs;
Mark Lordda2fa9b2008-01-26 18:32:45 -0500456 /*
457 * These consistent DMA memory pools give us guaranteed
458 * alignment for hardware-accessed data structures,
459 * and less memory waste in accomplishing the alignment.
460 */
461 struct dma_pool *crqb_pool;
462 struct dma_pool *crpb_pool;
463 struct dma_pool *sg_tbl_pool;
Mark Lord02a121d2007-12-01 13:07:22 -0500464};
465
Jeff Garzik47c2b672005-11-12 21:13:17 -0500466struct mv_hw_ops {
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500467 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
468 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500469 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
470 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
471 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500472 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
473 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500474 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100475 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500476};
477
Tejun Heoda3dbb12007-07-16 14:29:40 +0900478static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
479static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
480static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
481static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Brett Russ31961942005-09-30 01:36:00 -0400482static int mv_port_start(struct ata_port *ap);
483static void mv_port_stop(struct ata_port *ap);
484static void mv_qc_prep(struct ata_queued_cmd *qc);
Jeff Garzike4e7b892006-01-31 12:18:41 -0500485static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
Tejun Heo9a3d9eb2006-01-23 13:09:36 +0900486static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
Tejun Heoa1efdab2008-03-25 12:22:50 +0900487static int mv_hardreset(struct ata_link *link, unsigned int *class,
488 unsigned long deadline);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400489static void mv_eh_freeze(struct ata_port *ap);
490static void mv_eh_thaw(struct ata_port *ap);
Mark Lordf2738272008-01-26 18:32:29 -0500491static void mv6_dev_config(struct ata_device *dev);
Brett Russ20f733e2005-09-01 18:26:17 -0400492
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500493static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
494 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500495static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
496static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
497 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500498static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
499 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500500static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100501static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500502
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500503static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
504 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500505static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
506static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
507 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500508static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
509 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500510static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500511static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
512 void __iomem *mmio);
513static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
514 void __iomem *mmio);
515static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
516 void __iomem *mmio, unsigned int n_hc);
517static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
518 void __iomem *mmio);
519static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100520static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
Mark Lorde12bef52008-03-31 19:33:56 -0400521static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500522 unsigned int port_no);
Mark Lorde12bef52008-03-31 19:33:56 -0400523static int mv_stop_edma(struct ata_port *ap);
Mark Lordb5624682008-03-31 19:34:40 -0400524static int mv_stop_edma_engine(void __iomem *port_mmio);
Mark Lorde12bef52008-03-31 19:33:56 -0400525static void mv_edma_cfg(struct ata_port *ap, int want_ncq);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500526
Mark Lordeb73d552008-01-29 13:24:00 -0500527/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
528 * because we have to allow room for worst case splitting of
529 * PRDs for 64K boundaries in mv_fill_sg().
530 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400531static struct scsi_host_template mv5_sht = {
Tejun Heo68d1d072008-03-25 12:22:49 +0900532 ATA_BASE_SHT(DRV_NAME),
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400533 .sg_tablesize = MV_MAX_SG_CT / 2,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400534 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400535};
536
537static struct scsi_host_template mv6_sht = {
Tejun Heo68d1d072008-03-25 12:22:49 +0900538 ATA_NCQ_SHT(DRV_NAME),
Mark Lord138bfdd2008-01-26 18:33:18 -0500539 .can_queue = MV_MAX_Q_DEPTH - 1,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400540 .sg_tablesize = MV_MAX_SG_CT / 2,
Brett Russ20f733e2005-09-01 18:26:17 -0400541 .dma_boundary = MV_DMA_BOUNDARY,
Brett Russ20f733e2005-09-01 18:26:17 -0400542};
543
Tejun Heo029cfd62008-03-25 12:22:49 +0900544static struct ata_port_operations mv5_ops = {
545 .inherits = &ata_sff_port_ops,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500546
Jeff Garzikc9d39132005-11-13 17:47:51 -0500547 .qc_prep = mv_qc_prep,
548 .qc_issue = mv_qc_issue,
549
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400550 .freeze = mv_eh_freeze,
551 .thaw = mv_eh_thaw,
Tejun Heoa1efdab2008-03-25 12:22:50 +0900552 .hardreset = mv_hardreset,
Tejun Heoa1efdab2008-03-25 12:22:50 +0900553 .error_handler = ata_std_error_handler, /* avoid SFF EH */
Tejun Heo029cfd62008-03-25 12:22:49 +0900554 .post_internal_cmd = ATA_OP_NULL,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400555
Jeff Garzikc9d39132005-11-13 17:47:51 -0500556 .scr_read = mv5_scr_read,
557 .scr_write = mv5_scr_write,
558
559 .port_start = mv_port_start,
560 .port_stop = mv_port_stop,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500561};
562
Tejun Heo029cfd62008-03-25 12:22:49 +0900563static struct ata_port_operations mv6_ops = {
564 .inherits = &mv5_ops,
Mark Lord138bfdd2008-01-26 18:33:18 -0500565 .qc_defer = ata_std_qc_defer,
Tejun Heo029cfd62008-03-25 12:22:49 +0900566 .dev_config = mv6_dev_config,
Brett Russ20f733e2005-09-01 18:26:17 -0400567 .scr_read = mv_scr_read,
568 .scr_write = mv_scr_write,
Brett Russ20f733e2005-09-01 18:26:17 -0400569};
570
Tejun Heo029cfd62008-03-25 12:22:49 +0900571static struct ata_port_operations mv_iie_ops = {
572 .inherits = &mv6_ops,
573 .dev_config = ATA_OP_NULL,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500574 .qc_prep = mv_qc_prep_iie,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500575};
576
Arjan van de Ven98ac62d2005-11-28 10:06:23 +0100577static const struct ata_port_info mv_port_info[] = {
Brett Russ20f733e2005-09-01 18:26:17 -0400578 { /* chip_504x */
Jeff Garzikcca39742006-08-24 03:19:22 -0400579 .flags = MV_COMMON_FLAGS,
Brett Russ31961942005-09-30 01:36:00 -0400580 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400581 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500582 .port_ops = &mv5_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400583 },
584 { /* chip_508x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400585 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
Brett Russ31961942005-09-30 01:36:00 -0400586 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400587 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500588 .port_ops = &mv5_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400589 },
Jeff Garzik47c2b672005-11-12 21:13:17 -0500590 { /* chip_5080 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400591 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500592 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400593 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500594 .port_ops = &mv5_ops,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500595 },
Brett Russ20f733e2005-09-01 18:26:17 -0400596 { /* chip_604x */
Mark Lord138bfdd2008-01-26 18:33:18 -0500597 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
598 ATA_FLAG_NCQ,
Brett Russ31961942005-09-30 01:36:00 -0400599 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400600 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500601 .port_ops = &mv6_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400602 },
603 { /* chip_608x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400604 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
Mark Lord138bfdd2008-01-26 18:33:18 -0500605 ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
Brett Russ31961942005-09-30 01:36:00 -0400606 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400607 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500608 .port_ops = &mv6_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400609 },
Jeff Garzike4e7b892006-01-31 12:18:41 -0500610 { /* chip_6042 */
Mark Lord138bfdd2008-01-26 18:33:18 -0500611 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
612 ATA_FLAG_NCQ,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500613 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400614 .udma_mask = ATA_UDMA6,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500615 .port_ops = &mv_iie_ops,
616 },
617 { /* chip_7042 */
Mark Lord138bfdd2008-01-26 18:33:18 -0500618 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
619 ATA_FLAG_NCQ,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500620 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400621 .udma_mask = ATA_UDMA6,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500622 .port_ops = &mv_iie_ops,
623 },
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500624 { /* chip_soc */
Mark Lord17c5aab2008-04-16 14:56:51 -0400625 .flags = MV_COMMON_FLAGS | MV_FLAG_SOC,
626 .pio_mask = 0x1f, /* pio0-4 */
627 .udma_mask = ATA_UDMA6,
628 .port_ops = &mv_iie_ops,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500629 },
Brett Russ20f733e2005-09-01 18:26:17 -0400630};
631
/*
 * PCI device ID table: maps each supported vendor/device pair to the
 * board index (chip_xxx) used to look up per-chip data in mv_port_info[].
 */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
661
/* Low-level hardware ops for Gen-I (50xx) chips */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata	= mv5_phy_errata,
	.enable_leds	= mv5_enable_leds,
	.read_preamp	= mv5_read_preamp,
	.reset_hc	= mv5_reset_hc,
	.reset_flash	= mv5_reset_flash,
	.reset_bus	= mv5_reset_bus,
};
670
/* Low-level hardware ops for Gen-II/IIE (60xx/70xx) chips */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata	= mv6_phy_errata,
	.enable_leds	= mv6_enable_leds,
	.read_preamp	= mv6_read_preamp,
	.reset_hc	= mv6_reset_hc,
	.reset_flash	= mv6_reset_flash,
	.reset_bus	= mv_reset_pci_bus,
};
679
/*
 * Low-level hardware ops for system-on-chip (SoC) integrated controllers.
 * Shares the Gen-II PHY errata handler; the remaining hooks are SoC-specific.
 */
static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata	= mv6_phy_errata,
	.enable_leds	= mv_soc_enable_leds,
	.read_preamp	= mv_soc_read_preamp,
	.reset_hc	= mv_soc_reset_hc,
	.reset_flash	= mv_soc_reset_flash,
	.reset_bus	= mv_soc_reset_bus,
};
688
Brett Russ20f733e2005-09-01 18:26:17 -0400689/*
690 * Functions
691 */
692
/*
 * writelfl - write an MMIO register, then flush the write.
 *
 * The dummy readl() of the same address forces the write out of any
 * PCI posted-write buffers, so the register update is known to have
 * reached the chip before the caller proceeds.
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
698
Brett Russ20f733e2005-09-01 18:26:17 -0400699static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
700{
701 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
702}
703
/* Convert a host-wide port number into its host-controller index. */
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}
708
/* Convert a host-wide port number into its index within its host controller. */
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
713
714static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
715 unsigned int port)
716{
717 return mv_hc_base(base, mv_hc_from_port(port));
718}
719
Brett Russ20f733e2005-09-01 18:26:17 -0400720static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
721{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500722 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500723 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500724 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400725}
726
Mark Lorde12bef52008-03-31 19:33:56 -0400727static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
728{
729 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
730 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
731
732 return hc_mmio + ofs;
733}
734
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500735static inline void __iomem *mv_host_base(struct ata_host *host)
736{
737 struct mv_host_priv *hpriv = host->private_data;
738 return hpriv->base;
739}
740
Brett Russ20f733e2005-09-01 18:26:17 -0400741static inline void __iomem *mv_ap_base(struct ata_port *ap)
742{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500743 return mv_port_base(mv_host_base(ap->host), ap->port_no);
Brett Russ20f733e2005-09-01 18:26:17 -0400744}
745
Jeff Garzikcca39742006-08-24 03:19:22 -0400746static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400747{
Jeff Garzikcca39742006-08-24 03:19:22 -0400748 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400749}
750
/**
 * mv_set_edma_ptrs - program EDMA request/response queue pointer registers
 * @port_mmio: per-port MMIO base
 * @hpriv: host private data (consulted for errata flags)
 * @pp: port private data holding queue DMA addresses and soft indices
 *
 * Writes the hardware base/in/out pointer registers for both the EDMA
 * request (CRQB) and response (CRPB) queues from the driver's cached
 * state, honoring the XX42A0 errata variant of the pointer format.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);	/* CRQB ring must be 1KB aligned */
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	/* XX42A0 parts take the full low base address in the OUT pointer reg */
	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);	/* CRPB ring must be 256B aligned */
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
790
/**
 * mv_start_dma - Enable eDMA engine
 * @ap: port on which to enable eDMA
 * @port_mmio: per-port MMIO base address
 * @pp: port private data
 * @protocol: taskfile protocol of the command about to be issued
 *
 * If the engine is running but its NCQ mode differs from what
 * @protocol needs, stop it first.  Then, if eDMA is off, clear any
 * stale event/interrupt indicators, reconfigure via mv_edma_cfg(),
 * reprogram the queue pointers, and enable the engine.
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		/* NCQ mode must change: stop the engine so it can be
		 * reconfigured below */
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		/* NOTE(review): hc_mmio is derived from hard_port rather
		 * than ap->port_no, which on dual-HC chips always resolves
		 * to HC0 — confirm intended */
		void __iomem *hc_mmio = mv_hc_base_from_port(
					mv_host_base(ap->host), hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			   (CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(ap, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}
843
Brett Russ05b308e2005-10-05 17:08:53 -0400844/**
Mark Lorde12bef52008-03-31 19:33:56 -0400845 * mv_stop_edma_engine - Disable eDMA engine
Mark Lordb5624682008-03-31 19:34:40 -0400846 * @port_mmio: io base address
Brett Russ05b308e2005-10-05 17:08:53 -0400847 *
848 * LOCKING:
849 * Inherited from caller.
850 */
Mark Lordb5624682008-03-31 19:34:40 -0400851static int mv_stop_edma_engine(void __iomem *port_mmio)
Brett Russ31961942005-09-30 01:36:00 -0400852{
Mark Lordb5624682008-03-31 19:34:40 -0400853 int i;
Brett Russ31961942005-09-30 01:36:00 -0400854
Mark Lordb5624682008-03-31 19:34:40 -0400855 /* Disable eDMA. The disable bit auto clears. */
856 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
Jeff Garzik8b260242005-11-12 12:32:50 -0500857
Mark Lordb5624682008-03-31 19:34:40 -0400858 /* Wait for the chip to confirm eDMA is off. */
859 for (i = 10000; i > 0; i--) {
860 u32 reg = readl(port_mmio + EDMA_CMD_OFS);
Jeff Garzik4537deb2007-07-12 14:30:19 -0400861 if (!(reg & EDMA_EN))
Mark Lordb5624682008-03-31 19:34:40 -0400862 return 0;
863 udelay(10);
Brett Russ31961942005-09-30 01:36:00 -0400864 }
Mark Lordb5624682008-03-31 19:34:40 -0400865 return -EIO;
Brett Russ31961942005-09-30 01:36:00 -0400866}
867
Mark Lorde12bef52008-03-31 19:33:56 -0400868static int mv_stop_edma(struct ata_port *ap)
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400869{
Mark Lordb5624682008-03-31 19:34:40 -0400870 void __iomem *port_mmio = mv_ap_base(ap);
871 struct mv_port_priv *pp = ap->private_data;
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400872
Mark Lordb5624682008-03-31 19:34:40 -0400873 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
874 return 0;
875 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
876 if (mv_stop_edma_engine(port_mmio)) {
877 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
878 return -EIO;
879 }
880 return 0;
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400881}
882
#ifdef ATA_DEBUG
/* Debug helper: hex-dump @bytes of MMIO space at @start, 4 words per line. */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
897
/* Debug helper: hex-dump @bytes of @pdev's PCI config space, 4 dwords/line.
 * Compiles to an empty function unless ATA_DEBUG is defined. */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
/* Debug helper: dump PCI config, global, per-HC, and per-port register
 * blocks.  A negative @port dumps all ports/HCs; otherwise only the HC
 * and port containing @port.  No-op unless ATA_DEBUG is defined. */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
958
Brett Russ20f733e2005-09-01 18:26:17 -0400959static unsigned int mv_scr_offset(unsigned int sc_reg_in)
960{
961 unsigned int ofs;
962
963 switch (sc_reg_in) {
964 case SCR_STATUS:
965 case SCR_CONTROL:
966 case SCR_ERROR:
967 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
968 break;
969 case SCR_ACTIVE:
970 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
971 break;
972 default:
973 ofs = 0xffffffffU;
974 break;
975 }
976 return ofs;
977}
978
Tejun Heoda3dbb12007-07-16 14:29:40 +0900979static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -0400980{
981 unsigned int ofs = mv_scr_offset(sc_reg_in);
982
Tejun Heoda3dbb12007-07-16 14:29:40 +0900983 if (ofs != 0xffffffffU) {
984 *val = readl(mv_ap_base(ap) + ofs);
985 return 0;
986 } else
987 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -0400988}
989
Tejun Heoda3dbb12007-07-16 14:29:40 +0900990static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -0400991{
992 unsigned int ofs = mv_scr_offset(sc_reg_in);
993
Tejun Heoda3dbb12007-07-16 14:29:40 +0900994 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -0400995 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +0900996 return 0;
997 } else
998 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -0400999}
1000
Mark Lordf2738272008-01-26 18:32:29 -05001001static void mv6_dev_config(struct ata_device *adev)
1002{
1003 /*
1004 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1005 * See mv_qc_prep() for more info.
1006 */
1007 if (adev->flags & ATA_DFLAG_NCQ)
1008 if (adev->max_sectors > ATA_MAX_SECTORS)
1009 adev->max_sectors = ATA_MAX_SECTORS;
1010}
1011
/**
 * mv_edma_cfg - program the EDMA configuration register for a port
 * @ap: port to configure
 * @want_ncq: nonzero to enable NCQ queuing mode
 *
 * Builds the chip-generation-specific EDMA_CFG word, updates the
 * cached MV_PP_FLAG_NCQ_EN flag to match @want_ncq, and writes the
 * register.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
{
	u32 cfg;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1043
/*
 * mv_port_free_dma_mem - return a port's queue memory to the dma_pools
 * @ap: port being torn down
 *
 * Frees the CRQB/CRPB rings and the per-tag scatter/gather tables.
 * Safe on a partially-allocated port: every pointer is checked and
 * NULLed after release.
 */
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			/* on GEN_I tags 1..N alias sg_tbl[0], so only the
			 * tag-0 entry is actually freed; the rest are just
			 * cleared to avoid a double free */
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}
1072
/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.  On any allocation failure after the first, all
 * partially-allocated DMA memory is released via
 * mv_port_free_dma_mem() before returning -ENOMEM.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;
	int tag;

	/* devm-managed: freed automatically on device teardown */
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			/* GEN_I: every tag shares the tag-0 table */
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
1140
/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	/* shut the engine down before releasing its queue memory */
	mv_stop_edma(ap);
	mv_port_free_dma_mem(ap);
}
1155
/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.  Segments that would
 * cross a 64KB boundary are split, since the ePRD length field is
 * only 16 bits wide (a full 0x10000 chunk is written as len & 0xffff,
 * i.e. 0 — presumably the hardware treats 0 as 64KB; TODO confirm
 * against the datasheet).
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			/* clamp so this chunk ends at the 64KB boundary */
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
1199
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001200static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001201{
Mark Lord559eeda2006-05-19 16:40:15 -04001202 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001203 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001204 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001205}
1206
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* only DMA and NCQ protocols use the CRQB path */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accomodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1296
/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* only DMA and NCQ protocols use the CRQB path */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	/* Gen-IIE CRQBs carry the full taskfile packed into four
	 * little-endian words, unlike the Gen-I/II register list format */
	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1364
Brett Russ05b308e2005-10-05 17:08:53 -04001365/**
1366 * mv_qc_issue - Initiate a command to the host
1367 * @qc: queued command to start
1368 *
1369 * This routine simply redirects to the general purpose routine
1370 * if command is not DMA. Else, it sanity checks our local
1371 * caches of the request producer/consumer indices then enables
1372 * DMA and bumps the request producer index.
1373 *
1374 * LOCKING:
1375 * Inherited from caller.
1376 */
Tejun Heo9a3d9eb2006-01-23 13:09:36 +09001377static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
Brett Russ31961942005-09-30 01:36:00 -04001378{
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001379 struct ata_port *ap = qc->ap;
1380 void __iomem *port_mmio = mv_ap_base(ap);
1381 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001382 u32 in_index;
Brett Russ31961942005-09-30 01:36:00 -04001383
Mark Lord138bfdd2008-01-26 18:33:18 -05001384 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1385 (qc->tf.protocol != ATA_PROT_NCQ)) {
Mark Lord17c5aab2008-04-16 14:56:51 -04001386 /*
1387 * We're about to send a non-EDMA capable command to the
Brett Russ31961942005-09-30 01:36:00 -04001388 * port. Turn off EDMA so there won't be problems accessing
1389 * shadow block, etc registers.
1390 */
Mark Lordb5624682008-03-31 19:34:40 -04001391 mv_stop_edma(ap);
Tejun Heo9363c382008-04-07 22:47:16 +09001392 return ata_sff_qc_issue(qc);
Brett Russ31961942005-09-30 01:36:00 -04001393 }
1394
Mark Lord72109162008-01-26 18:31:33 -05001395 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001396
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001397 pp->req_idx++;
Brett Russ31961942005-09-30 01:36:00 -04001398
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001399 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
Brett Russ31961942005-09-30 01:36:00 -04001400
1401 /* and write the request in pointer to kick the EDMA to life */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001402 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1403 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
Brett Russ31961942005-09-30 01:36:00 -04001404
1405 return 0;
1406}
1407
Brett Russ05b308e2005-10-05 17:08:53 -04001408/**
Brett Russ05b308e2005-10-05 17:08:53 -04001409 * mv_err_intr - Handle error interrupts on the port
1410 * @ap: ATA channel to manipulate
Mark Lord9b358e32006-05-19 16:21:03 -04001411 * @reset_allowed: bool: 0 == don't trigger from reset here
Brett Russ05b308e2005-10-05 17:08:53 -04001412 *
1413 * In most cases, just clear the interrupt and move on. However,
Mark Lorde12bef52008-03-31 19:33:56 -04001414 * some cases require an eDMA reset, which also performs a COMRESET.
1415 * The SERR case requires a clear of pending errors in the SATA
1416 * SERROR register. Finally, if the port disabled DMA,
1417 * update our cached copy to match.
Brett Russ05b308e2005-10-05 17:08:53 -04001418 *
1419 * LOCKING:
1420 * Inherited from caller.
1421 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001422static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
Brett Russ20f733e2005-09-01 18:26:17 -04001423{
Brett Russ31961942005-09-30 01:36:00 -04001424 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001425 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1426 struct mv_port_priv *pp = ap->private_data;
1427 struct mv_host_priv *hpriv = ap->host->private_data;
1428 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1429 unsigned int action = 0, err_mask = 0;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001430 struct ata_eh_info *ehi = &ap->link.eh_info;
Brett Russ20f733e2005-09-01 18:26:17 -04001431
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001432 ata_ehi_clear_desc(ehi);
Brett Russ20f733e2005-09-01 18:26:17 -04001433
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001434 if (!edma_enabled) {
1435 /* just a guess: do we need to do this? should we
1436 * expand this, and do it in all cases?
1437 */
Tejun Heo936fd732007-08-06 18:36:23 +09001438 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1439 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Brett Russ20f733e2005-09-01 18:26:17 -04001440 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001441
1442 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1443
1444 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1445
1446 /*
1447 * all generations share these EDMA error cause bits
1448 */
1449
1450 if (edma_err_cause & EDMA_ERR_DEV)
1451 err_mask |= AC_ERR_DEV;
1452 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
Jeff Garzik6c1153e2007-07-13 15:20:15 -04001453 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001454 EDMA_ERR_INTRL_PAR)) {
1455 err_mask |= AC_ERR_ATA_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09001456 action |= ATA_EH_RESET;
Tejun Heob64bbc32007-07-16 14:29:39 +09001457 ata_ehi_push_desc(ehi, "parity error");
Brett Russafb0edd2005-10-05 17:08:42 -04001458 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001459 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1460 ata_ehi_hotplugged(ehi);
1461 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
Tejun Heob64bbc32007-07-16 14:29:39 +09001462 "dev disconnect" : "dev connect");
Tejun Heocf480622008-01-24 00:05:14 +09001463 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001464 }
1465
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04001466 if (IS_GEN_I(hpriv)) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001467 eh_freeze_mask = EDMA_EH_FREEZE_5;
1468
1469 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
Harvey Harrison5ab063e2008-02-13 21:14:14 -08001470 pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001471 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001472 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001473 }
1474 } else {
1475 eh_freeze_mask = EDMA_EH_FREEZE;
1476
1477 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
Harvey Harrison5ab063e2008-02-13 21:14:14 -08001478 pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001479 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001480 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001481 }
1482
1483 if (edma_err_cause & EDMA_ERR_SERR) {
Tejun Heo936fd732007-08-06 18:36:23 +09001484 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1485 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001486 err_mask = AC_ERR_ATA_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09001487 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001488 }
1489 }
Brett Russ20f733e2005-09-01 18:26:17 -04001490
1491 /* Clear EDMA now that SERR cleanup done */
Mark Lord3606a382008-01-26 18:28:23 -05001492 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001493
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001494 if (!err_mask) {
1495 err_mask = AC_ERR_OTHER;
Tejun Heocf480622008-01-24 00:05:14 +09001496 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001497 }
1498
1499 ehi->serror |= serr;
1500 ehi->action |= action;
1501
1502 if (qc)
1503 qc->err_mask |= err_mask;
1504 else
1505 ehi->err_mask |= err_mask;
1506
1507 if (edma_err_cause & eh_freeze_mask)
1508 ata_port_freeze(ap);
1509 else
1510 ata_port_abort(ap);
1511}
1512
/* Complete a PIO-protocol command on @ap: sample the shadow status
 * register and, if the drive is no longer BUSY and a non-polled command
 * is active, finish it via ata_qc_complete().
 */
static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}
1534
/* Drain the EDMA response queue on @ap: complete every command the
 * hardware has responded to since our last visit, then (once, at the
 * end) publish the new software out-pointer back to the hardware.
 * On an error response in non-NCQ mode this diverts to mv_err_intr()
 * without updating the out-pointer.
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* walk the CRPB ring until our out-pointer catches the h/w in-pointer */
	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	/* one MMIO write covers all entries consumed above */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1600
Brett Russ05b308e2005-10-05 17:08:53 -04001601/**
1602 * mv_host_intr - Handle all interrupts on the given host controller
Jeff Garzikcca39742006-08-24 03:19:22 -04001603 * @host: host specific structure
Brett Russ05b308e2005-10-05 17:08:53 -04001604 * @relevant: port error bits relevant to this host controller
1605 * @hc: which host controller we're to look at
1606 *
1607 * Read then write clear the HC interrupt status then walk each
1608 * port connected to the HC and see if it needs servicing. Port
1609 * success ints are reported in the HC interrupt status reg, the
1610 * port error ints are reported in the higher level main
1611 * interrupt status register and thus are passed in via the
1612 * 'relevant' argument.
1613 *
1614 * LOCKING:
1615 * Inherited from caller.
1616 */
Jeff Garzikcca39742006-08-24 03:19:22 -04001617static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
Brett Russ20f733e2005-09-01 18:26:17 -04001618{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001619 struct mv_host_priv *hpriv = host->private_data;
1620 void __iomem *mmio = hpriv->base;
Brett Russ20f733e2005-09-01 18:26:17 -04001621 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
Brett Russ20f733e2005-09-01 18:26:17 -04001622 u32 hc_irq_cause;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001623 int port, port0, last_port;
Brett Russ20f733e2005-09-01 18:26:17 -04001624
Jeff Garzik35177262007-02-24 21:26:42 -05001625 if (hc == 0)
Brett Russ20f733e2005-09-01 18:26:17 -04001626 port0 = 0;
Jeff Garzik35177262007-02-24 21:26:42 -05001627 else
Brett Russ20f733e2005-09-01 18:26:17 -04001628 port0 = MV_PORTS_PER_HC;
Brett Russ20f733e2005-09-01 18:26:17 -04001629
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001630 if (HAS_PCI(host))
1631 last_port = port0 + MV_PORTS_PER_HC;
1632 else
1633 last_port = port0 + hpriv->n_ports;
Brett Russ20f733e2005-09-01 18:26:17 -04001634 /* we'll need the HC success int register in most cases */
1635 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001636 if (!hc_irq_cause)
1637 return;
1638
1639 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001640
1641 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001642 hc, relevant, hc_irq_cause);
Brett Russ20f733e2005-09-01 18:26:17 -04001643
Yinghai Lu8f71efe2008-02-07 15:06:17 -08001644 for (port = port0; port < last_port; port++) {
Jeff Garzikcca39742006-08-24 03:19:22 -04001645 struct ata_port *ap = host->ports[port];
Yinghai Lu8f71efe2008-02-07 15:06:17 -08001646 struct mv_port_priv *pp;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001647 int have_err_bits, hard_port, shift;
Jeff Garzik55d8ca42006-03-29 19:43:31 -05001648
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001649 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
Jeff Garzika2c91a82005-11-17 05:44:44 -05001650 continue;
1651
Yinghai Lu8f71efe2008-02-07 15:06:17 -08001652 pp = ap->private_data;
1653
Brett Russ31961942005-09-30 01:36:00 -04001654 shift = port << 1; /* (port * 2) */
Mark Lorde12bef52008-03-31 19:33:56 -04001655 if (port >= MV_PORTS_PER_HC)
Brett Russ20f733e2005-09-01 18:26:17 -04001656 shift++; /* skip bit 8 in the HC Main IRQ reg */
Mark Lorde12bef52008-03-31 19:33:56 -04001657
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001658 have_err_bits = ((PORT0_ERR << shift) & relevant);
1659
1660 if (unlikely(have_err_bits)) {
1661 struct ata_queued_cmd *qc;
1662
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001663 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001664 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1665 continue;
1666
1667 mv_err_intr(ap, qc);
1668 continue;
Brett Russ20f733e2005-09-01 18:26:17 -04001669 }
Jeff Garzik8b260242005-11-12 12:32:50 -05001670
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001671 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1672
1673 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1674 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1675 mv_intr_edma(ap);
1676 } else {
1677 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1678 mv_intr_pio(ap);
Brett Russ20f733e2005-09-01 18:26:17 -04001679 }
1680 }
1681 VPRINTK("EXIT\n");
1682}
1683
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001684static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1685{
Mark Lord02a121d2007-12-01 13:07:22 -05001686 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001687 struct ata_port *ap;
1688 struct ata_queued_cmd *qc;
1689 struct ata_eh_info *ehi;
1690 unsigned int i, err_mask, printed = 0;
1691 u32 err_cause;
1692
Mark Lord02a121d2007-12-01 13:07:22 -05001693 err_cause = readl(mmio + hpriv->irq_cause_ofs);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001694
1695 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1696 err_cause);
1697
1698 DPRINTK("All regs @ PCI error\n");
1699 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1700
Mark Lord02a121d2007-12-01 13:07:22 -05001701 writelfl(0, mmio + hpriv->irq_cause_ofs);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001702
1703 for (i = 0; i < host->n_ports; i++) {
1704 ap = host->ports[i];
Tejun Heo936fd732007-08-06 18:36:23 +09001705 if (!ata_link_offline(&ap->link)) {
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001706 ehi = &ap->link.eh_info;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001707 ata_ehi_clear_desc(ehi);
1708 if (!printed++)
1709 ata_ehi_push_desc(ehi,
1710 "PCI err cause 0x%08x", err_cause);
1711 err_mask = AC_ERR_HOST_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09001712 ehi->action = ATA_EH_RESET;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001713 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001714 if (qc)
1715 qc->err_mask |= err_mask;
1716 else
1717 ehi->err_mask |= err_mask;
1718
1719 ata_port_freeze(ap);
1720 }
1721 }
1722}
1723
Brett Russ05b308e2005-10-05 17:08:53 -04001724/**
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001725 * mv_interrupt - Main interrupt event handler
Brett Russ05b308e2005-10-05 17:08:53 -04001726 * @irq: unused
1727 * @dev_instance: private data; in this case the host structure
Brett Russ05b308e2005-10-05 17:08:53 -04001728 *
1729 * Read the read only register to determine if any host
1730 * controllers have pending interrupts. If so, call lower level
1731 * routine to handle. Also check for PCI errors which are only
1732 * reported here.
1733 *
Jeff Garzik8b260242005-11-12 12:32:50 -05001734 * LOCKING:
Jeff Garzikcca39742006-08-24 03:19:22 -04001735 * This routine holds the host lock while processing pending
Brett Russ05b308e2005-10-05 17:08:53 -04001736 * interrupts.
1737 */
David Howells7d12e782006-10-05 14:55:46 +01001738static irqreturn_t mv_interrupt(int irq, void *dev_instance)
Brett Russ20f733e2005-09-01 18:26:17 -04001739{
Jeff Garzikcca39742006-08-24 03:19:22 -04001740 struct ata_host *host = dev_instance;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001741 struct mv_host_priv *hpriv = host->private_data;
Brett Russ20f733e2005-09-01 18:26:17 -04001742 unsigned int hc, handled = 0, n_hcs;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001743 void __iomem *mmio = hpriv->base;
Mark Lord646a4da2008-01-26 18:30:37 -05001744 u32 irq_stat, irq_mask;
Brett Russ20f733e2005-09-01 18:26:17 -04001745
Mark Lorde12bef52008-03-31 19:33:56 -04001746 /* Note to self: &host->lock == &ap->host->lock == ap->lock */
Mark Lord646a4da2008-01-26 18:30:37 -05001747 spin_lock(&host->lock);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001748
1749 irq_stat = readl(hpriv->main_cause_reg_addr);
1750 irq_mask = readl(hpriv->main_mask_reg_addr);
Brett Russ20f733e2005-09-01 18:26:17 -04001751
1752 /* check the cases where we either have nothing pending or have read
1753 * a bogus register value which can indicate HW removal or PCI fault
1754 */
Mark Lord646a4da2008-01-26 18:30:37 -05001755 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1756 goto out_unlock;
Brett Russ20f733e2005-09-01 18:26:17 -04001757
Jeff Garzikcca39742006-08-24 03:19:22 -04001758 n_hcs = mv_get_hc_count(host->ports[0]->flags);
Brett Russ20f733e2005-09-01 18:26:17 -04001759
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001760 if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001761 mv_pci_error(host, mmio);
1762 handled = 1;
1763 goto out_unlock; /* skip all other HC irq handling */
1764 }
1765
Brett Russ20f733e2005-09-01 18:26:17 -04001766 for (hc = 0; hc < n_hcs; hc++) {
1767 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1768 if (relevant) {
Jeff Garzikcca39742006-08-24 03:19:22 -04001769 mv_host_intr(host, relevant, hc);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001770 handled = 1;
Brett Russ20f733e2005-09-01 18:26:17 -04001771 }
1772 }
Mark Lord615ab952006-05-19 16:24:56 -04001773
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001774out_unlock:
Jeff Garzikcca39742006-08-24 03:19:22 -04001775 spin_unlock(&host->lock);
Brett Russ20f733e2005-09-01 18:26:17 -04001776
1777 return IRQ_RETVAL(handled);
1778}
1779
Jeff Garzikc9d39132005-11-13 17:47:51 -05001780static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1781{
1782 unsigned int ofs;
1783
1784 switch (sc_reg_in) {
1785 case SCR_STATUS:
1786 case SCR_ERROR:
1787 case SCR_CONTROL:
1788 ofs = sc_reg_in * sizeof(u32);
1789 break;
1790 default:
1791 ofs = 0xffffffffU;
1792 break;
1793 }
1794 return ofs;
1795}
1796
Tejun Heoda3dbb12007-07-16 14:29:40 +09001797static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001798{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001799 struct mv_host_priv *hpriv = ap->host->private_data;
1800 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001801 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001802 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1803
Tejun Heoda3dbb12007-07-16 14:29:40 +09001804 if (ofs != 0xffffffffU) {
1805 *val = readl(addr + ofs);
1806 return 0;
1807 } else
1808 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001809}
1810
Tejun Heoda3dbb12007-07-16 14:29:40 +09001811static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001812{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001813 struct mv_host_priv *hpriv = ap->host->private_data;
1814 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001815 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001816 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1817
Tejun Heoda3dbb12007-07-16 14:29:40 +09001818 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001819 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001820 return 0;
1821 } else
1822 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001823}
1824
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001825static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
Jeff Garzik522479f2005-11-12 22:14:02 -05001826{
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001827 struct pci_dev *pdev = to_pci_dev(host->dev);
Jeff Garzik522479f2005-11-12 22:14:02 -05001828 int early_5080;
1829
Auke Kok44c10132007-06-08 15:46:36 -07001830 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
Jeff Garzik522479f2005-11-12 22:14:02 -05001831
1832 if (!early_5080) {
1833 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1834 tmp |= (1 << 0);
1835 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1836 }
1837
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001838 mv_reset_pci_bus(host, mmio);
Jeff Garzik522479f2005-11-12 22:14:02 -05001839}
1840
/* Restore the 50xx flash controller register to its default value. */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1845
Jeff Garzik47c2b672005-11-12 21:13:17 -05001846static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001847 void __iomem *mmio)
1848{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001849 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1850 u32 tmp;
1851
1852 tmp = readl(phy_mmio + MV5_PHY_MODE);
1853
1854 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1855 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001856}
1857
Jeff Garzik47c2b672005-11-12 21:13:17 -05001858static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001859{
Jeff Garzik522479f2005-11-12 22:14:02 -05001860 u32 tmp;
1861
1862 writel(0, mmio + MV_GPIO_PORT_CTL);
1863
1864 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1865
1866 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1867 tmp |= ~(1 << 0);
1868 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001869}
1870
Jeff Garzik2a47ce02005-11-12 23:05:14 -05001871static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1872 unsigned int port)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05001873{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001874 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1875 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1876 u32 tmp;
1877 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1878
1879 if (fix_apm_sq) {
1880 tmp = readl(phy_mmio + MV5_LT_MODE);
1881 tmp |= (1 << 19);
1882 writel(tmp, phy_mmio + MV5_LT_MODE);
1883
1884 tmp = readl(phy_mmio + MV5_PHY_CTL);
1885 tmp &= ~0x3;
1886 tmp |= 0x1;
1887 writel(tmp, phy_mmio + MV5_PHY_CTL);
1888 }
1889
1890 tmp = readl(phy_mmio + MV5_PHY_MODE);
1891 tmp &= ~mask;
1892 tmp |= hpriv->signal[port].pre;
1893 tmp |= hpriv->signal[port].amps;
1894 writel(tmp, phy_mmio + MV5_PHY_MODE);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05001895}
1896
Jeff Garzikc9d39132005-11-13 17:47:51 -05001897
1898#undef ZERO
1899#define ZERO(reg) writel(0, port_mmio + (reg))
1900static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1901 unsigned int port)
Jeff Garzik47c2b672005-11-12 21:13:17 -05001902{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001903 void __iomem *port_mmio = mv_port_base(mmio, port);
1904
Mark Lordb5624682008-03-31 19:34:40 -04001905 /*
1906 * The datasheet warns against setting ATA_RST when EDMA is active
1907 * (but doesn't say what the problem might be). So we first try
1908 * to disable the EDMA engine before doing the ATA_RST operation.
1909 */
Mark Lorde12bef52008-03-31 19:33:56 -04001910 mv_reset_channel(hpriv, mmio, port);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001911
1912 ZERO(0x028); /* command */
1913 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1914 ZERO(0x004); /* timer */
1915 ZERO(0x008); /* irq err cause */
1916 ZERO(0x00c); /* irq err mask */
1917 ZERO(0x010); /* rq bah */
1918 ZERO(0x014); /* rq inp */
1919 ZERO(0x018); /* rq outp */
1920 ZERO(0x01c); /* respq bah */
1921 ZERO(0x024); /* respq outp */
1922 ZERO(0x020); /* respq inp */
1923 ZERO(0x02c); /* test control */
1924 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1925}
1926#undef ZERO
1927
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Reset one 50xx host controller block: clear its IRQ/config
 * registers and restore the HC timing register at offset 0x20.
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	/* keep bits in 0x1c1c1c1c, force 0x03030303 into the rest */
	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
1946
1947static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1948 unsigned int n_hc)
1949{
1950 unsigned int hc, port;
1951
1952 for (hc = 0; hc < n_hc; hc++) {
1953 for (port = 0; port < MV_PORTS_PER_HC; port++)
1954 mv5_reset_hc_port(hpriv, mmio,
1955 (hc * MV_PORTS_PER_HC) + port);
1956
1957 mv5_reset_one_hc(hpriv, mmio, hc);
1958 }
1959
1960 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05001961}
1962
Jeff Garzik101ffae2005-11-12 22:17:49 -05001963#undef ZERO
1964#define ZERO(reg) writel(0, mmio + (reg))
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001965static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
Jeff Garzik101ffae2005-11-12 22:17:49 -05001966{
Mark Lord02a121d2007-12-01 13:07:22 -05001967 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzik101ffae2005-11-12 22:17:49 -05001968 u32 tmp;
1969
1970 tmp = readl(mmio + MV_PCI_MODE);
1971 tmp &= 0xff00ffff;
1972 writel(tmp, mmio + MV_PCI_MODE);
1973
1974 ZERO(MV_PCI_DISC_TIMER);
1975 ZERO(MV_PCI_MSI_TRIGGER);
1976 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1977 ZERO(HC_MAIN_IRQ_MASK_OFS);
1978 ZERO(MV_PCI_SERR_MASK);
Mark Lord02a121d2007-12-01 13:07:22 -05001979 ZERO(hpriv->irq_cause_ofs);
1980 ZERO(hpriv->irq_mask_ofs);
Jeff Garzik101ffae2005-11-12 22:17:49 -05001981 ZERO(MV_PCI_ERR_LOW_ADDRESS);
1982 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1983 ZERO(MV_PCI_ERR_ATTRIBUTE);
1984 ZERO(MV_PCI_ERR_COMMAND);
1985}
1986#undef ZERO
1987
/* 60xx flash reset: do the common 50xx sequence, then additionally
 * set bits 5 and 6 in the GPIO port control register.
 */
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
1999
/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @mmio: base address of the HBA
 *
 * This routine only applies to 6xxx parts.
 *
 * Returns 0 on success, 1 if any step of the handshake (PCI master
 * flush, reset assert, reset de-assert) times out.
 *
 * LOCKING:
 *	Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* wait up to ~1ms for the PCI master to drain */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
2063
Jeff Garzik47c2b672005-11-12 21:13:17 -05002064static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002065 void __iomem *mmio)
2066{
2067 void __iomem *port_mmio;
2068 u32 tmp;
2069
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002070 tmp = readl(mmio + MV_RESET_CFG);
2071 if ((tmp & (1 << 0)) == 0) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002072 hpriv->signal[idx].amps = 0x7 << 8;
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002073 hpriv->signal[idx].pre = 0x1 << 5;
2074 return;
2075 }
2076
2077 port_mmio = mv_port_base(mmio, idx);
2078 tmp = readl(port_mmio + PHY_MODE2);
2079
2080 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2081 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2082}
2083
Jeff Garzik47c2b672005-11-12 21:13:17 -05002084static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002085{
Jeff Garzik47c2b672005-11-12 21:13:17 -05002086 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002087}
2088
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	/* Both workarounds apply to the same chip revisions (60X1 B2/C0) */
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	/* Toggle bits 16/31 of PHY_MODE2 with 200us settle delays,
	 * per the vendor errata sequence for these revisions.
	 */
	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* On B2 parts, PHY_MODE3 must be saved before touching
		 * PHY_MODE4 and restored afterwards (see matching writel
		 * below).
		 */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + PHY_MODE3);

		/* workaround for errata FEr SATA#10 (part 1) */
		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + PHY_MODE3);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
2155
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002156/* TODO: use the generic LED interface to configure the SATA Presence */
2157/* & Acitivy LEDs on the board */
2158static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2159 void __iomem *mmio)
2160{
2161 return;
2162}
2163
2164static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2165 void __iomem *mmio)
2166{
2167 void __iomem *port_mmio;
2168 u32 tmp;
2169
2170 port_mmio = mv_port_base(mmio, idx);
2171 tmp = readl(port_mmio + PHY_MODE2);
2172
2173 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2174 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2175}
2176
#undef ZERO
/* ZERO() here writes into the per-port register window (port_mmio) */
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be). So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	/* Zero out the EDMA queue/control registers, then restore the
	 * two registers that need non-zero defaults (config, IORDY).
	 */
	ZERO(0x028);		/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
2205
#undef ZERO

/* ZERO() here writes into the host-controller register window (hc_mmio) */
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	/* SoC parts have a single host controller, always at index 0 */
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);

}

#undef ZERO
2221
2222static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2223 void __iomem *mmio, unsigned int n_hc)
2224{
2225 unsigned int port;
2226
2227 for (port = 0; port < hpriv->n_ports; port++)
2228 mv_soc_reset_hc_port(hpriv, mmio, port);
2229
2230 mv_soc_reset_one_hc(hpriv, mmio);
2231
2232 return 0;
2233}
2234
2235static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2236 void __iomem *mmio)
2237{
2238 return;
2239}
2240
2241static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2242{
2243 return;
2244}
2245
Mark Lordb67a1062008-03-31 19:35:13 -04002246static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
2247{
2248 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);
2249
2250 ifctl = (ifctl & 0xf7f) | 0x9b1000; /* from chip spec */
2251 if (want_gen2i)
2252 ifctl |= (1 << 7); /* enable gen2i speed */
2253 writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
2254}
2255
/*
 * Caller must ensure that EDMA is not active,
 * by first doing mv_stop_edma() where needed.
 */
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	/* Belt-and-suspenders: disable the EDMA engine, then assert
	 * ATA_RST before reprogramming the interface config below.
	 */
	mv_stop_edma_engine(port_mmio);
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (!IS_GEN_I(hpriv)) {
		/* Enable 3.0gb/s link speed */
		mv_setup_ifctl(port_mmio, 1);
	}
	/*
	 * Strobing ATA_RST here causes a hard reset of the SATA transport,
	 * link, and physical layers. It resets all SATA interface registers
	 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
	 */
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
	udelay(25);	/* allow reset propagation */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	/* Re-apply chip-specific PHY errata fixes, which the reset wiped */
	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	/* NOTE(review): extra settle delay needed only on Gen-I chips --
	 * presumably a hardware quirk; confirm against datasheet.
	 */
	if (IS_GEN_I(hpriv))
		mdelay(1);
}
2286
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->base;
	int rc, attempts = 0, extra = 0;
	u32 sstatus;
	bool online;

	/* Hard-reset the channel; EDMA must be marked disabled afterwards */
	mv_reset_channel(hpriv, mmio, ap->port_no);
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	/* Workaround for errata FEr SATA#10 (part 2) */
	/* Retry the hardreset until SStatus settles to one of the
	 * acceptable values: 0x0 (no device), 0x113 or 0x123
	 * (device present with established link).
	 */
	do {
		const unsigned long *timing =
			sata_ehc_deb_timing(&link->eh_context);

		rc = sata_link_hardreset(link, timing, deadline + extra,
					 &online, NULL);
		if (rc)
			return rc;
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
			/* Force 1.5gb/s link speed and try again */
			mv_setup_ifctl(mv_ap_base(ap), 0);
			if (time_after(jiffies + HZ, deadline))
				extra = HZ; /* only extend it once, max */
		}
	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);

	return rc;
}
2321
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002322static void mv_eh_freeze(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04002323{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002324 struct mv_host_priv *hpriv = ap->host->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002325 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2326 u32 tmp, mask;
2327 unsigned int shift;
Brett Russ31961942005-09-30 01:36:00 -04002328
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002329 /* FIXME: handle coalescing completion events properly */
Brett Russ31961942005-09-30 01:36:00 -04002330
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002331 shift = ap->port_no * 2;
2332 if (hc > 0)
2333 shift++;
Brett Russ31961942005-09-30 01:36:00 -04002334
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002335 mask = 0x3 << shift;
Brett Russ31961942005-09-30 01:36:00 -04002336
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002337 /* disable assertion of portN err, done events */
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002338 tmp = readl(hpriv->main_mask_reg_addr);
2339 writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002340}
2341
static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	/* Ports 0-3 are on host controller 0, ports 4-7 on controller 1 */
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;	/* port index within its controller */
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp | mask, hpriv->main_mask_reg_addr);
}
2375
/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	/* Each shadow (taskfile) register occupies one 32-bit slot in
	 * the shadow block, indexed by its ATA_REG_* number.
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	/* SError is write-1-to-clear: writing back the value just read
	 * clears every bit that was set.
	 */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
2424
/* Identify the chip variant/revision and select ops + errata flags */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* FALLTHRU: chip_7042 shares all of the chip_6042 setup */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	/* PCIe parts keep the PCI interrupt registers at different offsets */
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
2564
/**
 *      mv_init_host - Perform some early initialization of the host.
 *	@host: ATA host to initialize
 *      @board_idx: controller index
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	/* PCI and SoC hosts keep the main IRQ registers at different
	 * offsets; cache the resolved addresses in hpriv.
	 */
	if (HAS_PCI(host)) {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_MASK_OFS;
	}
	/* global interrupt mask */
	writel(0, hpriv->main_mask_reg_addr);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* Snapshot per-port PHY signal settings before resetting, so
	 * phy_errata can restore them afterwards.
	 */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (HAS_PCI(host)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (HAS_PCI(host)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
		if (IS_GEN_I(hpriv))
			writelfl(~HC_MAIN_MASKED_IRQS_5,
				 hpriv->main_mask_reg_addr);
		else
			writelfl(~HC_MAIN_MASKED_IRQS,
				 hpriv->main_mask_reg_addr);

		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
			"PCI int cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr),
			readl(mmio + hpriv->irq_cause_ofs),
			readl(mmio + hpriv->irq_mask_ofs));
	} else {
		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
			 hpriv->main_mask_reg_addr);
		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr));
	}
done:
	return rc;
}
2668
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002669static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2670{
2671 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2672 MV_CRQB_Q_SZ, 0);
2673 if (!hpriv->crqb_pool)
2674 return -ENOMEM;
2675
2676 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2677 MV_CRPB_Q_SZ, 0);
2678 if (!hpriv->crpb_pool)
2679 return -ENOMEM;
2680
2681 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2682 MV_SG_TBL_SZ, 0);
2683 if (!hpriv->sg_tbl_pool)
2684 return -ENOMEM;
2685
2686 return 0;
2687}
2688
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002689/**
2690 * mv_platform_probe - handle a positive probe of an soc Marvell
2691 * host
2692 * @pdev: platform device found
2693 *
2694 * LOCKING:
2695 * Inherited from caller.
2696 */
2697static int mv_platform_probe(struct platform_device *pdev)
2698{
2699 static int printed_version;
2700 const struct mv_sata_platform_data *mv_platform_data;
2701 const struct ata_port_info *ppi[] =
2702 { &mv_port_info[chip_soc], NULL };
2703 struct ata_host *host;
2704 struct mv_host_priv *hpriv;
2705 struct resource *res;
2706 int n_ports, rc;
2707
2708 if (!printed_version++)
2709 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2710
2711 /*
2712 * Simple resource validation ..
2713 */
2714 if (unlikely(pdev->num_resources != 2)) {
2715 dev_err(&pdev->dev, "invalid number of resources\n");
2716 return -EINVAL;
2717 }
2718
2719 /*
2720 * Get the register base first
2721 */
2722 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2723 if (res == NULL)
2724 return -EINVAL;
2725
2726 /* allocate host */
2727 mv_platform_data = pdev->dev.platform_data;
2728 n_ports = mv_platform_data->n_ports;
2729
2730 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2731 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2732
2733 if (!host || !hpriv)
2734 return -ENOMEM;
2735 host->private_data = hpriv;
2736 hpriv->n_ports = n_ports;
2737
2738 host->iomap = NULL;
Saeed Bisharaf1cb0ea2008-02-18 07:42:28 -11002739 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2740 res->end - res->start + 1);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002741 hpriv->base -= MV_SATAHC0_REG_BASE;
2742
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002743 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2744 if (rc)
2745 return rc;
2746
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002747 /* initialize adapter */
2748 rc = mv_init_host(host, chip_soc);
2749 if (rc)
2750 return rc;
2751
2752 dev_printk(KERN_INFO, &pdev->dev,
2753 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2754 host->n_ports);
2755
2756 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2757 IRQF_SHARED, &mv6_sht);
2758}
2759
2760/*
2761 *
2762 * mv_platform_remove - unplug a platform interface
2763 * @pdev: platform device
2764 *
2765 * A platform bus SATA device has been unplugged. Perform the needed
2766 * cleanup. Also called on module unload for any active devices.
2767 */
2768static int __devexit mv_platform_remove(struct platform_device *pdev)
2769{
2770 struct device *dev = &pdev->dev;
2771 struct ata_host *host = dev_get_drvdata(dev);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002772
2773 ata_host_detach(host);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002774 return 0;
2775}
2776
/* Platform-bus glue for on-chip (SoC) SATA controllers */
static struct platform_driver mv_platform_driver = {
	.probe			= mv_platform_probe,
	.remove			= __devexit_p(mv_platform_remove),
	.driver			= {
				   .name = DRV_NAME,
				   .owner = THIS_MODULE,
				  },
};
2785
2786
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002787#ifdef CONFIG_PCI
/* Defined later in this file; forward-declared for mv_pci_driver below */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);


static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
};
2798
/*
 * module options
 */
/* Use PCI MSI; either zero (off, default) or non-zero to enable */
static int msi;
2803
2804
2805/* move to PCI layer or libata core? */
2806static int pci_go_64(struct pci_dev *pdev)
2807{
2808 int rc;
2809
2810 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2811 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2812 if (rc) {
2813 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2814 if (rc) {
2815 dev_printk(KERN_ERR, &pdev->dev,
2816 "64-bit DMA enable failed\n");
2817 return rc;
2818 }
2819 }
2820 } else {
2821 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2822 if (rc) {
2823 dev_printk(KERN_ERR, &pdev->dev,
2824 "32-bit DMA enable failed\n");
2825 return rc;
2826 }
2827 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2828 if (rc) {
2829 dev_printk(KERN_ERR, &pdev->dev,
2830 "32-bit consistent DMA enable failed\n");
2831 return rc;
2832 }
2833 }
2834
2835 return rc;
2836}
2837
Brett Russ05b308e2005-10-05 17:08:53 -04002838/**
2839 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002840 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002841 *
2842 * FIXME: complete this.
2843 *
2844 * LOCKING:
2845 * Inherited from caller.
2846 */
Tejun Heo4447d352007-04-17 23:44:08 +09002847static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002848{
Tejun Heo4447d352007-04-17 23:44:08 +09002849 struct pci_dev *pdev = to_pci_dev(host->dev);
2850 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07002851 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002852 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002853
2854 /* Use this to determine the HW stepping of the chip so we know
2855 * what errata to workaround
2856 */
Brett Russ31961942005-09-30 01:36:00 -04002857 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2858 if (scc == 0)
2859 scc_s = "SCSI";
2860 else if (scc == 0x01)
2861 scc_s = "RAID";
2862 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002863 scc_s = "?";
2864
2865 if (IS_GEN_I(hpriv))
2866 gen = "I";
2867 else if (IS_GEN_II(hpriv))
2868 gen = "II";
2869 else if (IS_GEN_IIE(hpriv))
2870 gen = "IIE";
2871 else
2872 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002873
Jeff Garzika9524a72005-10-30 14:39:11 -05002874 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002875 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2876 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002877 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2878}
2879
Brett Russ05b308e2005-10-05 17:08:53 -04002880/**
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002881 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
Brett Russ05b308e2005-10-05 17:08:53 -04002882 * @pdev: PCI device found
2883 * @ent: PCI device ID entry for the matched host
2884 *
2885 * LOCKING:
2886 * Inherited from caller.
2887 */
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002888static int mv_pci_init_one(struct pci_dev *pdev,
2889 const struct pci_device_id *ent)
Brett Russ20f733e2005-09-01 18:26:17 -04002890{
Jeff Garzik2dcb4072007-10-19 06:42:56 -04002891 static int printed_version;
Brett Russ20f733e2005-09-01 18:26:17 -04002892 unsigned int board_idx = (unsigned int)ent->driver_data;
Tejun Heo4447d352007-04-17 23:44:08 +09002893 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2894 struct ata_host *host;
2895 struct mv_host_priv *hpriv;
2896 int n_ports, rc;
Brett Russ20f733e2005-09-01 18:26:17 -04002897
Jeff Garzika9524a72005-10-30 14:39:11 -05002898 if (!printed_version++)
2899 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
Brett Russ20f733e2005-09-01 18:26:17 -04002900
Tejun Heo4447d352007-04-17 23:44:08 +09002901 /* allocate host */
2902 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2903
2904 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2905 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2906 if (!host || !hpriv)
2907 return -ENOMEM;
2908 host->private_data = hpriv;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002909 hpriv->n_ports = n_ports;
Tejun Heo4447d352007-04-17 23:44:08 +09002910
2911 /* acquire resources */
Tejun Heo24dc5f32007-01-20 16:00:28 +09002912 rc = pcim_enable_device(pdev);
2913 if (rc)
Brett Russ20f733e2005-09-01 18:26:17 -04002914 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04002915
Tejun Heo0d5ff562007-02-01 15:06:36 +09002916 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2917 if (rc == -EBUSY)
Tejun Heo24dc5f32007-01-20 16:00:28 +09002918 pcim_pin_device(pdev);
Tejun Heo0d5ff562007-02-01 15:06:36 +09002919 if (rc)
Tejun Heo24dc5f32007-01-20 16:00:28 +09002920 return rc;
Tejun Heo4447d352007-04-17 23:44:08 +09002921 host->iomap = pcim_iomap_table(pdev);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002922 hpriv->base = host->iomap[MV_PRIMARY_BAR];
Brett Russ20f733e2005-09-01 18:26:17 -04002923
Jeff Garzikd88184f2007-02-26 01:26:06 -05002924 rc = pci_go_64(pdev);
2925 if (rc)
2926 return rc;
2927
Mark Lordda2fa9b2008-01-26 18:32:45 -05002928 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2929 if (rc)
2930 return rc;
2931
Brett Russ20f733e2005-09-01 18:26:17 -04002932 /* initialize adapter */
Tejun Heo4447d352007-04-17 23:44:08 +09002933 rc = mv_init_host(host, board_idx);
Tejun Heo24dc5f32007-01-20 16:00:28 +09002934 if (rc)
2935 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04002936
Brett Russ31961942005-09-30 01:36:00 -04002937 /* Enable interrupts */
Tejun Heo6a59dcf2007-02-24 15:12:31 +09002938 if (msi && pci_enable_msi(pdev))
Brett Russ31961942005-09-30 01:36:00 -04002939 pci_intx(pdev, 1);
Brett Russ20f733e2005-09-01 18:26:17 -04002940
Brett Russ31961942005-09-30 01:36:00 -04002941 mv_dump_pci_cfg(pdev, 0x68);
Tejun Heo4447d352007-04-17 23:44:08 +09002942 mv_print_info(host);
Brett Russ20f733e2005-09-01 18:26:17 -04002943
Tejun Heo4447d352007-04-17 23:44:08 +09002944 pci_set_master(pdev);
Jeff Garzikea8b4db2007-07-17 02:21:50 -04002945 pci_try_set_mwi(pdev);
Tejun Heo4447d352007-04-17 23:44:08 +09002946 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
Jeff Garzikc5d3e452007-07-11 18:30:50 -04002947 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
Brett Russ20f733e2005-09-01 18:26:17 -04002948}
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002949#endif
Brett Russ20f733e2005-09-01 18:26:17 -04002950
/*
 * Forward declarations for the platform-bus entry points referenced by
 * mv_platform_driver (definitions live elsewhere in this file).
 */
static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);
2953
Brett Russ20f733e2005-09-01 18:26:17 -04002954static int __init mv_init(void)
2955{
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002956 int rc = -ENODEV;
2957#ifdef CONFIG_PCI
2958 rc = pci_register_driver(&mv_pci_driver);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002959 if (rc < 0)
2960 return rc;
2961#endif
2962 rc = platform_driver_register(&mv_platform_driver);
2963
2964#ifdef CONFIG_PCI
2965 if (rc < 0)
2966 pci_unregister_driver(&mv_pci_driver);
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002967#endif
2968 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04002969}
2970
/* Module exit: unregister both the PCI and platform drivers. */
static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}
2978
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
/*
 * NOTE(review): mv_pci_tbl appears to belong to the CONFIG_PCI-only part
 * of this file; this unconditional device-table export may break
 * !CONFIG_PCI builds -- verify against the table's definition.
 */
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

#ifdef CONFIG_PCI
/* msi is only meaningful for PCI-attached controllers */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);