blob: 6a74205466093228f15f9b24317d6174bdb86571 [file] [log] [blame]
/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
24
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  2) Improve/fix IRQ and error handling sequences.

  3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).

  4) Think about TCQ support here, and for libata in general
  with controllers that support it via host-queuing hardware
  (a software-only implementation could be a nightmare).

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  7) Fix/reenable hot plug/unplug (should happen as a side-effect of (2) above).

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is quite often not
  worth the latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

*/
66
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.20"
Brett Russ20f733e2005-09-01 18:26:17 -040086
/*
 * Chip register offsets and bit definitions.  All offsets are relative
 * to the controller's memory-mapped register BAR unless noted otherwise.
 */
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	/* SoC integrated controllers, no PCI interface */
	MV_FLAG_SOC		= (1 << 28),

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	/* Command ReQuest Block field layout */
	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	/* Command ResPonse Block field layout */
	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),	/* bits 31-9, 7-6 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),
	HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,

	LTMODE_OFS		= 0x30c,
	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */

	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	SATA_IFCTL_OFS		= 0x344,
	SATA_IFSTAT_OFS		= 0x34c,
	VENDOR_UNIQUE_FIS_OFS	= 0x35c,

	FIS_CFG_OFS		= 0x360,
	FIS_CFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */

	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CFG	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX |
				 /* temporary, until we fix hotplug: */
				 (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON),

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
};
367
/* Chip-generation / bus-type tests on hp_flags (see MV_HP_* above) */
#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500372
enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill-sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
387
/* Index into mv_port_info[] and per-chip setup paths */
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};
398
/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};
406
/* Gen-IIE variant of the Command ReQuest Block */
struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};
414
/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};
421
/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};
429
/* Per-port private data: DMA descriptor rings and their bus addresses */
struct mv_port_priv {
	struct mv_crqb		*crqb;		/* request ring (CPU view) */
	dma_addr_t		crqb_dma;	/* request ring (device view) */
	struct mv_crpb		*crpb;		/* response ring (CPU view) */
	dma_addr_t		crpb_dma;	/* response ring (device view) */
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];	/* one SG table per tag */
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;	/* next request-queue slot */
	unsigned int		resp_idx;	/* next response-queue slot */

	u32			pp_flags;	/* MV_PP_FLAG_* */
};
443
/* Per-port PHY signal tuning values (read from chip during preamp setup) */
struct mv_port_signal {
	u32			amps;
	u32			pre;
};
448
/* Host-wide private data shared by all ports of one controller */
struct mv_host_priv {
	u32			hp_flags;	/* MV_HP_* flags */
	struct mv_port_signal	signal[8];	/* per-port PHY tuning */
	const struct mv_hw_ops	*ops;		/* chip-family hardware ops */
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_cause_reg_addr;
	void __iomem		*main_mask_reg_addr;
	u32			irq_cause_ofs;	/* PCI vs PCIe register offsets */
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};
469
/* Chip-family-specific low-level hardware operations */
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};
481
/* libata callback and helper forward declarations */
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

/* Gen-I (50xx) hardware ops */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

/* Gen-II/IIE (60xx/6042/7042) and SoC hardware ops */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
				      void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500530
/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};
540
/* Gen-II/IIE SCSI host template: adds NCQ queueing on top of the base SHT */
static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};
547
/* Gen-I (50xx) port operations */
static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
	.post_internal_cmd	= ATA_OP_NULL,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
566
/* Gen-II (60xx) port operations: inherits Gen-I, adds NCQ defer + SCR regs */
static struct ata_port_operations mv6_ops = {
	.inherits		= &mv5_ops,
	.qc_defer		= ata_std_qc_defer,
	.dev_config		= mv6_dev_config,
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,
};
574
/* Gen-IIE (6042/7042/SoC) port operations: IIE-format command blocks */
static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};
580
/* Per-chip-type port configuration; indexed by enum chip_type */
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
636
/*
 * PCI device IDs handled by this driver.  The driver_data field is the
 * chip_* board index used to look up mv_port_info[] at probe time.
 */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
666
/* Low-level hardware quirk/reset hooks for Gen-I (50xx) chips */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
675
/* Low-level hardware quirk/reset hooks for Gen-II/IIE (60xx/70xx) chips */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
684
/*
 * Low-level hardware hooks for system-on-chip (non-PCI) integrations.
 * Shares the Gen-II PHY errata handler; everything else is SoC-specific.
 */
static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};
693
Brett Russ20f733e2005-09-01 18:26:17 -0400694/*
695 * Functions
696 */
697
/*
 * writelfl - "write long with flush": write a 32-bit register, then read
 * it back so the write cannot linger in a PCI posted-write buffer.
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
703
Brett Russ20f733e2005-09-01 18:26:17 -0400704static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
705{
706 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
707}
708
Jeff Garzikc9d39132005-11-13 17:47:51 -0500709static inline unsigned int mv_hc_from_port(unsigned int port)
710{
711 return port >> MV_PORT_HC_SHIFT;
712}
713
714static inline unsigned int mv_hardport_from_port(unsigned int port)
715{
716 return port & MV_PORT_MASK;
717}
718
719static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
720 unsigned int port)
721{
722 return mv_hc_base(base, mv_hc_from_port(port));
723}
724
Brett Russ20f733e2005-09-01 18:26:17 -0400725static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
726{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500727 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500728 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500729 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400730}
731
Mark Lorde12bef52008-03-31 19:33:56 -0400732static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
733{
734 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
735 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
736
737 return hc_mmio + ofs;
738}
739
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500740static inline void __iomem *mv_host_base(struct ata_host *host)
741{
742 struct mv_host_priv *hpriv = host->private_data;
743 return hpriv->base;
744}
745
Brett Russ20f733e2005-09-01 18:26:17 -0400746static inline void __iomem *mv_ap_base(struct ata_port *ap)
747{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500748 return mv_port_base(mv_host_base(ap->host), ap->port_no);
Brett Russ20f733e2005-09-01 18:26:17 -0400749}
750
Jeff Garzikcca39742006-08-24 03:19:22 -0400751static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400752{
Jeff Garzikcca39742006-08-24 03:19:22 -0400753 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400754}
755
/*
 * mv_set_edma_ptrs - Program the EDMA request/response queue registers.
 * @port_mmio: per-port register base
 * @hpriv: host private data (consulted for the XX42A0 errata flag)
 * @pp: port private data holding queue DMA addresses and soft indices
 *
 * Writes the 64-bit base addresses of the CRQB (request) and CRPB
 * (response) rings and seeds the hardware in/out pointers from the
 * software indices.  On chips with the XX42A0 errata the full low
 * 32 bits of the base must be rewritten along with the index.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* CRQB ring must be 1KB aligned */
	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	/* CRPB ring must be 256-byte aligned */
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
795
/**
 * mv_start_dma - Enable eDMA engine
 * @ap: ATA channel
 * @port_mmio: per-port register base address
 * @pp: port private data
 * @protocol: taskfile protocol of the command about to be issued
 *
 * If eDMA is already running but in the wrong NCQ mode for @protocol,
 * it is stopped first so it can be reconfigured.  When (re)starting,
 * stale EDMA/HC/FIS interrupt causes are cleared, the EDMA config and
 * queue pointers are programmed, and EDMA_EN is set.
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		/* NCQ mode change requires an EDMA restart */
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
					mv_host_base(ap->host), hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			   (CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(ap, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}
848
/**
 * mv_stop_edma_engine - Disable eDMA engine
 * @port_mmio: io base address
 *
 * Requests eDMA disable and polls (up to ~100ms) until the hardware
 * confirms EDMA_EN has cleared.
 *
 * Returns 0 when the engine is confirmed off, -EIO on timeout.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}
872
/*
 * mv_stop_edma - Stop eDMA on @ap if our cached state says it is running.
 *
 * Clears the software EDMA_EN flag before asking the hardware to stop,
 * then reports -EIO (with a console message) if the engine refuses to
 * shut down.  Returns 0 if eDMA was already off or stopped cleanly.
 */
static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		return -EIO;
	}
	return 0;
}
887
#ifdef ATA_DEBUG
/*
 * mv_dump_mem - Hex-dump @bytes of MMIO space from @start, four 32-bit
 * words per line.  Debug builds only.
 */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
902
/*
 * mv_dump_pci_cfg - Hex-dump the first @bytes of @pdev's PCI config
 * space, four dwords per line.  Compiles to a no-op body unless
 * ATA_DEBUG is defined.
 */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			/* best-effort dump; read errors are ignored */
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
/*
 * mv_dump_all_regs - Debug dump of PCI config, global, HC and per-port
 * register blocks.  Pass a negative @port to dump every port/HC;
 * otherwise only @port and its owning HC are dumped.  @pdev may be
 * NULL (e.g. SoC) to skip the PCI config dump.  ATA_DEBUG builds only.
 */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		/* dump everything */
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
963
Brett Russ20f733e2005-09-01 18:26:17 -0400964static unsigned int mv_scr_offset(unsigned int sc_reg_in)
965{
966 unsigned int ofs;
967
968 switch (sc_reg_in) {
969 case SCR_STATUS:
970 case SCR_CONTROL:
971 case SCR_ERROR:
972 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
973 break;
974 case SCR_ACTIVE:
975 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
976 break;
977 default:
978 ofs = 0xffffffffU;
979 break;
980 }
981 return ofs;
982}
983
Tejun Heoda3dbb12007-07-16 14:29:40 +0900984static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -0400985{
986 unsigned int ofs = mv_scr_offset(sc_reg_in);
987
Tejun Heoda3dbb12007-07-16 14:29:40 +0900988 if (ofs != 0xffffffffU) {
989 *val = readl(mv_ap_base(ap) + ofs);
990 return 0;
991 } else
992 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -0400993}
994
Tejun Heoda3dbb12007-07-16 14:29:40 +0900995static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -0400996{
997 unsigned int ofs = mv_scr_offset(sc_reg_in);
998
Tejun Heoda3dbb12007-07-16 14:29:40 +0900999 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -04001000 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001001 return 0;
1002 } else
1003 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001004}
1005
Mark Lordf2738272008-01-26 18:32:29 -05001006static void mv6_dev_config(struct ata_device *adev)
1007{
1008 /*
1009 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1010 * See mv_qc_prep() for more info.
1011 */
1012 if (adev->flags & ATA_DFLAG_NCQ)
1013 if (adev->max_sectors > ATA_MAX_SECTORS)
1014 adev->max_sectors = ATA_MAX_SECTORS;
1015}
1016
/*
 * mv_edma_cfg - Program the per-port EDMA configuration register.
 * @ap: port being configured
 * @want_ncq: nonzero to enable NCQ queuing mode
 *
 * Builds a chip-generation-specific EDMA_CFG value, sets or clears the
 * NCQ bit (mirroring the choice in pp->pp_flags via MV_PP_FLAG_NCQ_EN),
 * and writes it with a posted-write flush.
 */
static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
{
	u32 cfg;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1048
/*
 * mv_port_free_dma_mem - Release the port's DMA-pool allocations.
 *
 * Frees the CRQB and CRPB rings and the per-tag scatter/gather tables,
 * NULLing each pointer so a partial-allocation failure path in
 * mv_port_start() can call this safely.  Note the Gen-I aliasing:
 * sg_tbl[1..] all point at sg_tbl[0], so only tag 0 is actually freed
 * there.
 */
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}
1077
/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * Returns 0 on success, -ENOMEM if any allocation fails (all
 * partially-acquired DMA memory is released first).
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;
	int tag;

	/* device-managed: freed automatically when the device goes away */
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			/* Gen-I: every tag aliases the tag-0 table */
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	/* hold the host lock while touching shared EDMA registers */
	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
1145
/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	/* eDMA must be quiesced before its rings are freed */
	mv_stop_edma(ap);
	mv_port_free_dma_mem(ap);
}
1160
/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.  Each DMA-mapped
 * segment is split so that no ePRD entry crosses a 64KB boundary
 * (the hardware length field is 16 bits).
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	/* each NCQ tag owns its own ePRD table */
	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			/* clamp so this entry stops at the next 64KB line */
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	/* flag the final entry so the engine knows where the table ends */
	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
1204
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001205static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001206{
Mark Lord559eeda2006-05-19 16:40:15 -04001207 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001208 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001209 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001210}
1211
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * Only DMA and NCQ protocol commands are prepared here; for any other
 * protocol this routine returns immediately and the command is issued
 * through the normal (non-EDMA) path.  Otherwise, it handles prep of
 * the CRQB (command request block), does some sanity checking, and
 * calls the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* point this CRQB slot at the ePRD table for this command's tag */
	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accomodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	/* non-data commands carry no SG table */
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1301
/**
 * mv_qc_prep_iie - Host specific command preparation (Gen IIE).
 * @qc: queued command to prepare
 *
 * Only DMA and NCQ protocol commands are prepared here; for any other
 * protocol this routine returns immediately and the command is issued
 * through the normal (non-EDMA) path.  Otherwise, it handles prep of
 * the CRQB (command request block), does some sanity checking, and
 * calls the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* Only DMA and NCQ commands go through the EDMA request queue;
	 * everything else is issued via the shadow registers instead.
	 */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	/* The tag doubles as both the CRQB slot id and (for NCQ) the
	 * host-queue tag, so it must fit within the queue depth.
	 */
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	/* double 16-bit shift == >>32 without a shift-count warning when
	 * dma_addr_t is only 32 bits wide
	 */
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	/* Pack the taskfile into the Gen IIE fixed-layout command words
	 * (unlike Gen I/II, which use variable-length reg/value pairs).
	 */
	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
		(tf->command << 16) |
		(tf->feature << 24)
	);
	crqb->ata_cmd[1] = cpu_to_le32(
		(tf->lbal << 0) |
		(tf->lbam << 8) |
		(tf->lbah << 16) |
		(tf->device << 24)
	);
	crqb->ata_cmd[2] = cpu_to_le32(
		(tf->hob_lbal << 0) |
		(tf->hob_lbam << 8) |
		(tf->hob_lbah << 16) |
		(tf->hob_feature << 24)
	);
	crqb->ata_cmd[3] = cpu_to_le32(
		(tf->nsect << 0) |
		(tf->hob_nsect << 8)
	);

	/* Non-data commands carry no scatter/gather table. */
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1369
Brett Russ05b308e2005-10-05 17:08:53 -04001370/**
1371 * mv_qc_issue - Initiate a command to the host
1372 * @qc: queued command to start
1373 *
1374 * This routine simply redirects to the general purpose routine
1375 * if command is not DMA. Else, it sanity checks our local
1376 * caches of the request producer/consumer indices then enables
1377 * DMA and bumps the request producer index.
1378 *
1379 * LOCKING:
1380 * Inherited from caller.
1381 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/*
		 * We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_edma(ap);
		return ata_sff_qc_issue(qc);
	}

	/* Make sure EDMA is running in the mode this protocol needs
	 * (may restart the engine if the NCQ mode has to change).
	 */
	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	/* Advance our software producer index past the CRQB that
	 * mv_qc_prep()/mv_qc_prep_iie() filled in.
	 */
	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	/* 0 == command queued to hardware successfully */
	return 0;
}
1412
/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @qc: affected command (may be NULL); receives the error mask when non-NULL
 *
 * In most cases, just clear the interrupt and move on. However,
 * some cases require an eDMA reset, which also performs a COMRESET.
 * The SERR case requires a clear of pending errors in the SATA
 * SERROR register. Finally, if the port disabled DMA,
 * update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001427static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
Brett Russ20f733e2005-09-01 18:26:17 -04001428{
Brett Russ31961942005-09-30 01:36:00 -04001429 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001430 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1431 struct mv_port_priv *pp = ap->private_data;
1432 struct mv_host_priv *hpriv = ap->host->private_data;
1433 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1434 unsigned int action = 0, err_mask = 0;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001435 struct ata_eh_info *ehi = &ap->link.eh_info;
Brett Russ20f733e2005-09-01 18:26:17 -04001436
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001437 ata_ehi_clear_desc(ehi);
Brett Russ20f733e2005-09-01 18:26:17 -04001438
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001439 if (!edma_enabled) {
1440 /* just a guess: do we need to do this? should we
1441 * expand this, and do it in all cases?
1442 */
Tejun Heo936fd732007-08-06 18:36:23 +09001443 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1444 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Brett Russ20f733e2005-09-01 18:26:17 -04001445 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001446
1447 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1448
1449 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1450
1451 /*
1452 * all generations share these EDMA error cause bits
1453 */
1454
1455 if (edma_err_cause & EDMA_ERR_DEV)
1456 err_mask |= AC_ERR_DEV;
1457 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
Jeff Garzik6c1153e2007-07-13 15:20:15 -04001458 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001459 EDMA_ERR_INTRL_PAR)) {
1460 err_mask |= AC_ERR_ATA_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09001461 action |= ATA_EH_RESET;
Tejun Heob64bbc32007-07-16 14:29:39 +09001462 ata_ehi_push_desc(ehi, "parity error");
Brett Russafb0edd2005-10-05 17:08:42 -04001463 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001464 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1465 ata_ehi_hotplugged(ehi);
1466 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
Tejun Heob64bbc32007-07-16 14:29:39 +09001467 "dev disconnect" : "dev connect");
Tejun Heocf480622008-01-24 00:05:14 +09001468 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001469 }
1470
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04001471 if (IS_GEN_I(hpriv)) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001472 eh_freeze_mask = EDMA_EH_FREEZE_5;
1473
1474 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
Harvey Harrison5ab063e2008-02-13 21:14:14 -08001475 pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001476 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001477 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001478 }
1479 } else {
1480 eh_freeze_mask = EDMA_EH_FREEZE;
1481
1482 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
Harvey Harrison5ab063e2008-02-13 21:14:14 -08001483 pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001484 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001485 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001486 }
1487
1488 if (edma_err_cause & EDMA_ERR_SERR) {
Tejun Heo936fd732007-08-06 18:36:23 +09001489 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1490 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001491 err_mask = AC_ERR_ATA_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09001492 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001493 }
1494 }
Brett Russ20f733e2005-09-01 18:26:17 -04001495
1496 /* Clear EDMA now that SERR cleanup done */
Mark Lord3606a382008-01-26 18:28:23 -05001497 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001498
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001499 if (!err_mask) {
1500 err_mask = AC_ERR_OTHER;
Tejun Heocf480622008-01-24 00:05:14 +09001501 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001502 }
1503
1504 ehi->serror |= serr;
1505 ehi->action |= action;
1506
1507 if (qc)
1508 qc->err_mask |= err_mask;
1509 else
1510 ehi->err_mask |= err_mask;
1511
1512 if (edma_err_cause & eh_freeze_mask)
1513 ata_port_freeze(ap);
1514 else
1515 ata_port_abort(ap);
1516}
1517
1518static void mv_intr_pio(struct ata_port *ap)
1519{
1520 struct ata_queued_cmd *qc;
1521 u8 ata_status;
1522
1523 /* ignore spurious intr if drive still BUSY */
1524 ata_status = readb(ap->ioaddr.status_addr);
1525 if (unlikely(ata_status & ATA_BUSY))
1526 return;
1527
1528 /* get active ATA command */
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001529 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001530 if (unlikely(!qc)) /* no active tag */
1531 return;
1532 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1533 return;
1534
1535 /* and finally, complete the ATA command */
1536 qc->err_mask |= ac_err_mask(ata_status);
1537 ata_qc_complete(qc);
1538}
1539
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* Drain every completed CRPB between our software consumer index
	 * and the hardware producer index.
	 */
	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			/* hand off to error handling; note the early return
			 * deliberately skips the OUT pointer update below
			 */
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	/* Tell the hardware how far we got by writing back the OUT pointer
	 * (single write after the loop, rather than once per entry).
	 */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1605
Brett Russ05b308e2005-10-05 17:08:53 -04001606/**
1607 * mv_host_intr - Handle all interrupts on the given host controller
Jeff Garzikcca39742006-08-24 03:19:22 -04001608 * @host: host specific structure
Brett Russ05b308e2005-10-05 17:08:53 -04001609 * @relevant: port error bits relevant to this host controller
1610 * @hc: which host controller we're to look at
1611 *
1612 * Read then write clear the HC interrupt status then walk each
1613 * port connected to the HC and see if it needs servicing. Port
1614 * success ints are reported in the HC interrupt status reg, the
1615 * port error ints are reported in the higher level main
1616 * interrupt status register and thus are passed in via the
1617 * 'relevant' argument.
1618 *
1619 * LOCKING:
1620 * Inherited from caller.
1621 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0, last_port;

	/* HC0 serves ports 0..3, HC1 serves ports 4..7 */
	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* SoC (non-PCI) hosts may expose fewer than MV_PORTS_PER_HC ports */
	if (HAS_PCI(host))
		last_port = port0 + MV_PORTS_PER_HC;
	else
		last_port = port0 + hpriv->n_ports;
	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	/* write-to-clear the cause bits we just read */
	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < last_port; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		pp = ap->private_data;

		/* each port owns two bits in the main cause register */
		shift = port << 1;			/* (port * 2) */
		if (port >= MV_PORTS_PER_HC)
			shift++;	/* skip bit 8 in the HC Main IRQ reg */

		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			/* polled commands are handled by their issuer */
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		/* success path: completion via EDMA response queue when
		 * EDMA is on, via the shadow status register otherwise
		 */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}
1688
/* Handle a PCI-level error: log/dump state, clear the cause register,
 * and freeze every online port for full error recovery.
 */
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	/* acknowledge/clear the PCI error cause bits */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			/* record the cause on the first affected port only */
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
1728
Brett Russ05b308e2005-10-05 17:08:53 -04001729/**
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001730 * mv_interrupt - Main interrupt event handler
Brett Russ05b308e2005-10-05 17:08:53 -04001731 * @irq: unused
1732 * @dev_instance: private data; in this case the host structure
Brett Russ05b308e2005-10-05 17:08:53 -04001733 *
1734 * Read the read only register to determine if any host
1735 * controllers have pending interrupts. If so, call lower level
1736 * routine to handle. Also check for PCI errors which are only
1737 * reported here.
1738 *
Jeff Garzik8b260242005-11-12 12:32:50 -05001739 * LOCKING:
Jeff Garzikcca39742006-08-24 03:19:22 -04001740 * This routine holds the host lock while processing pending
Brett Russ05b308e2005-10-05 17:08:53 -04001741 * interrupts.
1742 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = hpriv->base;
	u32 irq_stat, irq_mask;

	/* Note to self: &host->lock == &ap->host->lock == ap->lock */
	spin_lock(&host->lock);

	irq_stat = readl(hpriv->main_cause_reg_addr);
	irq_mask = readl(hpriv->main_mask_reg_addr);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	/* PCI errors take precedence and freeze all ports */
	if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	/* dispatch each host controller's slice of the main cause bits */
	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1784
Jeff Garzikc9d39132005-11-13 17:47:51 -05001785static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1786{
1787 unsigned int ofs;
1788
1789 switch (sc_reg_in) {
1790 case SCR_STATUS:
1791 case SCR_ERROR:
1792 case SCR_CONTROL:
1793 ofs = sc_reg_in * sizeof(u32);
1794 break;
1795 default:
1796 ofs = 0xffffffffU;
1797 break;
1798 }
1799 return ofs;
1800}
1801
Tejun Heoda3dbb12007-07-16 14:29:40 +09001802static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001803{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001804 struct mv_host_priv *hpriv = ap->host->private_data;
1805 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001806 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001807 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1808
Tejun Heoda3dbb12007-07-16 14:29:40 +09001809 if (ofs != 0xffffffffU) {
1810 *val = readl(addr + ofs);
1811 return 0;
1812 } else
1813 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001814}
1815
Tejun Heoda3dbb12007-07-16 14:29:40 +09001816static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001817{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001818 struct mv_host_priv *hpriv = ap->host->private_data;
1819 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001820 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001821 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1822
Tejun Heoda3dbb12007-07-16 14:29:40 +09001823 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001824 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001825 return 0;
1826 } else
1827 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001828}
1829
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001830static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
Jeff Garzik522479f2005-11-12 22:14:02 -05001831{
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001832 struct pci_dev *pdev = to_pci_dev(host->dev);
Jeff Garzik522479f2005-11-12 22:14:02 -05001833 int early_5080;
1834
Auke Kok44c10132007-06-08 15:46:36 -07001835 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
Jeff Garzik522479f2005-11-12 22:14:02 -05001836
1837 if (!early_5080) {
1838 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1839 tmp |= (1 << 0);
1840 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1841 }
1842
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001843 mv_reset_pci_bus(host, mmio);
Jeff Garzik522479f2005-11-12 22:14:02 -05001844}
1845
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	/* magic value from the Marvell vendor driver; individual bit
	 * meanings not documented here — TODO confirm against datasheet
	 */
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1850
/* Capture the 50xx PHY pre-emphasis and amplitude fields so that
 * mv5_phy_errata() can re-apply them after a channel reset.
 */
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}
1862
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): "|= ~(1 << 0)" sets every bit EXCEPT bit 0, while
	 * mv5_reset_bus() does "|= (1 << 0)" on this same register.  This
	 * looks like it may have been intended as "&= ~(1 << 0)" (clear
	 * bit 0) — verify against the 50xx datasheet before changing.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1875
/* Apply 50xx PHY errata fixes and restore the saved pre-emphasis and
 * amplitude settings captured earlier by mv5_read_preamp().
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	/* pre (bits 12:11) and amps (bits 7:5) fields of MV5_PHY_MODE */
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	/* extra LT-mode/PHY-control tweaks needed only on 50XXB0 parts */
	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* replace the pre/amps fields with the values saved at probe time */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1901
Jeff Garzikc9d39132005-11-13 17:47:51 -05001902
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Reset one 50xx port: stop/reset the EDMA channel, then zero its
 * queue pointers and interrupt registers back to power-on defaults.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1932
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Reset one 50xx host controller's shared registers. */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	/* read-modify-write of HC register 0x20; magic masks from the
	 * vendor driver — TODO confirm field meanings against datasheet
	 */
	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
1951
1952static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1953 unsigned int n_hc)
1954{
1955 unsigned int hc, port;
1956
1957 for (hc = 0; hc < n_hc; hc++) {
1958 for (port = 0; port < MV_PORTS_PER_HC; port++)
1959 mv5_reset_hc_port(hpriv, mmio,
1960 (hc * MV_PORTS_PER_HC) + port);
1961
1962 mv5_reset_one_hc(hpriv, mmio, hc);
1963 }
1964
1965 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05001966}
1967
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
/* Reset the chip's PCI interface registers: restore PCI mode defaults
 * and clear all timers, masks, and error-latch registers.
 */
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	/* clear the middle 8 bits of MV_PCI_MODE, keep the rest */
	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	/* cause/mask offsets differ per chip generation, so they live
	 * in host private data rather than as compile-time constants
	 */
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
1992
1993static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1994{
1995 u32 tmp;
1996
1997 mv5_reset_flash(hpriv, mmio);
1998
1999 tmp = readl(mmio + MV_GPIO_PORT_CTL);
2000 tmp &= 0x3;
2001 tmp |= (1 << 5) | (1 << 6);
2002 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2003}
2004
2005/**
2006 * mv6_reset_hc - Perform the 6xxx global soft reset
2007 * @mmio: base address of the HBA
2008 *
2009 * This routine only applies to 6xxx parts.
2010 *
2011 * LOCKING:
2012 * Inherited from caller.
2013 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* wait up to ~1ms for outstanding PCI master transactions to drain */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	/* 0 == success, 1 == any step of the reset sequence failed */
	return rc;
}
2068
/* Capture the 6xxx PHY pre-emphasis and amplitude settings for later
 * restoration; falls back to fixed defaults when MV_RESET_CFG bit 0
 * indicates the per-port values are not valid.
 */
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		/* use default signal levels */
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
2088
Jeff Garzik47c2b672005-11-12 21:13:17 -05002089static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002090{
Jeff Garzik47c2b672005-11-12 21:13:17 -05002091 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002092}
2093
/*
 * mv6_phy_errata - apply PHY-mode register workarounds to one port
 * @hpriv: host private data (hp_flags select which errata to apply,
 *	   signal[] holds the amps/pre values saved by mv6_read_preamp)
 * @mmio: base address of the HBA
 * @port: port number to fix up
 *
 * Applies errata workarounds for 60X1B2/60X1C0 parts to PHY_MODE2/3/4,
 * then restores the saved amplitude/pre-emphasis values into PHY_MODE2.
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		/* pulse bit 31 with bit 16 cleared, then release both */
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* on 60X1B2 parts, save PHY_MODE3 so it can be
		 * restored after the PHY_MODE4 write below
		 */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + PHY_MODE3);

		/* workaround for errata FEr SATA#10 (part 1) */
		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + PHY_MODE3);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
2160
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002161/* TODO: use the generic LED interface to configure the SATA Presence */
2162/* & Acitivy LEDs on the board */
2163static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2164 void __iomem *mmio)
2165{
2166 return;
2167}
2168
2169static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2170 void __iomem *mmio)
2171{
2172 void __iomem *port_mmio;
2173 u32 tmp;
2174
2175 port_mmio = mv_port_base(mmio, idx);
2176 tmp = readl(port_mmio + PHY_MODE2);
2177
2178 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2179 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2180}
2181
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/*
 * mv_soc_reset_hc_port - reset one SoC SATA channel and zero its EDMA regs.
 * Offsets below are within the per-port register block.
 */
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
2210
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
/*
 * mv_soc_reset_one_hc - zero a few registers in host-controller block 0.
 * Only HC 0 is touched here.  Offsets 0x00c-0x014 are within the HC
 * register block; presumably irq/coalescing state -- verify vs datasheet.
 */
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);

}
2224
2225#undef ZERO
2226
2227static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2228 void __iomem *mmio, unsigned int n_hc)
2229{
2230 unsigned int port;
2231
2232 for (port = 0; port < hpriv->n_ports; port++)
2233 mv_soc_reset_hc_port(hpriv, mmio, port);
2234
2235 mv_soc_reset_one_hc(hpriv, mmio);
2236
2237 return 0;
2238}
2239
2240static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2241 void __iomem *mmio)
2242{
2243 return;
2244}
2245
2246static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2247{
2248 return;
2249}
2250
Mark Lordb67a1062008-03-31 19:35:13 -04002251static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
2252{
2253 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);
2254
2255 ifctl = (ifctl & 0xf7f) | 0x9b1000; /* from chip spec */
2256 if (want_gen2i)
2257 ifctl |= (1 << 7); /* enable gen2i speed */
2258 writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
2259}
2260
/*
 * Caller must ensure that EDMA is not active,
 * by first doing mv_stop_edma() where needed.
 *
 * Hard-resets one SATA channel: stops the EDMA engine, re-enables
 * gen2i speed on non-Gen-I chips, strobes ATA_RST, then re-applies
 * the per-chip PHY errata workarounds.
 */
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	mv_stop_edma_engine(port_mmio);
	/* NOTE(review): ATA_RST is asserted both here and again below,
	 * after the ifctl setup -- order appears deliberate; confirm
	 * against the chip errata before changing.
	 */
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (!IS_GEN_I(hpriv)) {
		/* Enable 3.0gb/s link speed */
		mv_setup_ifctl(port_mmio, 1);
	}
	/*
	 * Strobing ATA_RST here causes a hard reset of the SATA transport,
	 * link, and physical layers.  It resets all SATA interface registers
	 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
	 */
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
	udelay(25);	/* allow reset propagation */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}
2291
/*
 * mv_hardreset - libata hardreset handler for these chips
 * @link: link to reset
 * @class: resulting device class (set by sata_link_hardreset)
 * @deadline: EH deadline in jiffies
 *
 * Resets the channel, then hard-resets the link, retrying while
 * SStatus indicates the link has not settled (errata FEr SATA#10).
 * After 5 attempts with SStatus == 0x121 on non-Gen-I chips, the
 * link is forced down to 1.5 Gb/s and the deadline extended once.
 *
 * Returns 0 on success, or the error from sata_link_hardreset().
 */
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->base;
	int rc, attempts = 0, extra = 0;
	u32 sstatus;
	bool online;

	mv_reset_channel(hpriv, mmio, ap->port_no);
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	/* Workaround for errata FEr SATA#10 (part 2) */
	do {
		const unsigned long *timing =
				sata_ehc_deb_timing(&link->eh_context);

		rc = sata_link_hardreset(link, timing, deadline + extra,
					 &online, NULL);
		if (rc)
			return rc;
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
			/* Force 1.5gb/s link speed and try again */
			mv_setup_ifctl(mv_ap_base(ap), 0);
			if (time_after(jiffies + HZ, deadline))
				extra = HZ; /* only extend it once, max */
		}
	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);

	return rc;
}
2326
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002327static void mv_eh_freeze(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04002328{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002329 struct mv_host_priv *hpriv = ap->host->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002330 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2331 u32 tmp, mask;
2332 unsigned int shift;
Brett Russ31961942005-09-30 01:36:00 -04002333
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002334 /* FIXME: handle coalescing completion events properly */
Brett Russ31961942005-09-30 01:36:00 -04002335
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002336 shift = ap->port_no * 2;
2337 if (hc > 0)
2338 shift++;
Brett Russ31961942005-09-30 01:36:00 -04002339
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002340 mask = 0x3 << shift;
Brett Russ31961942005-09-30 01:36:00 -04002341
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002342 /* disable assertion of portN err, done events */
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002343 tmp = readl(hpriv->main_mask_reg_addr);
2344 writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002345}
2346
/*
 * mv_eh_thaw - re-enable this port's interrupts after EH.
 * Clears any latched EDMA errors and pending HC irq events for the
 * port, then unmasks its err/done bits in the main irq mask.
 */
static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	/* two mask bits per port; second-HC ports are one bit higher */
	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;	/* port index within its own HC */
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp | mask, hpriv->main_mask_reg_addr);
}
2380
/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 * The taskfile "shadow" registers live in a block of u32 slots,
	 * one per ATA register, starting at SHD_BLK_OFS.
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
2429
/*
 * mv_chip_id - identify the chip generation/stepping and select ops.
 * @host: ATA host being probed
 * @board_idx: index into the board/chip table
 *
 * Sets hpriv->ops, generation flags, and per-stepping errata flags,
 * then records which irq cause/mask register layout (PCI vs PCIe)
 * this chip uses.  Returns 0 on success, 1 on an invalid board index.
 */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			           "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			           "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* fallthrough: chip_7042 shares the chip_6042 setup below */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			           "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs  = PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs  = PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
2569
/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 * @board_idx: controller index
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * Returns 0 on success, or the error from mv_chip_id()/reset_hc().
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	/* select the main irq cause/mask register pair: PCI hosts and
	 * SoC hosts keep them at different offsets
	 */
	if (HAS_PCI(host)) {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_MASK_OFS;
	}
	/* global interrupt mask */
	writel(0, hpriv->main_mask_reg_addr);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* save PHY signal settings before the reset clobbers them */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (HAS_PCI(host)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (HAS_PCI(host)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
		if (IS_GEN_I(hpriv))
			writelfl(~HC_MAIN_MASKED_IRQS_5,
				 hpriv->main_mask_reg_addr);
		else
			writelfl(~HC_MAIN_MASKED_IRQS,
				 hpriv->main_mask_reg_addr);

		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
			"PCI int cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr),
			readl(mmio + hpriv->irq_cause_ofs),
			readl(mmio + hpriv->irq_mask_ofs));
	} else {
		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
			 hpriv->main_mask_reg_addr);
		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr));
	}
done:
	return rc;
}
2673
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002674static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2675{
2676 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2677 MV_CRQB_Q_SZ, 0);
2678 if (!hpriv->crqb_pool)
2679 return -ENOMEM;
2680
2681 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2682 MV_CRPB_Q_SZ, 0);
2683 if (!hpriv->crpb_pool)
2684 return -ENOMEM;
2685
2686 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2687 MV_SG_TBL_SZ, 0);
2688 if (!hpriv->sg_tbl_pool)
2689 return -ENOMEM;
2690
2691 return 0;
2692}
2693
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002694/**
2695 * mv_platform_probe - handle a positive probe of an soc Marvell
2696 * host
2697 * @pdev: platform device found
2698 *
2699 * LOCKING:
2700 * Inherited from caller.
2701 */
2702static int mv_platform_probe(struct platform_device *pdev)
2703{
2704 static int printed_version;
2705 const struct mv_sata_platform_data *mv_platform_data;
2706 const struct ata_port_info *ppi[] =
2707 { &mv_port_info[chip_soc], NULL };
2708 struct ata_host *host;
2709 struct mv_host_priv *hpriv;
2710 struct resource *res;
2711 int n_ports, rc;
2712
2713 if (!printed_version++)
2714 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2715
2716 /*
2717 * Simple resource validation ..
2718 */
2719 if (unlikely(pdev->num_resources != 2)) {
2720 dev_err(&pdev->dev, "invalid number of resources\n");
2721 return -EINVAL;
2722 }
2723
2724 /*
2725 * Get the register base first
2726 */
2727 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2728 if (res == NULL)
2729 return -EINVAL;
2730
2731 /* allocate host */
2732 mv_platform_data = pdev->dev.platform_data;
2733 n_ports = mv_platform_data->n_ports;
2734
2735 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2736 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2737
2738 if (!host || !hpriv)
2739 return -ENOMEM;
2740 host->private_data = hpriv;
2741 hpriv->n_ports = n_ports;
2742
2743 host->iomap = NULL;
Saeed Bisharaf1cb0ea2008-02-18 07:42:28 -11002744 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2745 res->end - res->start + 1);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002746 hpriv->base -= MV_SATAHC0_REG_BASE;
2747
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002748 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2749 if (rc)
2750 return rc;
2751
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002752 /* initialize adapter */
2753 rc = mv_init_host(host, chip_soc);
2754 if (rc)
2755 return rc;
2756
2757 dev_printk(KERN_INFO, &pdev->dev,
2758 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2759 host->n_ports);
2760
2761 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2762 IRQF_SHARED, &mv6_sht);
2763}
2764
2765/*
2766 *
2767 * mv_platform_remove - unplug a platform interface
2768 * @pdev: platform device
2769 *
2770 * A platform bus SATA device has been unplugged. Perform the needed
2771 * cleanup. Also called on module unload for any active devices.
2772 */
2773static int __devexit mv_platform_remove(struct platform_device *pdev)
2774{
2775 struct device *dev = &pdev->dev;
2776 struct ata_host *host = dev_get_drvdata(dev);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002777
2778 ata_host_detach(host);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002779 return 0;
2780}
2781
2782static struct platform_driver mv_platform_driver = {
2783 .probe = mv_platform_probe,
2784 .remove = __devexit_p(mv_platform_remove),
2785 .driver = {
2786 .name = DRV_NAME,
2787 .owner = THIS_MODULE,
2788 },
2789};
2790
2791
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002792#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);


/* PCI-bus glue: probe/remove hooks for the PCI flavours of these chips */
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
};

/*
 * module options
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
2808
2809
2810/* move to PCI layer or libata core? */
2811static int pci_go_64(struct pci_dev *pdev)
2812{
2813 int rc;
2814
2815 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2816 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2817 if (rc) {
2818 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2819 if (rc) {
2820 dev_printk(KERN_ERR, &pdev->dev,
2821 "64-bit DMA enable failed\n");
2822 return rc;
2823 }
2824 }
2825 } else {
2826 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2827 if (rc) {
2828 dev_printk(KERN_ERR, &pdev->dev,
2829 "32-bit DMA enable failed\n");
2830 return rc;
2831 }
2832 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2833 if (rc) {
2834 dev_printk(KERN_ERR, &pdev->dev,
2835 "32-bit consistent DMA enable failed\n");
2836 return rc;
2837 }
2838 }
2839
2840 return rc;
2841}
2842
Brett Russ05b308e2005-10-05 17:08:53 -04002843/**
2844 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002845 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002846 *
2847 * FIXME: complete this.
2848 *
2849 * LOCKING:
2850 * Inherited from caller.
2851 */
Tejun Heo4447d352007-04-17 23:44:08 +09002852static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002853{
Tejun Heo4447d352007-04-17 23:44:08 +09002854 struct pci_dev *pdev = to_pci_dev(host->dev);
2855 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07002856 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002857 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002858
2859 /* Use this to determine the HW stepping of the chip so we know
2860 * what errata to workaround
2861 */
Brett Russ31961942005-09-30 01:36:00 -04002862 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2863 if (scc == 0)
2864 scc_s = "SCSI";
2865 else if (scc == 0x01)
2866 scc_s = "RAID";
2867 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002868 scc_s = "?";
2869
2870 if (IS_GEN_I(hpriv))
2871 gen = "I";
2872 else if (IS_GEN_II(hpriv))
2873 gen = "II";
2874 else if (IS_GEN_IIE(hpriv))
2875 gen = "IIE";
2876 else
2877 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002878
Jeff Garzika9524a72005-10-30 14:39:11 -05002879 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002880 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2881 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002882 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2883}
2884
/**
 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * Allocates the ata_host and driver-private state, maps the primary
 * BAR, configures DMA masks, initializes the adapter, and activates
 * the host.  All resources are managed (devm_/pcim_), so any early
 * error return unwinds automatically.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;	/* print the banner only once */
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		/* BAR held by another driver: keep device enabled on detach */
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		/* MSI requested but unavailable: fall back to legacy INTx */
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);	/* best-effort; failure is harmless */
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
#endif
Brett Russ20f733e2005-09-01 18:26:17 -04002955
/* Platform-bus entry points (defined earlier in this file); declared here
 * so mv_init()/mv_exit() below can reference mv_platform_driver.
 */
static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);
2958
/*
 * mv_init - module entry: register the PCI (if configured) and platform
 * drivers.  If the platform registration fails after a successful PCI
 * registration, the PCI driver is unregistered again so the module
 * loads all-or-nothing.
 */
static int __init mv_init(void)
{
	/* -ENODEV default matters only when CONFIG_PCI is off */
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	/* unwind the PCI registration if the platform one failed */
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif
	return rc;
}
2975
/* mv_exit - module exit: tear down both driver registrations. */
static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}
2983
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
/* allow autoloading when the platform device "sata_mv" appears */
MODULE_ALIAS("platform:" DRV_NAME);

#ifdef CONFIG_PCI
/* read-only after load (0444): choose MSI vs. legacy INTx at modprobe time */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);