blob: 3e2f779ae9c6cc6fe7ad447bffc041f6763adc86 [file] [log] [blame]
Brett Russ20f733e2005-09-01 18:26:17 -04001/*
2 * sata_mv.c - Marvell SATA support
3 *
Mark Lorde12bef52008-03-31 19:33:56 -04004 * Copyright 2008: Marvell Corporation, all rights reserved.
Jeff Garzik8b260242005-11-12 12:32:50 -05005 * Copyright 2005: EMC Corporation, all rights reserved.
Jeff Garzike2b1be52005-11-18 14:04:23 -05006 * Copyright 2005 Red Hat, Inc. All rights reserved.
Brett Russ20f733e2005-09-01 18:26:17 -04007 *
8 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2 of the License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 */
24
Jeff Garzik4a05e202007-05-24 23:40:15 -040025/*
26 sata_mv TODO list:
27
28 1) Needs a full errata audit for all chipsets. I implemented most
29 of the errata workarounds found in the Marvell vendor driver, but
30 I distinctly remember a couple workarounds (one related to PCI-X)
31 are still needed.
32
Mark Lord1fd2e1c2008-01-26 18:33:59 -050033 2) Improve/fix IRQ and error handling sequences.
34
35 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
36
37 4) Think about TCQ support here, and for libata in general
	with controllers that support it via host-queuing hardware
39 (a software-only implementation could be a nightmare).
Jeff Garzik4a05e202007-05-24 23:40:15 -040040
41 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
42
43 6) Add port multiplier support (intermediate)
44
Jeff Garzik4a05e202007-05-24 23:40:15 -040045 8) Develop a low-power-consumption strategy, and implement it.
46
47 9) [Experiment, low priority] See if ATAPI can be supported using
48 "unknown FIS" or "vendor-specific FIS" support, or something creative
49 like that.
50
51 10) [Experiment, low priority] Investigate interrupt coalescing.
	Quite often, especially with PCI Message Signalled Interrupts (MSI),
	the overhead reduced by interrupt mitigation is not worth the
	added latency cost.
55
56 11) [Experiment, Marvell value added] Is it possible to use target
57 mode to cross-connect two Linux boxes with Marvell cards? If so,
58 creating LibATA target mode support would be very interesting.
59
60 Target mode, for those without docs, is the ability to directly
61 connect two SATA controllers.
62
Jeff Garzik4a05e202007-05-24 23:40:15 -040063*/
64
Brett Russ20f733e2005-09-01 18:26:17 -040065#include <linux/kernel.h>
66#include <linux/module.h>
67#include <linux/pci.h>
68#include <linux/init.h>
69#include <linux/blkdev.h>
70#include <linux/delay.h>
71#include <linux/interrupt.h>
Andrew Morton8d8b6002008-02-04 23:43:44 -080072#include <linux/dmapool.h>
Brett Russ20f733e2005-09-01 18:26:17 -040073#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050074#include <linux/device.h>
Saeed Bisharaf351b2d2008-02-01 18:08:03 -050075#include <linux/platform_device.h>
76#include <linux/ata_platform.h>
Brett Russ20f733e2005-09-01 18:26:17 -040077#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050078#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040079#include <scsi/scsi_device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040080#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040081
/* Driver identification, reported via MODULE_* and printk prefixes */
#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.20"
Brett Russ20f733e2005-09-01 18:26:17 -040084
/*
 * Chip register map and flag bits.
 *
 * All *_OFS values are byte offsets into the controller's MMIO window
 * (relative to the PCI/SATAHC/port register bases defined below);
 * the (1 << n) values are bit masks within those registers.
 */
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	/* SoC integrated controllers, no PCI interface */
	MV_FLAG_SOC		= (1 << 28),

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	/* CRQB (command request block) field layout */
	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	/* CRPB (command response block) field layout */
	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	/* 7042 uses PCIe; same role as the PCI_IRQ_* pair above */
	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),	/* bits 31-9, 7-6 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),
	HC_MAIN_MASKED_IRQS_SOC	= (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
	LTMODE_OFS		= 0x30c,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	SATA_IFCTL_OFS		= 0x344,
	SATA_IFSTAT_OFS		= 0x34c,
	VENDOR_UNIQUE_FIS_OFS	= 0x35c,
	FIS_CFG_OFS		= 0x360,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CFG	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	/* NOTE: bit 8 is overloaded; meaning depends on chip generation */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	/* Errors that do not require freezing the port */
	EDMA_ERR_IRQ_TRANSIENT	= EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	/* Gen II/IIE error conditions that freeze the port for EH */
	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	/* Gen I (50xx) equivalent of EDMA_EH_FREEZE */
	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
};
357
/* Chip-generation and bus-type tests on struct mv_host_priv / ata_host */
#define IS_GEN_I(hpriv)		((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define HAS_PCI(host)		(!((host)->ports[0]->flags & MV_FLAG_SOC))
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500362
/* DMA-related constants kept in a separate enum (values exceed int bit tricks above) */
enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
377
/*
 * Board index; order must match the mv_port_info[] array below,
 * since PCI-table driver_data values index into that array.
 */
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};
388
/* Command ReQuest Block: 32B
 * Hardware-defined layout (little-endian fields, hence __le types);
 * do not reorder or repack.
 */
struct mv_crqb {
	__le32			sg_addr;	/* ePRD table address, low 32 bits */
	__le32			sg_addr_hi;	/* ePRD table address, high 32 bits */
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};
396
/* Gen-IIE (6042/7042) variant of the command request block */
struct mv_crqb_iie {
	__le32			addr;		/* ePRD table address, low 32 bits */
	__le32			addr_hi;	/* ePRD table address, high 32 bits */
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};
404
/* Command ResPonse Block: 8B
 * Written by the controller into the response queue; little-endian layout.
 */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};
411
412/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
413struct mv_sg {
Mark Lorde1469872006-05-22 19:02:03 -0400414 __le32 addr;
415 __le32 flags_size;
416 __le32 addr_hi;
417 __le32 reserved;
Brett Russ20f733e2005-09-01 18:26:17 -0400418};
419
/* Per-port private data, hung off ata_port->private_data */
struct mv_port_priv {
	struct mv_crqb		*crqb;		/* request queue (CPU view) */
	dma_addr_t		crqb_dma;	/* request queue (device view) */
	struct mv_crpb		*crpb;		/* response queue (CPU view) */
	dma_addr_t		crpb_dma;	/* response queue (device view) */
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];	/* one ePRD table per tag */
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;	/* next request queue slot */
	unsigned int		resp_idx;	/* next response queue slot */

	u32			pp_flags;	/* MV_PP_FLAG_* bits */
};
433
/* Per-port PHY signal parameters captured by the read_preamp hooks */
struct mv_port_signal {
	u32			amps;	/* amplitude */
	u32			pre;	/* pre-emphasis */
};
438
/* Per-host private data, hung off ata_host->private_data */
struct mv_host_priv {
	u32			hp_flags;	/* MV_HP_* bits */
	struct mv_port_signal	signal[8];	/* one entry per possible port */
	const struct mv_hw_ops	*ops;		/* chip-family hardware hooks */
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_cause_reg_addr;
	void __iomem		*main_mask_reg_addr;
	u32			irq_cause_ofs;	/* PCI vs PCIe register offsets */
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};
459
/* Chip-family-specific hardware operations (Gen-I/Gen-II/SoC variants below) */
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};
471
/* Forward declarations: libata callbacks and per-chip-family helpers */
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

/* Gen-I (50xx) hardware hooks */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

/* Gen-II/IIE (60xx/6042/7042) hardware hooks */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);

/* SoC (integrated, non-PCI) hardware hooks */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500520
/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
/* SCSI host template for Gen-I chips (no NCQ) */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};
530
/* SCSI host template for Gen-II/IIE chips (NCQ-capable) */
static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,	/* one slot reserved */
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};
537
/* Port operations for Gen-I chips; base for the Gen-II/IIE ops below */
static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
	.post_internal_cmd	= ATA_OP_NULL,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
556
/* Gen-II ops: inherit Gen-I, add NCQ defer/dev_config and Gen-II SCR access */
static struct ata_port_operations mv6_ops = {
	.inherits		= &mv5_ops,
	.qc_defer		= ata_std_qc_defer,
	.dev_config		= mv6_dev_config,
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,
};
564
/* Gen-IIE ops: inherit Gen-II, use the IIE-format CRQB prep */
static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.dev_config		= ATA_OP_NULL,	/* drop mv6_dev_config */
	.qc_prep		= mv_qc_prep_iie,
};
570
/* Board descriptions; indexed by enum chip_type — keep order in sync */
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
625
/*
 * PCI IDs supported by this driver.  The driver_data value is a
 * board index (chip_*) selecting the matching mv_port_info[] entry.
 */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
655
/* Hardware setup hooks using the mv5_* helper implementations. */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata	= mv5_phy_errata,
	.enable_leds	= mv5_enable_leds,
	.read_preamp	= mv5_read_preamp,
	.reset_hc	= mv5_reset_hc,
	.reset_flash	= mv5_reset_flash,
	.reset_bus	= mv5_reset_bus,
};
664
/* Hardware setup hooks using the mv6_* helpers; bus reset goes
 * through the generic PCI bus reset routine. */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata	= mv6_phy_errata,
	.enable_leds	= mv6_enable_leds,
	.read_preamp	= mv6_read_preamp,
	.reset_hc	= mv6_reset_hc,
	.reset_flash	= mv6_reset_flash,
	.reset_bus	= mv_reset_pci_bus,
};
673
/* Hardware setup hooks for SoC-integrated controllers; the PHY
 * errata handler is shared with the mv6 family. */
static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata	= mv6_phy_errata,
	.enable_leds	= mv_soc_enable_leds,
	.read_preamp	= mv_soc_read_preamp,
	.reset_hc	= mv_soc_reset_hc,
	.reset_flash	= mv_soc_reset_flash,
	.reset_bus	= mv_soc_reset_bus,
};
682
Brett Russ20f733e2005-09-01 18:26:17 -0400683/*
684 * Functions
685 */
686
/* Write a register, then read it back so the posted PCI write is
 * flushed before the caller proceeds. */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
692
Brett Russ20f733e2005-09-01 18:26:17 -0400693static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
694{
695 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
696}
697
Jeff Garzikc9d39132005-11-13 17:47:51 -0500698static inline unsigned int mv_hc_from_port(unsigned int port)
699{
700 return port >> MV_PORT_HC_SHIFT;
701}
702
703static inline unsigned int mv_hardport_from_port(unsigned int port)
704{
705 return port & MV_PORT_MASK;
706}
707
708static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
709 unsigned int port)
710{
711 return mv_hc_base(base, mv_hc_from_port(port));
712}
713
Brett Russ20f733e2005-09-01 18:26:17 -0400714static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
715{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500716 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500717 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500718 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400719}
720
Mark Lorde12bef52008-03-31 19:33:56 -0400721static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
722{
723 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
724 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
725
726 return hc_mmio + ofs;
727}
728
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500729static inline void __iomem *mv_host_base(struct ata_host *host)
730{
731 struct mv_host_priv *hpriv = host->private_data;
732 return hpriv->base;
733}
734
Brett Russ20f733e2005-09-01 18:26:17 -0400735static inline void __iomem *mv_ap_base(struct ata_port *ap)
736{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500737 return mv_port_base(mv_host_base(ap->host), ap->port_no);
Brett Russ20f733e2005-09-01 18:26:17 -0400738}
739
Jeff Garzikcca39742006-08-24 03:19:22 -0400740static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400741{
Jeff Garzikcca39742006-08-24 03:19:22 -0400742 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400743}
744
/*
 * mv_set_edma_ptrs - program EDMA queue bases and in/out pointers
 * @port_mmio: port MMIO base
 * @hpriv: host private data (consulted for errata flags)
 * @pp: port private data holding queue DMA addresses and cached indices
 *
 * Writes the request (CRQB) and response (CRPB) queue base registers
 * and re-synchronizes the hardware in/out pointer registers with the
 * software-cached req_idx/resp_idx.  On chips with the XX42A0 errata,
 * the "other side" pointer register also needs the low 32 bits of the
 * queue base OR'd in.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);	/* CRQB ring must be 1KB aligned */
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);	/* CRPB ring must be 256B aligned */
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
784
/**
 * mv_start_dma - Enable eDMA engine
 * @ap: ATA channel to manipulate
 * @port_mmio: port MMIO base address
 * @pp: port private data
 * @protocol: taskfile protocol of the command about to be issued
 *
 * If eDMA is already running but in the wrong NCQ mode for @protocol,
 * it is stopped first.  When (re)starting, pending EDMA error/IRQ
 * indications are cleared, the EDMA config and queue pointers are
 * reprogrammed, and the engine is enabled.
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		/* NCQ mode can only change across a stop/start cycle */
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
					mv_host_base(ap->host), hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			   (CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(ap, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}
837
/**
 * mv_stop_edma_engine - Disable eDMA engine
 * @port_mmio: io base address
 *
 * Requests eDMA disable, then polls until the hardware confirms the
 * engine is off.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * Returns 0 on success, -EIO if the engine fails to stop
 * (10000 polls * 10us = ~100ms worst case).
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}
861
/*
 * mv_stop_edma - stop eDMA on @ap if the cached state says it is running
 *
 * Clears the EDMA_EN flag in pp_flags before asking the hardware to
 * stop.  Returns 0 if already stopped or stopped cleanly, -EIO (with a
 * message) if the engine would not shut down.
 */
static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		return -EIO;
	}
	return 0;
}
876
#ifdef ATA_DEBUG
/* Hex-dump @bytes of MMIO space starting at @start, four u32s per line. */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
891
/* Hex-dump the first @bytes of @pdev's PCI config space, four dwords
 * per line.  Compiles to an empty function unless ATA_DEBUG is set. */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
/* Debug dump of PCI config, global PCI, HC, EDMA and SATA registers.
 * A negative @port dumps all ports/HCs; otherwise only @port's HC and
 * registers are dumped.  Empty unless ATA_DEBUG is set. */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
952
Brett Russ20f733e2005-09-01 18:26:17 -0400953static unsigned int mv_scr_offset(unsigned int sc_reg_in)
954{
955 unsigned int ofs;
956
957 switch (sc_reg_in) {
958 case SCR_STATUS:
959 case SCR_CONTROL:
960 case SCR_ERROR:
961 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
962 break;
963 case SCR_ACTIVE:
964 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
965 break;
966 default:
967 ofs = 0xffffffffU;
968 break;
969 }
970 return ofs;
971}
972
Tejun Heoda3dbb12007-07-16 14:29:40 +0900973static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -0400974{
975 unsigned int ofs = mv_scr_offset(sc_reg_in);
976
Tejun Heoda3dbb12007-07-16 14:29:40 +0900977 if (ofs != 0xffffffffU) {
978 *val = readl(mv_ap_base(ap) + ofs);
979 return 0;
980 } else
981 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -0400982}
983
Tejun Heoda3dbb12007-07-16 14:29:40 +0900984static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -0400985{
986 unsigned int ofs = mv_scr_offset(sc_reg_in);
987
Tejun Heoda3dbb12007-07-16 14:29:40 +0900988 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -0400989 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +0900990 return 0;
991 } else
992 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -0400993}
994
Mark Lordf2738272008-01-26 18:32:29 -0500995static void mv6_dev_config(struct ata_device *adev)
996{
997 /*
998 * We don't have hob_nsect when doing NCQ commands on Gen-II.
999 * See mv_qc_prep() for more info.
1000 */
1001 if (adev->flags & ATA_DFLAG_NCQ)
1002 if (adev->max_sectors > ATA_MAX_SECTORS)
1003 adev->max_sectors = ATA_MAX_SECTORS;
1004}
1005
/*
 * mv_edma_cfg - program the EDMA configuration register for @ap
 * @ap: ATA channel
 * @want_ncq: non-zero to configure the engine for NCQ operation
 *
 * Builds a chip-generation-specific config word, records the resulting
 * NCQ state in pp->pp_flags, and writes the register.
 */
static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
{
	u32 cfg;
	struct mv_port_priv *pp    = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio    = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1037
/*
 * mv_port_free_dma_mem - release a port's CRQB/CRPB/SG DMA pool memory
 * @ap: ATA channel whose private DMA buffers are freed
 *
 * Safe on partially-initialized ports: each pointer is checked (and
 * NULLed) before freeing, so it can serve as the error-unwind path of
 * mv_port_start() as well as normal teardown.
 */
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			/* Gen-I tags > 0 alias tag 0: free only once */
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}
1066
/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * Returns 0 on success, -ENOMEM on any allocation failure (partially
 * allocated pool memory is released via mv_port_free_dma_mem()).
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;
	int tag;

	/* devm-managed: freed automatically with the device */
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			/* Gen-I: all tags share the tag-0 table */
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, 0);	/* start out in non-NCQ mode */
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
1134
/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_port_free_dma_mem(ap);
}
1149
/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.  Segments are split so
 * that no ePRD entry crosses a 64KB boundary (offset + len <= 0x10000).
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	/* each NCQ tag has its own ePRD table */
	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			/* clamp so the entry stays within its 64KB window */
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
1193
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001194static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001195{
Mark Lord559eeda2006-05-19 16:40:15 -04001196 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001197 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001198 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001199}
1200
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* only DMA and NCQ protocols go through the CRQB path */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1290
/**
 * mv_qc_prep_iie - Host specific command preparation (Gen IIE chips).
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* only DMA and NCQ protocols go through the CRQB path */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;	/* tag also in host-queue field */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1358
/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_edma(ap);
		return ata_sff_qc_issue(qc);
	}

	/* (re)start EDMA for the protocol in use, then advance the
	 * software producer index past the CRQB filled in by qc_prep.
	 */
	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1400
/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @qc: active command, if any, to receive the error mask (may be NULL)
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which also performs a COMRESET.
 * The SERR case requires a clear of pending errors in the SATA
 * SERROR register.  Finally, if the port disabled DMA,
 * update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */

	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	/* Gen I (50xx) and Gen II/IIE use different self-disable bits
	 * and different EH freeze masks.
	 */
	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	/* attribute the error to the in-flight command when there is one */
	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	/* freeze the port for serious errors, otherwise just abort */
	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
1505
/* Handle a device interrupt for a port running in PIO (non-EDMA) mode:
 * read the status register (which also acks the interrupt) and complete
 * the active command, if any.
 */
static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}
1527
/* Drain the EDMA response queue for @ap: complete every command the
 * hardware has posted a CRPB (response) for, then write the updated
 * out-pointer back so the hardware knows the entries were consumed.
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			/* error path takes over; out-pointer write is
			 * deliberately skipped (EDMA will be reset)
			 */
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1593
/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0, last_port;

	/* each HC serves MV_PORTS_PER_HC consecutive ports */
	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	if (HAS_PCI(host))
		last_port = port0 + MV_PORTS_PER_HC;
	else
		last_port = port0 + hpriv->n_ports;
	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	/* ack (write-clear) the bits we are about to service */
	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < last_port; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		pp = ap->private_data;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC)
			shift++;	/* skip bit 8 in the HC Main IRQ reg */

		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			/* polled commands are handled synchronously */
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		/* dispatch completion handling by the port's current mode */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}
1676
/* Handle a PCI-level error interrupt: dump state, clear the cause
 * register, and freeze every online port so libata EH recovers them.
 */
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	/* write-clear the PCI error cause */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			/* record the cause once, on the first affected port */
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
1716
/**
 * mv_interrupt - Main interrupt event handler
 * @irq: unused
 * @dev_instance: private data; in this case the host structure
 *
 * Read the read only register to determine if any host
 * controllers have pending interrupts.  If so, call lower level
 * routine to handle.  Also check for PCI errors which are only
 * reported here.
 *
 * LOCKING:
 * This routine holds the host lock while processing pending
 * interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = hpriv->base;
	u32 irq_stat, irq_mask;

	/* Note to self: &host->lock == &ap->host->lock == ap->lock */
	spin_lock(&host->lock);

	irq_stat = readl(hpriv->main_cause_reg_addr);
	irq_mask = readl(hpriv->main_mask_reg_addr);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	/* PCI errors preempt all per-HC handling */
	if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	/* dispatch each host controller with pending bits */
	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1772
Jeff Garzikc9d39132005-11-13 17:47:51 -05001773static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1774{
1775 unsigned int ofs;
1776
1777 switch (sc_reg_in) {
1778 case SCR_STATUS:
1779 case SCR_ERROR:
1780 case SCR_CONTROL:
1781 ofs = sc_reg_in * sizeof(u32);
1782 break;
1783 default:
1784 ofs = 0xffffffffU;
1785 break;
1786 }
1787 return ofs;
1788}
1789
Tejun Heoda3dbb12007-07-16 14:29:40 +09001790static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001791{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001792 struct mv_host_priv *hpriv = ap->host->private_data;
1793 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001794 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001795 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1796
Tejun Heoda3dbb12007-07-16 14:29:40 +09001797 if (ofs != 0xffffffffU) {
1798 *val = readl(addr + ofs);
1799 return 0;
1800 } else
1801 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001802}
1803
Tejun Heoda3dbb12007-07-16 14:29:40 +09001804static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001805{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001806 struct mv_host_priv *hpriv = ap->host->private_data;
1807 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001808 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001809 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1810
Tejun Heoda3dbb12007-07-16 14:29:40 +09001811 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001812 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001813 return 0;
1814 } else
1815 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001816}
1817
/* Gen I (50xx) bus reset: except on very early 5080 (rev 0) parts,
 * set bit 0 of the expansion-ROM BAR control register first, then
 * perform the common PCI bus reset.
 */
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}
1833
/* Gen I (50xx) flash-controller reset: write the magic reset value.
 * NOTE(review): 0x0fcfffff is undocumented here — presumably from the
 * Marvell datasheet; confirm before changing.
 */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1838
/* Gen I (50xx): capture the board's pre-emphasis and amplitude PHY
 * settings for port @idx into hpriv->signal[], for later replay by
 * mv5_phy_errata().
 */
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}
1850
/* Gen I (50xx): configure GPIO / expansion-ROM BAR control for LEDs. */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): this ORs in ~(1 << 0), i.e. sets every bit EXCEPT
	 * bit 0.  The sibling mv5_reset_bus() uses "tmp |= (1 << 0)"; if
	 * the intent here was to clear bit 0, this should have been
	 * "tmp &= ~(1 << 0)".  Confirm against the 50xx datasheet before
	 * changing — left as-is to preserve behavior.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1863
/* Gen I (50xx) PHY errata workaround: optionally fix APM/squelch for
 * the 50XXB0 errata, then restore the board's pre-emphasis/amplitude
 * values (captured earlier by mv5_read_preamp) into MV5_PHY_MODE.
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	/* pre (bits 12:11) and amps (bits 7:5) fields of MV5_PHY_MODE */
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* replace the pre/amps fields with the saved board settings */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1889
Jeff Garzikc9d39132005-11-13 17:47:51 -05001890
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Gen I (50xx): reset one port's EDMA channel and zero its EDMA
 * registers back to a known state.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1920
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Gen I (50xx): reset one host controller's shared registers.
 * NOTE(review): the 0x20 register and its 0x1c1c1c1c/0x03030303 masks
 * are undocumented here — presumably per the Marvell datasheet.
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
1939
1940static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1941 unsigned int n_hc)
1942{
1943 unsigned int hc, port;
1944
1945 for (hc = 0; hc < n_hc; hc++) {
1946 for (port = 0; port < MV_PORTS_PER_HC; port++)
1947 mv5_reset_hc_port(hpriv, mmio,
1948 (hc * MV_PORTS_PER_HC) + port);
1949
1950 mv5_reset_one_hc(hpriv, mmio, hc);
1951 }
1952
1953 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05001954}
1955
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
/* Reset the chip's PCI-facing state: clear the upper PCI mode bits,
 * reset timers/triggers, mask all interrupts, and clear every latched
 * PCI error register.
 */
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
1980
/* Gen II (60xx): flash reset — reuse the 50xx sequence, then adjust
 * the GPIO port control register (set bits 5 and 6, keep bits 1:0).
 */
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
1992
/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @hpriv: host private data (unused here)
 * @mmio: base address of the HBA
 * @n_hc: number of host controllers (unused here)
 *
 * This routine only applies to 6xxx parts.  Returns 0 on success,
 * 1 on any timeout (PCI master won't flush, reset won't latch or
 * clear).
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* wait up to ~1ms for the PCI master to drain */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
2056
/* Gen II (60xx): capture the pre-emphasis and amplitude PHY settings
 * for port @idx into hpriv->signal[].  If bit 0 of MV_RESET_CFG is
 * clear, fall back to fixed defaults instead of reading PHY_MODE2.
 */
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
2076
/* Gen II (60xx): enable LEDs by setting bits 5 and 6 of GPIO control. */
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
2081
Jeff Garzikc9d39132005-11-13 17:47:51 -05002082static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002083 unsigned int port)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002084{
Jeff Garzikc9d39132005-11-13 17:47:51 -05002085 void __iomem *port_mmio = mv_port_base(mmio, port);
2086
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002087 u32 hp_flags = hpriv->hp_flags;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002088 int fix_phy_mode2 =
2089 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002090 int fix_phy_mode4 =
Jeff Garzik47c2b672005-11-12 21:13:17 -05002091 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2092 u32 m2, tmp;
2093
2094 if (fix_phy_mode2) {
2095 m2 = readl(port_mmio + PHY_MODE2);
2096 m2 &= ~(1 << 16);
2097 m2 |= (1 << 31);
2098 writel(m2, port_mmio + PHY_MODE2);
2099
2100 udelay(200);
2101
2102 m2 = readl(port_mmio + PHY_MODE2);
2103 m2 &= ~((1 << 16) | (1 << 31));
2104 writel(m2, port_mmio + PHY_MODE2);
2105
2106 udelay(200);
2107 }
2108
2109 /* who knows what this magic does */
2110 tmp = readl(port_mmio + PHY_MODE3);
2111 tmp &= ~0x7F800000;
2112 tmp |= 0x2A800000;
2113 writel(tmp, port_mmio + PHY_MODE3);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002114
2115 if (fix_phy_mode4) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002116 u32 m4;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002117
2118 m4 = readl(port_mmio + PHY_MODE4);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002119
2120 if (hp_flags & MV_HP_ERRATA_60X1B2)
Mark Lorde12bef52008-03-31 19:33:56 -04002121 tmp = readl(port_mmio + PHY_MODE3);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002122
Mark Lorde12bef52008-03-31 19:33:56 -04002123 /* workaround for errata FEr SATA#10 (part 1) */
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002124 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2125
2126 writel(m4, port_mmio + PHY_MODE4);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002127
2128 if (hp_flags & MV_HP_ERRATA_60X1B2)
Mark Lorde12bef52008-03-31 19:33:56 -04002129 writel(tmp, port_mmio + PHY_MODE3);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002130 }
2131
2132 /* Revert values of pre-emphasis and signal amps to the saved ones */
2133 m2 = readl(port_mmio + PHY_MODE2);
2134
2135 m2 &= ~MV_M2_PREAMP_MASK;
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002136 m2 |= hpriv->signal[port].amps;
2137 m2 |= hpriv->signal[port].pre;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002138 m2 &= ~(1 << 16);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002139
Jeff Garzike4e7b892006-01-31 12:18:41 -05002140 /* according to mvSata 3.6.1, some IIE values are fixed */
2141 if (IS_GEN_IIE(hpriv)) {
2142 m2 &= ~0xC30FF01F;
2143 m2 |= 0x0000900F;
2144 }
2145
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002146 writel(m2, port_mmio + PHY_MODE2);
2147}
2148
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002149/* TODO: use the generic LED interface to configure the SATA Presence */
2150/* & Acitivy LEDs on the board */
2151static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2152 void __iomem *mmio)
2153{
2154 return;
2155}
2156
2157static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2158 void __iomem *mmio)
2159{
2160 void __iomem *port_mmio;
2161 u32 tmp;
2162
2163 port_mmio = mv_port_base(mmio, idx);
2164 tmp = readl(port_mmio + PHY_MODE2);
2165
2166 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2167 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2168}
2169
2170#undef ZERO
2171#define ZERO(reg) writel(0, port_mmio + (reg))
/*
 * mv_soc_reset_hc_port - reset one SoC SATA port and zero its EDMA state.
 *
 * Resets the channel first (see comment below), then clears the port's
 * EDMA command, queue pointers and irq registers, and programs fixed
 * EDMA_CFG / IORDY-timeout values.  Uses the file-local ZERO() macro
 * (writel(0, port_mmio + reg)) defined just above this function.
 */
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				      void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
2198
2199#undef ZERO
2200
2201#define ZERO(reg) writel(0, hc_mmio + (reg))
2202static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
2203 void __iomem *mmio)
2204{
2205 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
2206
2207 ZERO(0x00c);
2208 ZERO(0x010);
2209 ZERO(0x014);
2210
2211}
2212
2213#undef ZERO
2214
2215static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2216 void __iomem *mmio, unsigned int n_hc)
2217{
2218 unsigned int port;
2219
2220 for (port = 0; port < hpriv->n_ports; port++)
2221 mv_soc_reset_hc_port(hpriv, mmio, port);
2222
2223 mv_soc_reset_one_hc(hpriv, mmio);
2224
2225 return 0;
2226}
2227
2228static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2229 void __iomem *mmio)
2230{
2231 return;
2232}
2233
2234static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2235{
2236 return;
2237}
2238
Mark Lordb67a1062008-03-31 19:35:13 -04002239static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
2240{
2241 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);
2242
2243 ifctl = (ifctl & 0xf7f) | 0x9b1000; /* from chip spec */
2244 if (want_gen2i)
2245 ifctl |= (1 << 7); /* enable gen2i speed */
2246 writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
2247}
2248
Mark Lordb5624682008-03-31 19:34:40 -04002249/*
2250 * Caller must ensure that EDMA is not active,
2251 * by first doing mv_stop_edma() where needed.
2252 */
Mark Lorde12bef52008-03-31 19:33:56 -04002253static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
Jeff Garzikc9d39132005-11-13 17:47:51 -05002254 unsigned int port_no)
Brett Russ20f733e2005-09-01 18:26:17 -04002255{
Jeff Garzikc9d39132005-11-13 17:47:51 -05002256 void __iomem *port_mmio = mv_port_base(mmio, port_no);
Brett Russ20f733e2005-09-01 18:26:17 -04002257
Mark Lord0d8be5c2008-04-16 14:56:12 -04002258 mv_stop_edma_engine(port_mmio);
Brett Russ31961942005-09-30 01:36:00 -04002259 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002260
Mark Lordb67a1062008-03-31 19:35:13 -04002261 if (!IS_GEN_I(hpriv)) {
2262 /* Enable 3.0gb/s link speed */
2263 mv_setup_ifctl(port_mmio, 1);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002264 }
Mark Lordb67a1062008-03-31 19:35:13 -04002265 /*
2266 * Strobing ATA_RST here causes a hard reset of the SATA transport,
2267 * link, and physical layers. It resets all SATA interface registers
2268 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
Brett Russ20f733e2005-09-01 18:26:17 -04002269 */
Mark Lordb67a1062008-03-31 19:35:13 -04002270 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2271 udelay(25); /* allow reset propagation */
Brett Russ31961942005-09-30 01:36:00 -04002272 writelfl(0, port_mmio + EDMA_CMD_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04002273
Jeff Garzikc9d39132005-11-13 17:47:51 -05002274 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2275
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002276 if (IS_GEN_I(hpriv))
Jeff Garzikc9d39132005-11-13 17:47:51 -05002277 mdelay(1);
2278}
2279
Tejun Heocc0680a2007-08-06 18:36:23 +09002280static int mv_hardreset(struct ata_link *link, unsigned int *class,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002281 unsigned long deadline)
2282{
Tejun Heocc0680a2007-08-06 18:36:23 +09002283 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002284 struct mv_host_priv *hpriv = ap->host->private_data;
Mark Lordb5624682008-03-31 19:34:40 -04002285 struct mv_port_priv *pp = ap->private_data;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002286 void __iomem *mmio = hpriv->base;
Mark Lord0d8be5c2008-04-16 14:56:12 -04002287 int rc, attempts = 0, extra = 0;
2288 u32 sstatus;
2289 bool online;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002290
Mark Lorde12bef52008-03-31 19:33:56 -04002291 mv_reset_channel(hpriv, mmio, ap->port_no);
Mark Lordb5624682008-03-31 19:34:40 -04002292 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002293
Mark Lord0d8be5c2008-04-16 14:56:12 -04002294 /* Workaround for errata FEr SATA#10 (part 2) */
2295 do {
2296 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002297
Mark Lord0d8be5c2008-04-16 14:56:12 -04002298 rc = sata_link_hardreset(link, timing, deadline + extra, &online, NULL);
2299 if (rc) {
2300 ata_link_printk(link, KERN_ERR,
2301 "COMRESET failed (errno=%d)\n", rc);
2302 return rc;
2303 }
2304 sata_scr_read(link, SCR_STATUS, &sstatus);
2305 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
2306 /* Force 1.5gb/s link speed and try again */
2307 mv_setup_ifctl(mv_ap_base(ap), 0);
2308 if (time_after(jiffies + HZ, deadline))
2309 extra = HZ; /* only extend it once, max */
2310 }
2311 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002312
Mark Lord0d8be5c2008-04-16 14:56:12 -04002313 return online ? -EAGAIN : rc;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002314}
2315
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002316static void mv_eh_freeze(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04002317{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002318 struct mv_host_priv *hpriv = ap->host->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002319 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2320 u32 tmp, mask;
2321 unsigned int shift;
Brett Russ31961942005-09-30 01:36:00 -04002322
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002323 /* FIXME: handle coalescing completion events properly */
Brett Russ31961942005-09-30 01:36:00 -04002324
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002325 shift = ap->port_no * 2;
2326 if (hc > 0)
2327 shift++;
Brett Russ31961942005-09-30 01:36:00 -04002328
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002329 mask = 0x3 << shift;
Brett Russ31961942005-09-30 01:36:00 -04002330
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002331 /* disable assertion of portN err, done events */
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002332 tmp = readl(hpriv->main_mask_reg_addr);
2333 writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002334}
2335
/*
 * mv_eh_thaw - libata thaw method: clear stale events, re-enable irqs.
 *
 * Mirrors mv_eh_freeze: first clears any latched EDMA errors and
 * pending CRPB-done / device interrupts for the port, then sets the
 * port's err/done pair back in the main irq mask register.
 */
static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	/* ports 4-7 sit behind the second HC: extra bit, hc-local index */
	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);		/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8));	/* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp | mask, hpriv->main_mask_reg_addr);
}
2369
Brett Russ05b308e2005-10-05 17:08:53 -04002370/**
2371 * mv_port_init - Perform some early initialization on a single port.
2372 * @port: libata data structure storing shadow register addresses
2373 * @port_mmio: base address of the port
2374 *
2375 * Initialize shadow register mmio addresses, clear outstanding
2376 * interrupts on the port, and unmask interrupts for the future
2377 * start of the port.
2378 *
2379 * LOCKING:
2380 * Inherited from caller.
2381 */
Brett Russ31961942005-09-30 01:36:00 -04002382static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2383{
Tejun Heo0d5ff562007-02-01 15:06:36 +09002384 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
Brett Russ31961942005-09-30 01:36:00 -04002385 unsigned serr_ofs;
2386
Jeff Garzik8b260242005-11-12 12:32:50 -05002387 /* PIO related setup
Brett Russ31961942005-09-30 01:36:00 -04002388 */
2389 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
Jeff Garzik8b260242005-11-12 12:32:50 -05002390 port->error_addr =
Brett Russ31961942005-09-30 01:36:00 -04002391 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2392 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2393 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2394 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2395 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2396 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
Jeff Garzik8b260242005-11-12 12:32:50 -05002397 port->status_addr =
Brett Russ31961942005-09-30 01:36:00 -04002398 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2399 /* special case: control/altstatus doesn't have ATA_REG_ address */
2400 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2401
2402 /* unused: */
Randy Dunlap8d9db2d2007-02-16 01:40:06 -08002403 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
Brett Russ20f733e2005-09-01 18:26:17 -04002404
Brett Russ31961942005-09-30 01:36:00 -04002405 /* Clear any currently outstanding port interrupt conditions */
2406 serr_ofs = mv_scr_offset(SCR_ERROR);
2407 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2408 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2409
Mark Lord646a4da2008-01-26 18:30:37 -05002410 /* unmask all non-transient EDMA error interrupts */
2411 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04002412
Jeff Garzik8b260242005-11-12 12:32:50 -05002413 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
Brett Russ31961942005-09-30 01:36:00 -04002414 readl(port_mmio + EDMA_CFG_OFS),
2415 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2416 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
Brett Russ20f733e2005-09-01 18:26:17 -04002417}
2418
Tejun Heo4447d352007-04-17 23:44:08 +09002419static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002420{
Tejun Heo4447d352007-04-17 23:44:08 +09002421 struct pci_dev *pdev = to_pci_dev(host->dev);
2422 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002423 u32 hp_flags = hpriv->hp_flags;
2424
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002425 switch (board_idx) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002426 case chip_5080:
2427 hpriv->ops = &mv5xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002428 hp_flags |= MV_HP_GEN_I;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002429
Auke Kok44c10132007-06-08 15:46:36 -07002430 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002431 case 0x1:
2432 hp_flags |= MV_HP_ERRATA_50XXB0;
2433 break;
2434 case 0x3:
2435 hp_flags |= MV_HP_ERRATA_50XXB2;
2436 break;
2437 default:
2438 dev_printk(KERN_WARNING, &pdev->dev,
2439 "Applying 50XXB2 workarounds to unknown rev\n");
2440 hp_flags |= MV_HP_ERRATA_50XXB2;
2441 break;
2442 }
2443 break;
2444
2445 case chip_504x:
2446 case chip_508x:
2447 hpriv->ops = &mv5xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002448 hp_flags |= MV_HP_GEN_I;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002449
Auke Kok44c10132007-06-08 15:46:36 -07002450 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002451 case 0x0:
2452 hp_flags |= MV_HP_ERRATA_50XXB0;
2453 break;
2454 case 0x3:
2455 hp_flags |= MV_HP_ERRATA_50XXB2;
2456 break;
2457 default:
2458 dev_printk(KERN_WARNING, &pdev->dev,
2459 "Applying B2 workarounds to unknown rev\n");
2460 hp_flags |= MV_HP_ERRATA_50XXB2;
2461 break;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002462 }
2463 break;
2464
2465 case chip_604x:
2466 case chip_608x:
Jeff Garzik47c2b672005-11-12 21:13:17 -05002467 hpriv->ops = &mv6xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002468 hp_flags |= MV_HP_GEN_II;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002469
Auke Kok44c10132007-06-08 15:46:36 -07002470 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002471 case 0x7:
2472 hp_flags |= MV_HP_ERRATA_60X1B2;
2473 break;
2474 case 0x9:
2475 hp_flags |= MV_HP_ERRATA_60X1C0;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002476 break;
2477 default:
2478 dev_printk(KERN_WARNING, &pdev->dev,
Jeff Garzik47c2b672005-11-12 21:13:17 -05002479 "Applying B2 workarounds to unknown rev\n");
2480 hp_flags |= MV_HP_ERRATA_60X1B2;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002481 break;
2482 }
2483 break;
2484
Jeff Garzike4e7b892006-01-31 12:18:41 -05002485 case chip_7042:
Mark Lord02a121d2007-12-01 13:07:22 -05002486 hp_flags |= MV_HP_PCIE;
Mark Lord306b30f2007-12-04 14:07:52 -05002487 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2488 (pdev->device == 0x2300 || pdev->device == 0x2310))
2489 {
Mark Lord4e520032007-12-11 12:58:05 -05002490 /*
2491 * Highpoint RocketRAID PCIe 23xx series cards:
2492 *
2493 * Unconfigured drives are treated as "Legacy"
2494 * by the BIOS, and it overwrites sector 8 with
2495 * a "Lgcy" metadata block prior to Linux boot.
2496 *
2497 * Configured drives (RAID or JBOD) leave sector 8
2498 * alone, but instead overwrite a high numbered
2499 * sector for the RAID metadata. This sector can
2500 * be determined exactly, by truncating the physical
2501 * drive capacity to a nice even GB value.
2502 *
2503 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2504 *
2505 * Warn the user, lest they think we're just buggy.
2506 */
2507 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2508 " BIOS CORRUPTS DATA on all attached drives,"
2509 " regardless of if/how they are configured."
2510 " BEWARE!\n");
2511 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2512 " use sectors 8-9 on \"Legacy\" drives,"
2513 " and avoid the final two gigabytes on"
2514 " all RocketRAID BIOS initialized drives.\n");
Mark Lord306b30f2007-12-04 14:07:52 -05002515 }
Jeff Garzike4e7b892006-01-31 12:18:41 -05002516 case chip_6042:
2517 hpriv->ops = &mv6xxx_ops;
Jeff Garzike4e7b892006-01-31 12:18:41 -05002518 hp_flags |= MV_HP_GEN_IIE;
2519
Auke Kok44c10132007-06-08 15:46:36 -07002520 switch (pdev->revision) {
Jeff Garzike4e7b892006-01-31 12:18:41 -05002521 case 0x0:
2522 hp_flags |= MV_HP_ERRATA_XX42A0;
2523 break;
2524 case 0x1:
2525 hp_flags |= MV_HP_ERRATA_60X1C0;
2526 break;
2527 default:
2528 dev_printk(KERN_WARNING, &pdev->dev,
2529 "Applying 60X1C0 workarounds to unknown rev\n");
2530 hp_flags |= MV_HP_ERRATA_60X1C0;
2531 break;
2532 }
2533 break;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002534 case chip_soc:
2535 hpriv->ops = &mv_soc_ops;
2536 hp_flags |= MV_HP_ERRATA_60X1C0;
2537 break;
Jeff Garzike4e7b892006-01-31 12:18:41 -05002538
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002539 default:
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002540 dev_printk(KERN_ERR, host->dev,
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002541 "BUG: invalid board index %u\n", board_idx);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002542 return 1;
2543 }
2544
2545 hpriv->hp_flags = hp_flags;
Mark Lord02a121d2007-12-01 13:07:22 -05002546 if (hp_flags & MV_HP_PCIE) {
2547 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2548 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2549 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2550 } else {
2551 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2552 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2553 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2554 }
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002555
2556 return 0;
2557}
2558
Brett Russ05b308e2005-10-05 17:08:53 -04002559/**
Jeff Garzik47c2b672005-11-12 21:13:17 -05002560 * mv_init_host - Perform some early initialization of the host.
Tejun Heo4447d352007-04-17 23:44:08 +09002561 * @host: ATA host to initialize
2562 * @board_idx: controller index
Brett Russ05b308e2005-10-05 17:08:53 -04002563 *
2564 * If possible, do an early global reset of the host. Then do
2565 * our port init and clear/unmask all/relevant host interrupts.
2566 *
2567 * LOCKING:
2568 * Inherited from caller.
2569 */
Tejun Heo4447d352007-04-17 23:44:08 +09002570static int mv_init_host(struct ata_host *host, unsigned int board_idx)
Brett Russ20f733e2005-09-01 18:26:17 -04002571{
2572 int rc = 0, n_hc, port, hc;
Tejun Heo4447d352007-04-17 23:44:08 +09002573 struct mv_host_priv *hpriv = host->private_data;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002574 void __iomem *mmio = hpriv->base;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002575
Tejun Heo4447d352007-04-17 23:44:08 +09002576 rc = mv_chip_id(host, board_idx);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002577 if (rc)
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002578 goto done;
2579
2580 if (HAS_PCI(host)) {
2581 hpriv->main_cause_reg_addr = hpriv->base +
2582 HC_MAIN_IRQ_CAUSE_OFS;
2583 hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
2584 } else {
2585 hpriv->main_cause_reg_addr = hpriv->base +
2586 HC_SOC_MAIN_IRQ_CAUSE_OFS;
2587 hpriv->main_mask_reg_addr = hpriv->base +
2588 HC_SOC_MAIN_IRQ_MASK_OFS;
2589 }
2590 /* global interrupt mask */
2591 writel(0, hpriv->main_mask_reg_addr);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002592
Tejun Heo4447d352007-04-17 23:44:08 +09002593 n_hc = mv_get_hc_count(host->ports[0]->flags);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002594
Tejun Heo4447d352007-04-17 23:44:08 +09002595 for (port = 0; port < host->n_ports; port++)
Jeff Garzik47c2b672005-11-12 21:13:17 -05002596 hpriv->ops->read_preamp(hpriv, port, mmio);
Brett Russ20f733e2005-09-01 18:26:17 -04002597
Jeff Garzikc9d39132005-11-13 17:47:51 -05002598 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002599 if (rc)
Brett Russ20f733e2005-09-01 18:26:17 -04002600 goto done;
Brett Russ20f733e2005-09-01 18:26:17 -04002601
Jeff Garzik522479f2005-11-12 22:14:02 -05002602 hpriv->ops->reset_flash(hpriv, mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002603 hpriv->ops->reset_bus(host, mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002604 hpriv->ops->enable_leds(hpriv, mmio);
Brett Russ20f733e2005-09-01 18:26:17 -04002605
Tejun Heo4447d352007-04-17 23:44:08 +09002606 for (port = 0; port < host->n_ports; port++) {
Tejun Heocbcdd872007-08-18 13:14:55 +09002607 struct ata_port *ap = host->ports[port];
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002608 void __iomem *port_mmio = mv_port_base(mmio, port);
Tejun Heocbcdd872007-08-18 13:14:55 +09002609
2610 mv_port_init(&ap->ioaddr, port_mmio);
2611
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002612#ifdef CONFIG_PCI
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002613 if (HAS_PCI(host)) {
2614 unsigned int offset = port_mmio - mmio;
2615 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2616 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2617 }
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002618#endif
Brett Russ20f733e2005-09-01 18:26:17 -04002619 }
2620
2621 for (hc = 0; hc < n_hc; hc++) {
Brett Russ31961942005-09-30 01:36:00 -04002622 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2623
2624 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2625 "(before clear)=0x%08x\n", hc,
2626 readl(hc_mmio + HC_CFG_OFS),
2627 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2628
2629 /* Clear any currently outstanding hc interrupt conditions */
2630 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04002631 }
2632
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002633 if (HAS_PCI(host)) {
2634 /* Clear any currently outstanding host interrupt conditions */
2635 writelfl(0, mmio + hpriv->irq_cause_ofs);
Brett Russ31961942005-09-30 01:36:00 -04002636
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002637 /* and unmask interrupt generation for host regs */
2638 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2639 if (IS_GEN_I(hpriv))
2640 writelfl(~HC_MAIN_MASKED_IRQS_5,
2641 hpriv->main_mask_reg_addr);
2642 else
2643 writelfl(~HC_MAIN_MASKED_IRQS,
2644 hpriv->main_mask_reg_addr);
Jeff Garzikfb621e22007-02-25 04:19:45 -05002645
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002646 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2647 "PCI int cause/mask=0x%08x/0x%08x\n",
2648 readl(hpriv->main_cause_reg_addr),
2649 readl(hpriv->main_mask_reg_addr),
2650 readl(mmio + hpriv->irq_cause_ofs),
2651 readl(mmio + hpriv->irq_mask_ofs));
2652 } else {
2653 writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2654 hpriv->main_mask_reg_addr);
2655 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2656 readl(hpriv->main_cause_reg_addr),
2657 readl(hpriv->main_mask_reg_addr));
2658 }
Brett Russ31961942005-09-30 01:36:00 -04002659done:
Brett Russ20f733e2005-09-01 18:26:17 -04002660 return rc;
2661}
2662
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002663static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2664{
2665 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2666 MV_CRQB_Q_SZ, 0);
2667 if (!hpriv->crqb_pool)
2668 return -ENOMEM;
2669
2670 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2671 MV_CRPB_Q_SZ, 0);
2672 if (!hpriv->crpb_pool)
2673 return -ENOMEM;
2674
2675 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2676 MV_SG_TBL_SZ, 0);
2677 if (!hpriv->sg_tbl_pool)
2678 return -ENOMEM;
2679
2680 return 0;
2681}
2682
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002683/**
2684 * mv_platform_probe - handle a positive probe of an soc Marvell
2685 * host
2686 * @pdev: platform device found
2687 *
2688 * LOCKING:
2689 * Inherited from caller.
2690 */
2691static int mv_platform_probe(struct platform_device *pdev)
2692{
2693 static int printed_version;
2694 const struct mv_sata_platform_data *mv_platform_data;
2695 const struct ata_port_info *ppi[] =
2696 { &mv_port_info[chip_soc], NULL };
2697 struct ata_host *host;
2698 struct mv_host_priv *hpriv;
2699 struct resource *res;
2700 int n_ports, rc;
2701
2702 if (!printed_version++)
2703 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2704
2705 /*
2706 * Simple resource validation ..
2707 */
2708 if (unlikely(pdev->num_resources != 2)) {
2709 dev_err(&pdev->dev, "invalid number of resources\n");
2710 return -EINVAL;
2711 }
2712
2713 /*
2714 * Get the register base first
2715 */
2716 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2717 if (res == NULL)
2718 return -EINVAL;
2719
2720 /* allocate host */
2721 mv_platform_data = pdev->dev.platform_data;
2722 n_ports = mv_platform_data->n_ports;
2723
2724 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2725 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2726
2727 if (!host || !hpriv)
2728 return -ENOMEM;
2729 host->private_data = hpriv;
2730 hpriv->n_ports = n_ports;
2731
2732 host->iomap = NULL;
Saeed Bisharaf1cb0ea2008-02-18 07:42:28 -11002733 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2734 res->end - res->start + 1);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002735 hpriv->base -= MV_SATAHC0_REG_BASE;
2736
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002737 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2738 if (rc)
2739 return rc;
2740
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002741 /* initialize adapter */
2742 rc = mv_init_host(host, chip_soc);
2743 if (rc)
2744 return rc;
2745
2746 dev_printk(KERN_INFO, &pdev->dev,
2747 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2748 host->n_ports);
2749
2750 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2751 IRQF_SHARED, &mv6_sht);
2752}
2753
2754/*
2755 *
2756 * mv_platform_remove - unplug a platform interface
2757 * @pdev: platform device
2758 *
2759 * A platform bus SATA device has been unplugged. Perform the needed
2760 * cleanup. Also called on module unload for any active devices.
2761 */
2762static int __devexit mv_platform_remove(struct platform_device *pdev)
2763{
2764 struct device *dev = &pdev->dev;
2765 struct ata_host *host = dev_get_drvdata(dev);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002766
2767 ata_host_detach(host);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002768 return 0;
2769}
2770
/* Platform-bus glue for the on-chip (SoC) SATA controllers. */
static struct platform_driver mv_platform_driver = {
	.probe			= mv_platform_probe,
	.remove			= __devexit_p(mv_platform_remove),
	.driver			= {
				   .name = DRV_NAME,
				   .owner = THIS_MODULE,
				  },
};
2779
2780
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002781#ifdef CONFIG_PCI
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002782static int mv_pci_init_one(struct pci_dev *pdev,
2783 const struct pci_device_id *ent);
2784
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002785
/* PCI-bus glue; removal is handled by the generic libata PCI helper. */
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
};
2792
2793/*
2794 * module options
2795 */
2796static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
2797
2798
2799/* move to PCI layer or libata core? */
2800static int pci_go_64(struct pci_dev *pdev)
2801{
2802 int rc;
2803
2804 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2805 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2806 if (rc) {
2807 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2808 if (rc) {
2809 dev_printk(KERN_ERR, &pdev->dev,
2810 "64-bit DMA enable failed\n");
2811 return rc;
2812 }
2813 }
2814 } else {
2815 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2816 if (rc) {
2817 dev_printk(KERN_ERR, &pdev->dev,
2818 "32-bit DMA enable failed\n");
2819 return rc;
2820 }
2821 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2822 if (rc) {
2823 dev_printk(KERN_ERR, &pdev->dev,
2824 "32-bit consistent DMA enable failed\n");
2825 return rc;
2826 }
2827 }
2828
2829 return rc;
2830}
2831
Brett Russ05b308e2005-10-05 17:08:53 -04002832/**
2833 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002834 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002835 *
2836 * FIXME: complete this.
2837 *
2838 * LOCKING:
2839 * Inherited from caller.
2840 */
Tejun Heo4447d352007-04-17 23:44:08 +09002841static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002842{
Tejun Heo4447d352007-04-17 23:44:08 +09002843 struct pci_dev *pdev = to_pci_dev(host->dev);
2844 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07002845 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002846 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002847
2848 /* Use this to determine the HW stepping of the chip so we know
2849 * what errata to workaround
2850 */
Brett Russ31961942005-09-30 01:36:00 -04002851 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2852 if (scc == 0)
2853 scc_s = "SCSI";
2854 else if (scc == 0x01)
2855 scc_s = "RAID";
2856 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002857 scc_s = "?";
2858
2859 if (IS_GEN_I(hpriv))
2860 gen = "I";
2861 else if (IS_GEN_II(hpriv))
2862 gen = "II";
2863 else if (IS_GEN_IIE(hpriv))
2864 gen = "IIE";
2865 else
2866 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002867
Jeff Garzika9524a72005-10-30 14:39:11 -05002868 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002869 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2870 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002871 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2872}
2873
/**
 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * Allocates the ATA host and driver-private data, maps the chip's
 * BAR, configures DMA masks and DMA pools, initializes the adapter,
 * and finally activates the host with a shared interrupt handler.
 *
 * All allocations and mappings below use devm_*/pcim_* managed
 * interfaces, so the bare early returns on error do not leak —
 * the device-resource core releases everything when probe fails.
 *
 * Returns 0 on success, negative errno otherwise.
 *
 * LOCKING:
 *	Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;	/* print the banner only once */
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);	/* keep the device enabled despite -EBUSY */
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	/* set up 64-bit DMA masks, falling back to 32-bit inside the helper */
	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);	/* MSI setup failed: fall back to legacy INTx */

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);	/* best-effort; failure is ignored by design */
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002943#endif
Brett Russ20f733e2005-09-01 18:26:17 -04002944
/* Platform-bus probe/remove entry points, defined elsewhere in this file. */
static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);
2947
Brett Russ20f733e2005-09-01 18:26:17 -04002948static int __init mv_init(void)
2949{
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002950 int rc = -ENODEV;
2951#ifdef CONFIG_PCI
2952 rc = pci_register_driver(&mv_pci_driver);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002953 if (rc < 0)
2954 return rc;
2955#endif
2956 rc = platform_driver_register(&mv_platform_driver);
2957
2958#ifdef CONFIG_PCI
2959 if (rc < 0)
2960 pci_unregister_driver(&mv_pci_driver);
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002961#endif
2962 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04002963}
2964
/* Module unload: unregister the PCI driver (when configured) and the
 * platform driver registered in mv_init().
 */
static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}
2972
2973MODULE_AUTHOR("Brett Russ");
2974MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2975MODULE_LICENSE("GPL");
2976MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2977MODULE_VERSION(DRV_VERSION);
Martin Michlmayr2e7e1212008-02-16 18:15:27 +01002978MODULE_ALIAS("platform:sata_mv");
Brett Russ20f733e2005-09-01 18:26:17 -04002979
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002980#ifdef CONFIG_PCI
Jeff Garzikddef9bb2006-02-02 16:17:06 -05002981module_param(msi, int, 0444);
2982MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002983#endif
Jeff Garzikddef9bb2006-02-02 16:17:06 -05002984
Brett Russ20f733e2005-09-01 18:26:17 -04002985module_init(mv_init);
2986module_exit(mv_exit);