/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008-2009: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Originally written by Brett Russ.
 * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * sata_mv TODO list:
 *
 * --> Develop a low-power-consumption strategy, and implement it.
 *
 * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
 *
 * --> [Experiment, Marvell value added] Is it possible to use target
 *       mode to cross-connect two Linux boxes with Marvell cards?  If so,
 *       creating LibATA target mode support would be very interesting.
 *
 *       Target mode, for those without docs, is the ability to directly
 *       connect two SATA ports.
 */

/*
 * 80x1-B2 errata PCI#11:
 *
 * Users of the 6041/6081 Rev.B2 chips (current is C0)
 * should be careful to insert those cards only onto PCI-X bus #0,
 * and only in device slots 0..7, not higher.  The chips may not
 * work correctly otherwise (note: this is a pretty rare condition).
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <linux/bitops.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.27"

/*
 * module options
 */

static int msi;
#ifdef CONFIG_PCI
module_param(msi, int, S_IRUGO);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

static int irq_coalescing_io_count;
module_param(irq_coalescing_io_count, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_io_count,
		 "IRQ coalescing I/O count threshold (0..255)");

static int irq_coalescing_usecs;
module_param(irq_coalescing_usecs, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_usecs,
		 "IRQ coalescing time threshold in usecs");

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	/* For use with both IRQ coalescing methods ("all ports" or "per-HC") */
	COAL_CLOCKS_PER_USEC	= 150,		/* for calculating COAL_TIMEs */
	MAX_COAL_TIME_THRESHOLD	= ((1 << 24) - 1), /* internal clocks count */
	MAX_COAL_IO_COUNT	= 255,		/* completed I/O count */

	MV_PCI_REG_BASE		= 0,

	/*
	 * Per-chip ("all ports") interrupt coalescing feature.
	 * This is only for GEN_II / GEN_IIE hardware.
	 *
	 * Coalescing defers the interrupt until either the IO_THRESHOLD
	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
	 */
	MV_COAL_REG_BASE	= 0x18000,
	MV_IRQ_COAL_CAUSE	= (MV_COAL_REG_BASE + 0x08),
	ALL_PORTS_COAL_IRQ	= (1 << 4),	/* all ports irq event */

	MV_IRQ_COAL_IO_THRESHOLD	= (MV_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_COAL_REG_BASE + 0xd0),

	/*
	 * Registers for the (unused here) transaction coalescing feature:
	 */
	MV_TRAN_COAL_CAUSE_LO	= (MV_COAL_REG_BASE + 0x88),
	MV_TRAN_COAL_CAUSE_HI	= (MV_COAL_REG_BASE + 0x8c),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL_OFS	= 0x1046c,
	MV_GPIO_PORT_CTL_OFS	= 0x104f0,
	MV_RESET_CFG_OFS	= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
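	/*
	 * Sanity check (illustration only): 32 B/CRQB * 32 = 1 KB and
	 * 8 B/CRPB * 32 = 256 B, matching the alignment rules above;
	 * each SG table is 16 B/ePRD * 256 = 4 KB.
	 */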

	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
	MV_PORT_HC_SHIFT	= 2,
	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1),   /* 3 */

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,

	MV_GEN_I_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,

	MV_GEN_II_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NCQ |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,

	MV_GEN_IIE_FLAGS	= MV_GEN_II_FLAGS | ATA_FLAG_AN,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,
	PCI_COMMAND_MWRCOM	= (1 << 4),	/* PCI Master Write Combining */
	PCI_COMMAND_MRDTRIG	= (1 << 7),	/* PCI Master Read Trigger */

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE_OFS		= 0xd00,
	MV_PCI_MODE_MASK	= 0x30,

	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT_OFS	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
	PCI_HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
	PCI_HC_MAIN_IRQ_MASK_OFS  = 0x1d64,
	SOC_HC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	SOC_HC_MAIN_IRQ_MASK_OFS  = 0x20024,
	ERR_IRQ			= (1 << 0),	/* shift by (2 * port #) */
	DONE_IRQ		= (1 << 1),	/* shift by (2 * port #) */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	DONE_IRQ_0_3		= 0x000000aa,	/* DONE_IRQ ports 0,1,2,3 */
	DONE_IRQ_4_7		= (DONE_IRQ_0_3 << HC_SHIFT),  /* 4,5,6,7 */
	PCI_ERR			= (1 << 18),
	TRAN_COAL_LO_DONE	= (1 << 19),	/* transaction coalescing */
	TRAN_COAL_HI_DONE	= (1 << 20),	/* transaction coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),	/* HC0 IRQ coalescing */
	PORTS_4_7_COAL_DONE	= (1 << 17),	/* HC1 IRQ coalescing */
	ALL_PORTS_COAL_DONE	= (1 << 21),	/* GEN_II(E) IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),	/* bits 31-9, 7-6 */

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	DMA_IRQ			= (1 << 0),	/* shift by port # */
	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/*
	 * Per-HC (Host-Controller) interrupt coalescing feature.
	 * This is present on all chip generations.
	 *
	 * Coalescing defers the interrupt until either the IO_THRESHOLD
	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
	 */
	HC_IRQ_COAL_IO_THRESHOLD_OFS	= 0x000c,
	HC_IRQ_COAL_TIME_THRESHOLD_OFS	= 0x0010,

	SOC_LED_CTRL_OFS	= 0x2c,
	SOC_LED_CTRL_BLINK	= (1 << 0),	/* Active LED blink */
	SOC_LED_CTRL_ACT_PRESENCE = (1 << 2),	/* Multiplex dev presence */
						/*  with dev activity LED */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
	SATA_FIS_IRQ_AN		= (1 << 9),	/* async notification */

	LTMODE_OFS		= 0x30c,
	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */

	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE4_CFG_MASK	= 0x00000003,	/* phy internal config field */
	PHY_MODE4_CFG_VALUE	= 0x00000001,	/* phy internal config field */
	PHY_MODE4_RSVD_ZEROS	= 0x5de3fffa,	/* Gen2e always write zeros */
	PHY_MODE4_RSVD_ONES	= 0x00000005,	/* Gen2e always write ones */

	PHY_MODE2		= 0x330,
	SATA_IFCTL_OFS		= 0x344,
	SATA_TESTCTL_OFS	= 0x348,
	SATA_IFSTAT_OFS		= 0x34c,
	VENDOR_UNIQUE_FIS_OFS	= 0x35c,

	FISCFG_OFS		= 0x360,
	FISCFG_WAIT_DEV_ERR	= (1 << 8),	/* wait for host on DevErr */
	FISCFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */

	MV5_PHY_MODE		= 0x74,
	MV5_LTMODE_OFS		= 0x30,
	MV5_PHY_CTL_OFS		= 0x0C,
	SATA_INTERFACE_CFG_OFS	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	EDMA_RESET		= (1 << 2),	/* reset eng/trans/link/phy */

	EDMA_STATUS_OFS		= 0x30,		/* EDMA engine status */
	EDMA_STATUS_CACHE_EMPTY	= (1 << 6),	/* GenIIe command cache empty */
	EDMA_STATUS_IDLE	= (1 << 7),	/* GenIIe EDMA enabled/idle */

	EDMA_IORDY_TMOUT_OFS	= 0x34,
	EDMA_ARB_CFG_OFS	= 0x38,

	EDMA_HALTCOND_OFS	= 0x60,		/* GenIIe halt conditions */
	EDMA_UNKNOWN_RSVD_OFS	= 0x6C,		/* GenIIe unknown/reserved */

	BMDMA_CMD_OFS		= 0x224,	/* bmdma command register */
	BMDMA_STATUS_OFS	= 0x228,	/* bmdma status register */
	BMDMA_PRD_LOW_OFS	= 0x22c,	/* bmdma PRD addr 31:0 */
	BMDMA_PRD_HIGH_OFS	= 0x230,	/* bmdma PRD addr 63:32 */

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */
	MV_HP_CUT_THROUGH	= (1 << 10),	/* can use EDMA cut-through */
	MV_HP_FLAG_SOC		= (1 << 11),	/* SystemOnChip, no PCI */
	MV_HP_QUIRK_LED_BLINK_EN = (1 << 12),	/* is led blinking enabled? */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_FBS_EN	= (1 << 2),	/* is EDMA set up for FBS? */
	MV_PP_FLAG_DELAYED_EH	= (1 << 3),	/* delayed dev err handling */
	MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4),	/* ignore initial ATA_DRDY */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)

#define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
#define WINDOW_BASE(i)		(0x20034 + ((i) << 4))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

/*
 * We keep a local cache of a few frequently accessed port
 * registers here, to avoid having to read them (very slow)
 * when switching between EDMA and non-EDMA modes.
 */
struct mv_cached_regs {
	u32			fiscfg;
	u32			ltmode;
	u32			haltcond;
	u32			unknown_rsvd;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
	struct mv_cached_regs	cached;
	unsigned int		delayed_eh_pmp_map;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	u32			main_irq_mask;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_irq_cause_addr;
	void __iomem		*main_irq_mask_addr;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
				      void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);

static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_pmp_error_handler(struct ata_port *ap);
static void mv_process_crpb_entries(struct ata_port *ap,
					struct mv_port_priv *pp);

static void mv_sff_irq_clear(struct ata_port *ap);
static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
static void mv_bmdma_setup(struct ata_queued_cmd *qc);
static void mv_bmdma_start(struct ata_queued_cmd *qc);
static void mv_bmdma_stop(struct ata_queued_cmd *qc);
static u8 mv_bmdma_status(struct ata_port *ap);
static u8 mv_sff_check_status(struct ata_port *ap);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.lost_interrupt		= ATA_OP_NULL,

	.qc_defer		= mv_qc_defer,
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
	.post_internal_cmd	= ATA_OP_NULL,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits		= &mv5_ops,
	.dev_config		= mv6_dev_config,
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.pmp_hardreset		= mv_pmp_hardreset,
	.pmp_softreset		= mv_softreset,
	.softreset		= mv_softreset,
	.error_handler		= mv_pmp_error_handler,

	.sff_check_status	= mv_sff_check_status,
	.sff_irq_clear		= mv_sff_irq_clear,
	.check_atapi_dma	= mv_check_atapi_dma,
	.bmdma_setup		= mv_bmdma_setup,
	.bmdma_start		= mv_bmdma_start,
	.bmdma_stop		= mv_bmdma_stop,
	.bmdma_status		= mv_bmdma_status,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_GEN_I_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_GEN_II_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1720/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

/*
 * Functions
 */

814{
815 writel(data, addr);
816 (void) readl(addr); /* flush to avoid PCI posted write */
817}
818
Jeff Garzikc9d39132005-11-13 17:47:51 -0500819static inline unsigned int mv_hc_from_port(unsigned int port)
820{
821 return port >> MV_PORT_HC_SHIFT;
822}
823
824static inline unsigned int mv_hardport_from_port(unsigned int port)
825{
826 return port & MV_PORT_MASK;
827}
828
Mark Lord1cfd19a2008-04-19 15:05:50 -0400829/*
830 * Consolidate some rather tricky bit shift calculations.
831 * This is hot-path stuff, so not a function.
832 * Simple code, with two return values, so macro rather than inline.
833 *
834 * port is the sole input, in range 0..7.
Mark Lord7368f912008-04-25 11:24:24 -0400835 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
836 * hardport is the other output, in range 0..3.
Mark Lord1cfd19a2008-04-19 15:05:50 -0400837 *
838 * Note that port and hardport may be the same variable in some cases.
839 */
840#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport) \
841{ \
842 shift = mv_hc_from_port(port) * HC_SHIFT; \
843 hardport = mv_hardport_from_port(port); \
844 shift += hardport * 2; \
845}
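
/*
 * Worked example (illustration only): for port 5, mv_hc_from_port(5)
 * gives hc 1 and mv_hardport_from_port(5) gives hardport 1, so
 * shift = 1 * HC_SHIFT + 1 * 2 = 11.  Port 5's ERR_IRQ and DONE_IRQ
 * bits thus land at main-cause bits 11 and 12, consistent with
 * DONE_IRQ_4_7 above.
 */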

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

/**
 * mv_save_cached_regs - (re-)initialize cached port registers
 * @ap: the port whose registers we are caching
 *
 * Initialize the local cache of port registers,
 * so that reading them over and over again can
 * be avoided on the hotter paths of this driver.
 * This saves a few microseconds each time we switch
 * to/from EDMA mode to perform (e.g.) a drive cache flush.
 */
static void mv_save_cached_regs(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	pp->cached.fiscfg = readl(port_mmio + FISCFG_OFS);
	pp->cached.ltmode = readl(port_mmio + LTMODE_OFS);
	pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND_OFS);
	pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD_OFS);
}

/**
 * mv_write_cached_reg - write to a cached port register
 * @addr: hardware address of the register
 * @old: pointer to cached value of the register
 * @new: new value for the register
 *
 * Write a new value to a cached register,
 * but only if the value is different from before.
 */
static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
{
	if (new != *old) {
		*old = new;
		writel(new, addr);
	}
}
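
/*
 * Typical call pattern (illustration only, not a new code path):
 *
 *	mv_write_cached_reg(mv_ap_base(ap) + FISCFG_OFS,
 *			    &pp->cached.fiscfg, fiscfg);
 *
 * skips the (slow) MMIO write whenever the new value matches what was
 * last written, which is the common case when toggling between EDMA
 * and non-EDMA modes.
 */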

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
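
/*
 * Why those pointer shifts (illustration): each CRQB is 32 bytes
 * (1 << EDMA_REQ_Q_PTR_SHIFT) and each CRPB is 8 bytes
 * (1 << EDMA_RSP_Q_PTR_SHIFT), so shifting a queue index left by
 * 5 or 3 converts it into the byte offset that the hardware expects
 * in the IN/OUT pointer registers.
 */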

static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
{
	/*
	 * When writing to the main_irq_mask in hardware,
	 * we must ensure exclusivity between the interrupt coalescing bits
	 * and the corresponding individual port DONE_IRQ bits.
	 *
	 * Note that this register is really an "IRQ enable" register,
	 * not an "IRQ mask" register as Marvell's naming might suggest.
	 */
	if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
		mask &= ~DONE_IRQ_0_3;
	if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
		mask &= ~DONE_IRQ_4_7;
	writelfl(mask, hpriv->main_irq_mask_addr);
}
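
/*
 * Example (illustration only): if ALL_PORTS_COAL_DONE is being enabled,
 * the per-port DONE_IRQ_0_3 / DONE_IRQ_4_7 bits are stripped above, so
 * each command completion raises only the coalesced interrupt rather
 * than both a port DONE_IRQ and a coalescing IRQ.
 */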

static void mv_set_main_irq_mask(struct ata_host *host,
				 u32 disable_bits, u32 enable_bits)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 old_mask, new_mask;

	old_mask = hpriv->main_irq_mask;
	new_mask = (old_mask & ~disable_bits) | enable_bits;
	if (new_mask != old_mask) {
		hpriv->main_irq_mask = new_mask;
		mv_write_main_irq_mask(new_mask, hpriv);
	}
}

static void mv_enable_port_irqs(struct ata_port *ap,
				     unsigned int port_bits)
{
	unsigned int shift, hardport, port = ap->port_no;
	u32 disable_bits, enable_bits;

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
	enable_bits  = port_bits << shift;
	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
}

static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
					  void __iomem *port_mmio,
					  unsigned int port_irqs)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	int hardport = mv_hardport_from_port(ap->port_no);
	void __iomem *hc_mmio = mv_hc_base_from_port(
				mv_host_base(ap->host), ap->port_no);
	u32 hc_irq_cause;

	/* clear EDMA event indicators, if any */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* clear FIS IRQ Cause */
	if (IS_GEN_IIE(hpriv))
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

	mv_enable_port_irqs(ap, port_irqs);
}

static void mv_set_irq_coalescing(struct ata_host *host,
				  unsigned int count, unsigned int usecs)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base, *hc_mmio;
	u32 coal_enable = 0;
	unsigned long flags;
	unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
	const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
							ALL_PORTS_COAL_DONE;

	/* Disable IRQ coalescing if either threshold is zero */
	if (!usecs || !count) {
		clks = count = 0;
	} else {
		/* Respect maximum limits of the hardware */
		clks = usecs * COAL_CLOCKS_PER_USEC;
		if (clks > MAX_COAL_TIME_THRESHOLD)
			clks = MAX_COAL_TIME_THRESHOLD;
		if (count > MAX_COAL_IO_COUNT)
			count = MAX_COAL_IO_COUNT;
	}

	spin_lock_irqsave(&host->lock, flags);
	mv_set_main_irq_mask(host, coal_disable, 0);

	if (is_dual_hc && !IS_GEN_I(hpriv)) {
		/*
		 * GEN_II/GEN_IIE with dual host controllers:
		 * one set of global thresholds for the entire chip.
		 */
		writel(clks,  mmio + MV_IRQ_COAL_TIME_THRESHOLD);
		writel(count, mmio + MV_IRQ_COAL_IO_THRESHOLD);
		/* clear leftover coal IRQ bit */
		writel(~ALL_PORTS_COAL_IRQ, mmio + MV_IRQ_COAL_CAUSE);
		if (count)
			coal_enable = ALL_PORTS_COAL_DONE;
		clks = count = 0; /* force clearing of regular regs below */
	}

	/*
	 * All chips: independent thresholds for each HC on the chip.
	 */
	hc_mmio = mv_hc_base_from_port(mmio, 0);
	writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD_OFS);
	writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD_OFS);
	writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE_OFS);
	if (count)
		coal_enable |= PORTS_0_3_COAL_DONE;
	if (is_dual_hc) {
		hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
		writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD_OFS);
		writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD_OFS);
		writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE_OFS);
		if (count)
			coal_enable |= PORTS_4_7_COAL_DONE;
	}

	mv_set_main_irq_mask(host, 0, coal_enable);
	spin_unlock_irqrestore(&host->lock, flags);
}
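
/*
 * Illustrative note: the irq_coalescing_io_count and
 * irq_coalescing_usecs module parameters above are the intended
 * inputs for this routine (assumed to be applied once at host-init
 * time, outside this excerpt).
 */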

/**
 * mv_start_edma - Enable eDMA engine
 * @ap: ATA port being started
 * @port_mmio: port base address
 * @pp: port private data
 * @protocol: taskfile protocol of the command being started
 *
 * Enable eDMA for this port, first stopping and reconfiguring
 * the engine if it is already running in the wrong (NCQ vs.
 * non-NCQ) mode.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;

		mv_edma_cfg(ap, want_ncq, 1);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);
		mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
}

static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
	const int per_loop = 5, timeout = (15 * 1000 / per_loop);
1127 int i;
1128
1129 /*
1130 * Wait for the EDMA engine to finish transactions in progress.
Mark Lordc46938c2008-05-02 14:02:28 -04001131 * No idea what a good "timeout" value might be, but measurements
1132 * indicate that it often requires hundreds of microseconds
1133 * with two drives in-use. So we use the 15msec value above
1134 * as a rough guess at what even more drives might require.
Mark Lord9b2c4e02008-05-02 02:09:14 -04001135 */
1136 for (i = 0; i < timeout; ++i) {
1137 u32 edma_stat = readl(port_mmio + EDMA_STATUS_OFS);
1138 if ((edma_stat & empty_idle) == empty_idle)
1139 break;
1140 udelay(per_loop);
1141 }
1142 /* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */
1143}
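
/*
 * Worked numbers for the polling loop above: per_loop = 5 usecs and
 * timeout = (15 * 1000 / 5) = 3000 iterations, i.e. at most ~15 msec
 * of busy-waiting before we give up on seeing EMPTY|IDLE.
 */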
1144
Brett Russ05b308e2005-10-05 17:08:53 -04001145/**
Mark Lorde12bef52008-03-31 19:33:56 -04001146 * mv_stop_edma_engine - Disable eDMA engine
Mark Lordb5624682008-03-31 19:34:40 -04001147 * @port_mmio: io base address
Brett Russ05b308e2005-10-05 17:08:53 -04001148 *
1149 * LOCKING:
1150 * Inherited from caller.
1151 */
Mark Lordb5624682008-03-31 19:34:40 -04001152static int mv_stop_edma_engine(void __iomem *port_mmio)
Brett Russ31961942005-09-30 01:36:00 -04001153{
Mark Lordb5624682008-03-31 19:34:40 -04001154 int i;
Brett Russ31961942005-09-30 01:36:00 -04001155
Mark Lordb5624682008-03-31 19:34:40 -04001156 /* Disable eDMA. The disable bit auto clears. */
1157 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
Jeff Garzik8b260242005-11-12 12:32:50 -05001158
Mark Lordb5624682008-03-31 19:34:40 -04001159 /* Wait for the chip to confirm eDMA is off. */
1160 for (i = 10000; i > 0; i--) {
1161 u32 reg = readl(port_mmio + EDMA_CMD_OFS);
Jeff Garzik4537deb52007-07-12 14:30:19 -04001162 if (!(reg & EDMA_EN))
Mark Lordb5624682008-03-31 19:34:40 -04001163 return 0;
1164 udelay(10);
Brett Russ31961942005-09-30 01:36:00 -04001165 }
Mark Lordb5624682008-03-31 19:34:40 -04001166 return -EIO;
Brett Russ31961942005-09-30 01:36:00 -04001167}
1168
Mark Lorde12bef52008-03-31 19:33:56 -04001169static int mv_stop_edma(struct ata_port *ap)
Jeff Garzik0ea9e172007-07-13 17:06:45 -04001170{
Mark Lordb5624682008-03-31 19:34:40 -04001171 void __iomem *port_mmio = mv_ap_base(ap);
1172 struct mv_port_priv *pp = ap->private_data;
Mark Lord66e57a22009-01-30 18:52:58 -05001173 int err = 0;
Jeff Garzik0ea9e172007-07-13 17:06:45 -04001174
Mark Lordb5624682008-03-31 19:34:40 -04001175 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
1176 return 0;
1177 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Mark Lord9b2c4e02008-05-02 02:09:14 -04001178 mv_wait_for_edma_empty_idle(ap);
Mark Lordb5624682008-03-31 19:34:40 -04001179 if (mv_stop_edma_engine(port_mmio)) {
1180 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
Mark Lord66e57a22009-01-30 18:52:58 -05001181 err = -EIO;
Mark Lordb5624682008-03-31 19:34:40 -04001182 }
Mark Lord66e57a22009-01-30 18:52:58 -05001183 mv_edma_cfg(ap, 0, 0);
1184 return err;
Jeff Garzik0ea9e172007-07-13 17:06:45 -04001185}
1186
Jeff Garzik8a70f8d2005-10-05 17:19:47 -04001187#ifdef ATA_DEBUG
Brett Russ31961942005-09-30 01:36:00 -04001188static void mv_dump_mem(void __iomem *start, unsigned bytes)
1189{
Brett Russ31961942005-09-30 01:36:00 -04001190 int b, w;
1191 for (b = 0; b < bytes; ) {
1192 DPRINTK("%p: ", start + b);
1193 for (w = 0; b < bytes && w < 4; w++) {
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001194 printk("%08x ", readl(start + b));
Brett Russ31961942005-09-30 01:36:00 -04001195 b += sizeof(u32);
1196 }
1197 printk("\n");
1198 }
Brett Russ31961942005-09-30 01:36:00 -04001199}
Jeff Garzik8a70f8d2005-10-05 17:19:47 -04001200#endif
1201
Brett Russ31961942005-09-30 01:36:00 -04001202static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
1203{
1204#ifdef ATA_DEBUG
1205 int b, w;
1206 u32 dw;
1207 for (b = 0; b < bytes; ) {
1208 DPRINTK("%02x: ", b);
1209 for (w = 0; b < bytes && w < 4; w++) {
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001210 (void) pci_read_config_dword(pdev, b, &dw);
1211 printk("%08x ", dw);
Brett Russ31961942005-09-30 01:36:00 -04001212 b += sizeof(u32);
1213 }
1214 printk("\n");
1215 }
1216#endif
1217}
1218static void mv_dump_all_regs(void __iomem *mmio_base, int port,
1219 struct pci_dev *pdev)
1220{
1221#ifdef ATA_DEBUG
Jeff Garzik8b260242005-11-12 12:32:50 -05001222 void __iomem *hc_base = mv_hc_base(mmio_base,
Brett Russ31961942005-09-30 01:36:00 -04001223 port >> MV_PORT_HC_SHIFT);
1224 void __iomem *port_base;
1225 int start_port, num_ports, p, start_hc, num_hcs, hc;
1226
1227 if (0 > port) {
1228 start_hc = start_port = 0;
1229 num_ports = 8; /* should be benign for 4-port devices */
1230 num_hcs = 2;
1231 } else {
1232 start_hc = port >> MV_PORT_HC_SHIFT;
1233 start_port = port;
1234 num_ports = num_hcs = 1;
1235 }
Jeff Garzik8b260242005-11-12 12:32:50 -05001236 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
Brett Russ31961942005-09-30 01:36:00 -04001237 num_ports > 1 ? num_ports - 1 : start_port);
1238
1239 if (NULL != pdev) {
1240 DPRINTK("PCI config space regs:\n");
1241 mv_dump_pci_cfg(pdev, 0x68);
1242 }
1243 DPRINTK("PCI regs:\n");
1244 mv_dump_mem(mmio_base+0xc00, 0x3c);
1245 mv_dump_mem(mmio_base+0xd00, 0x34);
1246 mv_dump_mem(mmio_base+0xf00, 0x4);
1247 mv_dump_mem(mmio_base+0x1d00, 0x6c);
1248 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
Dan Alonid220c372006-04-10 23:20:22 -07001249 hc_base = mv_hc_base(mmio_base, hc);
Brett Russ31961942005-09-30 01:36:00 -04001250 DPRINTK("HC regs (HC %i):\n", hc);
1251 mv_dump_mem(hc_base, 0x1c);
1252 }
1253 for (p = start_port; p < start_port + num_ports; p++) {
1254 port_base = mv_port_base(mmio_base, p);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001255 DPRINTK("EDMA regs (port %i):\n", p);
Brett Russ31961942005-09-30 01:36:00 -04001256 mv_dump_mem(port_base, 0x54);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001257 DPRINTK("SATA regs (port %i):\n", p);
Brett Russ31961942005-09-30 01:36:00 -04001258 mv_dump_mem(port_base+0x300, 0x60);
1259 }
1260#endif
1261}
1262
Brett Russ20f733e2005-09-01 18:26:17 -04001263static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1264{
1265 unsigned int ofs;
1266
1267 switch (sc_reg_in) {
1268 case SCR_STATUS:
1269 case SCR_CONTROL:
1270 case SCR_ERROR:
1271 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1272 break;
1273 case SCR_ACTIVE:
1274 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1275 break;
1276 default:
1277 ofs = 0xffffffffU;
1278 break;
1279 }
1280 return ofs;
1281}
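
/*
 * Illustrative only (not part of the driver): how the offset helper is
 * consumed through the scr_read hook below. With libata's usual SCR_*
 * numbering (SCR_STATUS=0, SCR_ERROR=1, SCR_CONTROL=2), SControl lands
 * at SATA_STATUS_OFS + 8, i.e. SStatus/SError/SControl sit back-to-back
 * in the port's register block, with SActive kept separate.
 */
#if 0	/* example sketch */
static u32 example_read_scontrol(struct ata_link *link)
{
	u32 scontrol = 0;

	/* mv_scr_read() returns 0 on success, -EINVAL for a bad register */
	if (mv_scr_read(link, SCR_CONTROL, &scontrol) == 0)
		return scontrol;
	return 0;
}
#endif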
1282
Tejun Heo82ef04f2008-07-31 17:02:40 +09001283static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -04001284{
1285 unsigned int ofs = mv_scr_offset(sc_reg_in);
1286
Tejun Heoda3dbb12007-07-16 14:29:40 +09001287 if (ofs != 0xffffffffU) {
Tejun Heo82ef04f2008-07-31 17:02:40 +09001288 *val = readl(mv_ap_base(link->ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001289 return 0;
1290 } else
1291 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001292}
1293
Tejun Heo82ef04f2008-07-31 17:02:40 +09001294static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -04001295{
1296 unsigned int ofs = mv_scr_offset(sc_reg_in);
1297
Tejun Heoda3dbb12007-07-16 14:29:40 +09001298 if (ofs != 0xffffffffU) {
Tejun Heo82ef04f2008-07-31 17:02:40 +09001299 writelfl(val, mv_ap_base(link->ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001300 return 0;
1301 } else
1302 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001303}
1304
Mark Lordf2738272008-01-26 18:32:29 -05001305static void mv6_dev_config(struct ata_device *adev)
1306{
1307 /*
Mark Lorde49856d2008-04-16 14:59:07 -04001308 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
1309 *
1310 * Gen-II does not support NCQ over a port multiplier
1311 * (no FIS-based switching).
Mark Lordf2738272008-01-26 18:32:29 -05001312 */
Mark Lorde49856d2008-04-16 14:59:07 -04001313 if (adev->flags & ATA_DFLAG_NCQ) {
Mark Lord352fab72008-04-19 14:43:42 -04001314 if (sata_pmp_attached(adev->link->ap)) {
Mark Lorde49856d2008-04-16 14:59:07 -04001315 adev->flags &= ~ATA_DFLAG_NCQ;
Mark Lord352fab72008-04-19 14:43:42 -04001316 ata_dev_printk(adev, KERN_INFO,
1317 "NCQ disabled for command-based switching\n");
Mark Lord352fab72008-04-19 14:43:42 -04001318 }
Mark Lorde49856d2008-04-16 14:59:07 -04001319 }
Mark Lordf2738272008-01-26 18:32:29 -05001320}
1321
Mark Lord3e4a1392008-05-02 02:10:02 -04001322static int mv_qc_defer(struct ata_queued_cmd *qc)
1323{
1324 struct ata_link *link = qc->dev->link;
1325 struct ata_port *ap = link->ap;
1326 struct mv_port_priv *pp = ap->private_data;
1327
1328 /*
Mark Lord29d187b2008-05-02 02:15:37 -04001329 * Don't allow new commands if we're in a delayed EH state
1330 * for NCQ and/or FIS-based switching.
1331 */
1332 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
1333 return ATA_DEFER_PORT;
1334 /*
Mark Lord3e4a1392008-05-02 02:10:02 -04001335 * If the port is completely idle, then allow the new qc.
1336 */
1337 if (ap->nr_active_links == 0)
1338 return 0;
1339
Tejun Heo4bdee6c2008-08-13 20:24:16 +09001340 /*
1341 * The port is operating in host queuing mode (EDMA) with NCQ
1342 * enabled, allow multiple NCQ commands. EDMA also allows
1343 * queueing multiple DMA commands but libata core currently
1344 * doesn't allow it.
1345 */
1346 if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
1347 (pp->pp_flags & MV_PP_FLAG_NCQ_EN) && ata_is_ncq(qc->tf.protocol))
1348 return 0;
1349
Mark Lord3e4a1392008-05-02 02:10:02 -04001350 return ATA_DEFER_PORT;
1351}
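
/*
 * Decision summary for the qc_defer hook above:
 *
 *	delayed-EH flag set			-> ATA_DEFER_PORT
 *	port completely idle			-> issue now
 *	EDMA on + NCQ on + qc is NCQ		-> issue now (host queuing)
 *	anything else				-> ATA_DEFER_PORT
 */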
1352
Mark Lord08da1752009-02-25 15:13:03 -05001353static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
Mark Lorde49856d2008-04-16 14:59:07 -04001354{
Mark Lord08da1752009-02-25 15:13:03 -05001355 struct mv_port_priv *pp = ap->private_data;
1356 void __iomem *port_mmio;
Mark Lord00f42ea2008-05-02 02:11:45 -04001357
Mark Lord08da1752009-02-25 15:13:03 -05001358 u32 fiscfg, *old_fiscfg = &pp->cached.fiscfg;
1359 u32 ltmode, *old_ltmode = &pp->cached.ltmode;
1360 u32 haltcond, *old_haltcond = &pp->cached.haltcond;
Mark Lord00f42ea2008-05-02 02:11:45 -04001361
Mark Lord08da1752009-02-25 15:13:03 -05001362 ltmode = *old_ltmode & ~LTMODE_BIT8;
1363 haltcond = *old_haltcond | EDMA_ERR_DEV;
Mark Lord00f42ea2008-05-02 02:11:45 -04001364
1365 if (want_fbs) {
Mark Lord08da1752009-02-25 15:13:03 -05001366 fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
1367 ltmode = *old_ltmode | LTMODE_BIT8;
Mark Lord4c299ca2008-05-02 02:16:20 -04001368 if (want_ncq)
Mark Lord08da1752009-02-25 15:13:03 -05001369 haltcond &= ~EDMA_ERR_DEV;
Mark Lord4c299ca2008-05-02 02:16:20 -04001370 else
Mark Lord08da1752009-02-25 15:13:03 -05001371 fiscfg |= FISCFG_WAIT_DEV_ERR;
1372 } else {
1373 fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
Mark Lorde49856d2008-04-16 14:59:07 -04001374 }
Mark Lord00f42ea2008-05-02 02:11:45 -04001375
Mark Lord08da1752009-02-25 15:13:03 -05001376 port_mmio = mv_ap_base(ap);
1377 mv_write_cached_reg(port_mmio + FISCFG_OFS, old_fiscfg, fiscfg);
1378 mv_write_cached_reg(port_mmio + LTMODE_OFS, old_ltmode, ltmode);
1379 mv_write_cached_reg(port_mmio + EDMA_HALTCOND_OFS, old_haltcond, haltcond);
Mark Lord0c589122008-01-26 18:31:16 -05001380}
Jeff Garzike4e7b892006-01-31 12:18:41 -05001381
Mark Lorddd2890f2008-05-02 02:10:56 -04001382static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
1383{
1384 struct mv_host_priv *hpriv = ap->host->private_data;
1385 u32 old, new;
1386
1387 /* workaround for 88SX60x1 FEr SATA#25 (part 1) */
1388 old = readl(hpriv->base + MV_GPIO_PORT_CTL_OFS);
1389 if (want_ncq)
1390 new = old | (1 << 22);
1391 else
1392 new = old & ~(1 << 22);
1393 if (new != old)
1394 writel(new, hpriv->base + MV_GPIO_PORT_CTL_OFS);
1395}
1396
Mark Lordc01e8a22009-02-25 15:14:48 -05001397/**
Mark Lord40f21b12009-03-10 18:51:04 -04001398 * mv_bmdma_enable_iie - set a magic bit on GEN_IIE to allow bmdma
1399 * @ap: Port being initialized
Mark Lordc01e8a22009-02-25 15:14:48 -05001400 *
1401 * There are two DMA modes on these chips: basic DMA, and EDMA.
1402 *
1403 * Bit-0 of the "EDMA RESERVED" register enables/disables use
1404 * of basic DMA on the GEN_IIE versions of the chips.
1405 *
1406 * This bit survives EDMA resets, and must be set for basic DMA
1407 * to function, and should be cleared when EDMA is active.
1408 */
1409static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
1410{
1411 struct mv_port_priv *pp = ap->private_data;
1412 u32 new, *old = &pp->cached.unknown_rsvd;
1413
1414 if (enable_bmdma)
1415 new = *old | 1;
1416 else
1417 new = *old & ~1;
1418 mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD_OFS, old, new);
1419}
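
/*
 * For reference, a minimal sketch of the cached-register helper used
 * above (the real mv_write_cached_reg() is defined earlier in this
 * file): it skips the MMIO write when the value is unchanged, keeping
 * the cached copy in sync and avoiding redundant hardware accesses.
 */
#if 0	/* example sketch */
static void example_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
{
	if (new != *old) {
		*old = new;		/* update the cached copy */
		writel(new, addr);	/* touch hardware only on change */
	}
}
#endif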
1420
Mark Lord000b3442009-03-15 11:33:19 -04001421/*
1422 * SOC chips have an issue whereby the HDD LEDs don't always blink
1423 * during I/O when NCQ is enabled. Enabling a special "LED blink" mode
1424 * of the SOC takes care of it, generating a steady blink rate when
1425 * any drive on the chip is active.
1426 *
1427 * Unfortunately, the blink mode is a global hardware setting for the SOC,
1428 * so we must use it whenever at least one port on the SOC has NCQ enabled.
1429 *
1430 * We turn "LED blink" off when NCQ is not in use anywhere, because the normal
1431 * LED operation works then, and provides better (more accurate) feedback.
1432 *
1433 * Note that this code assumes that an SOC never has more than one HC onboard.
1434 */
1435static void mv_soc_led_blink_enable(struct ata_port *ap)
1436{
1437 struct ata_host *host = ap->host;
1438 struct mv_host_priv *hpriv = host->private_data;
1439 void __iomem *hc_mmio;
1440 u32 led_ctrl;
1441
1442 if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
1443 return;
1444 hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
1445 hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
1446 led_ctrl = readl(hc_mmio + SOC_LED_CTRL_OFS);
1447 writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL_OFS);
1448}
1449
1450static void mv_soc_led_blink_disable(struct ata_port *ap)
1451{
1452 struct ata_host *host = ap->host;
1453 struct mv_host_priv *hpriv = host->private_data;
1454 void __iomem *hc_mmio;
1455 u32 led_ctrl;
1456 unsigned int port;
1457
1458 if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
1459 return;
1460
1461 /* disable led-blink only if no ports are using NCQ */
1462 for (port = 0; port < hpriv->n_ports; port++) {
1463 struct ata_port *this_ap = host->ports[port];
1464 struct mv_port_priv *pp = this_ap->private_data;
1465
1466 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
1467 return;
1468 }
1469
1470 hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
1471 hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
1472 led_ctrl = readl(hc_mmio + SOC_LED_CTRL_OFS);
1473 writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL_OFS);
1474}
1475
Mark Lord00b81232009-01-30 18:47:51 -05001476static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
Jeff Garzike4e7b892006-01-31 12:18:41 -05001477{
1478 u32 cfg;
Mark Lorde12bef52008-03-31 19:33:56 -04001479 struct mv_port_priv *pp = ap->private_data;
1480 struct mv_host_priv *hpriv = ap->host->private_data;
1481 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzike4e7b892006-01-31 12:18:41 -05001482
1483 /* set up non-NCQ EDMA configuration */
1484 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
Mark Lordd16ab3f2009-02-25 15:17:43 -05001485 pp->pp_flags &=
1486 ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
Jeff Garzike4e7b892006-01-31 12:18:41 -05001487
1488 if (IS_GEN_I(hpriv))
1489 cfg |= (1 << 8); /* enab config burst size mask */
1490
Mark Lorddd2890f2008-05-02 02:10:56 -04001491 else if (IS_GEN_II(hpriv)) {
Jeff Garzike4e7b892006-01-31 12:18:41 -05001492 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
Mark Lorddd2890f2008-05-02 02:10:56 -04001493 mv_60x1_errata_sata25(ap, want_ncq);
Jeff Garzike4e7b892006-01-31 12:18:41 -05001494
Mark Lorddd2890f2008-05-02 02:10:56 -04001495 } else if (IS_GEN_IIE(hpriv)) {
Mark Lord00f42ea2008-05-02 02:11:45 -04001496 int want_fbs = sata_pmp_attached(ap);
1497 /*
1498 * Possible future enhancement:
1499 *
1500 * The chip can use FBS with non-NCQ, if we allow it,
1501 * But first we need to have the error handling in place
1502 * for this mode (datasheet section 7.3.15.4.2.3).
1503 * So disallow non-NCQ FBS for now.
1504 */
1505 want_fbs &= want_ncq;
1506
Mark Lord08da1752009-02-25 15:13:03 -05001507 mv_config_fbs(ap, want_ncq, want_fbs);
Mark Lord00f42ea2008-05-02 02:11:45 -04001508
1509 if (want_fbs) {
1510 pp->pp_flags |= MV_PP_FLAG_FBS_EN;
1511 cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
1512 }
1513
Jeff Garzike728eab2007-02-25 02:53:41 -05001514 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
Mark Lord00b81232009-01-30 18:47:51 -05001515 if (want_edma) {
1516 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1517 if (!IS_SOC(hpriv))
1518 cfg |= (1 << 18); /* enab early completion */
1519 }
Mark Lord616d4a92008-05-02 02:08:32 -04001520 if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
1521 cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
Mark Lordc01e8a22009-02-25 15:14:48 -05001522 mv_bmdma_enable_iie(ap, !want_edma);
Mark Lord000b3442009-03-15 11:33:19 -04001523
1524 if (IS_SOC(hpriv)) {
1525 if (want_ncq)
1526 mv_soc_led_blink_enable(ap);
1527 else
1528 mv_soc_led_blink_disable(ap);
1529 }
Jeff Garzike4e7b892006-01-31 12:18:41 -05001530 }
1531
Mark Lord72109162008-01-26 18:31:33 -05001532 if (want_ncq) {
1533 cfg |= EDMA_CFG_NCQ;
1534 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
Mark Lord00b81232009-01-30 18:47:51 -05001535 }
Mark Lord72109162008-01-26 18:31:33 -05001536
Jeff Garzike4e7b892006-01-31 12:18:41 -05001537 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1538}
1539
Mark Lordda2fa9b2008-01-26 18:32:45 -05001540static void mv_port_free_dma_mem(struct ata_port *ap)
1541{
1542 struct mv_host_priv *hpriv = ap->host->private_data;
1543 struct mv_port_priv *pp = ap->private_data;
Mark Lordeb73d552008-01-29 13:24:00 -05001544 int tag;
Mark Lordda2fa9b2008-01-26 18:32:45 -05001545
1546 if (pp->crqb) {
1547 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1548 pp->crqb = NULL;
1549 }
1550 if (pp->crpb) {
1551 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1552 pp->crpb = NULL;
1553 }
Mark Lordeb73d552008-01-29 13:24:00 -05001554 /*
1555 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1556 * For later hardware, we have one unique sg_tbl per NCQ tag.
1557 */
1558 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1559 if (pp->sg_tbl[tag]) {
1560 if (tag == 0 || !IS_GEN_I(hpriv))
1561 dma_pool_free(hpriv->sg_tbl_pool,
1562 pp->sg_tbl[tag],
1563 pp->sg_tbl_dma[tag]);
1564 pp->sg_tbl[tag] = NULL;
1565 }
Mark Lordda2fa9b2008-01-26 18:32:45 -05001566 }
1567}
1568
Brett Russ05b308e2005-10-05 17:08:53 -04001569/**
1570 * mv_port_start - Port specific init/start routine.
1571 * @ap: ATA channel to manipulate
1572 *
1573 * Allocate and point to DMA memory, init port private memory,
1574 * zero indices.
1575 *
1576 * LOCKING:
1577 * Inherited from caller.
1578 */
Brett Russ31961942005-09-30 01:36:00 -04001579static int mv_port_start(struct ata_port *ap)
1580{
Jeff Garzikcca39742006-08-24 03:19:22 -04001581 struct device *dev = ap->host->dev;
1582 struct mv_host_priv *hpriv = ap->host->private_data;
Brett Russ31961942005-09-30 01:36:00 -04001583 struct mv_port_priv *pp;
Mark Lord933cb8e2009-04-06 12:30:43 -04001584 unsigned long flags;
James Bottomleydde20202008-02-19 11:36:56 +01001585 int tag;
Brett Russ31961942005-09-30 01:36:00 -04001586
Tejun Heo24dc5f32007-01-20 16:00:28 +09001587 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
Jeff Garzik6037d6b2005-11-04 22:08:00 -05001588 if (!pp)
Tejun Heo24dc5f32007-01-20 16:00:28 +09001589 return -ENOMEM;
Mark Lordda2fa9b2008-01-26 18:32:45 -05001590 ap->private_data = pp;
Brett Russ31961942005-09-30 01:36:00 -04001591
Mark Lordda2fa9b2008-01-26 18:32:45 -05001592 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1593 if (!pp->crqb)
1594 return -ENOMEM;
1595 memset(pp->crqb, 0, MV_CRQB_Q_SZ);
Brett Russ31961942005-09-30 01:36:00 -04001596
Mark Lordda2fa9b2008-01-26 18:32:45 -05001597 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1598 if (!pp->crpb)
1599 goto out_port_free_dma_mem;
1600 memset(pp->crpb, 0, MV_CRPB_Q_SZ);
Brett Russ31961942005-09-30 01:36:00 -04001601
Mark Lord3bd0a702008-06-18 12:11:16 -04001602 /* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
1603 if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
1604 ap->flags |= ATA_FLAG_AN;
Mark Lordeb73d552008-01-29 13:24:00 -05001605 /*
1606 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1607 * For later hardware, we need one unique sg_tbl per NCQ tag.
1608 */
1609 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1610 if (tag == 0 || !IS_GEN_I(hpriv)) {
1611 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1612 GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1613 if (!pp->sg_tbl[tag])
1614 goto out_port_free_dma_mem;
1615 } else {
1616 pp->sg_tbl[tag] = pp->sg_tbl[0];
1617 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1618 }
1619 }
Mark Lord933cb8e2009-04-06 12:30:43 -04001620
1621 spin_lock_irqsave(ap->lock, flags);
Mark Lord08da1752009-02-25 15:13:03 -05001622 mv_save_cached_regs(ap);
Mark Lord66e57a22009-01-30 18:52:58 -05001623 mv_edma_cfg(ap, 0, 0);
Mark Lord933cb8e2009-04-06 12:30:43 -04001624 spin_unlock_irqrestore(ap->lock, flags);
1625
Brett Russ31961942005-09-30 01:36:00 -04001626 return 0;
Mark Lordda2fa9b2008-01-26 18:32:45 -05001627
1628out_port_free_dma_mem:
1629 mv_port_free_dma_mem(ap);
1630 return -ENOMEM;
Brett Russ31961942005-09-30 01:36:00 -04001631}
1632
Brett Russ05b308e2005-10-05 17:08:53 -04001633/**
1634 * mv_port_stop - Port specific cleanup/stop routine.
1635 * @ap: ATA channel to manipulate
1636 *
1637 * Stop DMA, cleanup port memory.
1638 *
1639 * LOCKING:
Jeff Garzikcca39742006-08-24 03:19:22 -04001640 * This routine uses the host lock to protect the DMA stop.
Brett Russ05b308e2005-10-05 17:08:53 -04001641 */
Brett Russ31961942005-09-30 01:36:00 -04001642static void mv_port_stop(struct ata_port *ap)
1643{
Mark Lord933cb8e2009-04-06 12:30:43 -04001644 unsigned long flags;
1645
1646 spin_lock_irqsave(ap->lock, flags);
Mark Lorde12bef52008-03-31 19:33:56 -04001647 mv_stop_edma(ap);
Mark Lord88e675e2008-05-17 13:36:30 -04001648 mv_enable_port_irqs(ap, 0);
Mark Lord933cb8e2009-04-06 12:30:43 -04001649 spin_unlock_irqrestore(ap->lock, flags);
Mark Lordda2fa9b2008-01-26 18:32:45 -05001650 mv_port_free_dma_mem(ap);
Brett Russ31961942005-09-30 01:36:00 -04001651}
1652
Brett Russ05b308e2005-10-05 17:08:53 -04001653/**
1654 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1655 * @qc: queued command whose SG list to source from
1656 *
1657 * Populate the SG list and mark the last entry.
1658 *
1659 * LOCKING:
1660 * Inherited from caller.
1661 */
Jeff Garzik6c087722007-10-12 00:16:23 -04001662static void mv_fill_sg(struct ata_queued_cmd *qc)
Brett Russ31961942005-09-30 01:36:00 -04001663{
1664 struct mv_port_priv *pp = qc->ap->private_data;
Jeff Garzik972c26b2005-10-18 22:14:54 -04001665 struct scatterlist *sg;
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04001666 struct mv_sg *mv_sg, *last_sg = NULL;
Tejun Heoff2aeb12007-12-05 16:43:11 +09001667 unsigned int si;
Brett Russ31961942005-09-30 01:36:00 -04001668
Mark Lordeb73d552008-01-29 13:24:00 -05001669 mv_sg = pp->sg_tbl[qc->tag];
Tejun Heoff2aeb12007-12-05 16:43:11 +09001670 for_each_sg(qc->sg, sg, qc->n_elem, si) {
Jeff Garzikd88184f2007-02-26 01:26:06 -05001671 dma_addr_t addr = sg_dma_address(sg);
1672 u32 sg_len = sg_dma_len(sg);
Brett Russ31961942005-09-30 01:36:00 -04001673
Olof Johansson4007b492007-10-02 20:45:27 -05001674 while (sg_len) {
1675 u32 offset = addr & 0xffff;
1676 u32 len = sg_len;
Brett Russ31961942005-09-30 01:36:00 -04001677
Mark Lord32cd11a2009-02-01 16:50:32 -05001678 if (offset + len > 0x10000)
Olof Johansson4007b492007-10-02 20:45:27 -05001679 len = 0x10000 - offset;
Jeff Garzik972c26b2005-10-18 22:14:54 -04001680
Olof Johansson4007b492007-10-02 20:45:27 -05001681 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1682 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
Jeff Garzik6c087722007-10-12 00:16:23 -04001683 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
Mark Lord32cd11a2009-02-01 16:50:32 -05001684 mv_sg->reserved = 0;
Olof Johansson4007b492007-10-02 20:45:27 -05001685
1686 sg_len -= len;
1687 addr += len;
1688
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04001689 last_sg = mv_sg;
Olof Johansson4007b492007-10-02 20:45:27 -05001690 mv_sg++;
Olof Johansson4007b492007-10-02 20:45:27 -05001691 }
Brett Russ31961942005-09-30 01:36:00 -04001692 }
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04001693
1694 if (likely(last_sg))
1695 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
Mark Lord32cd11a2009-02-01 16:50:32 -05001696 mb(); /* ensure data structure is visible to the chipset */
Brett Russ31961942005-09-30 01:36:00 -04001697}
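
/*
 * Worked example of the 64 KiB split above (illustrative numbers):
 * an S/G entry with addr 0x1234fff0 and length 0x20 has offset 0xfff0,
 * so offset + len crosses 0x10000 and the entry becomes two ePRDs:
 *
 *	ePRD 0: addr 0x1234fff0, len 0x0010
 *	ePRD 1: addr 0x12350000, len 0x0010
 *
 * with EPRD_FLAG_END_OF_TBL OR'd into the final entry only.
 */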
1698
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001699static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001700{
Mark Lord559eeda2006-05-19 16:40:15 -04001701 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001702 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001703 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001704}
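
/*
 * Example of one packed CRQB command word (layout as implied by the
 * helper above): the final command byte of a request is packed as
 *
 *	mv_crqb_pack_cmd(cw, tf->command, ATA_REG_CMD, 1);
 *
 * which places the data value in the low byte, the shadow-register
 * address at CRQB_CMD_ADDR_SHIFT, and sets CRQB_CMD_CS plus
 * CRQB_CMD_LAST to mark the end of the register write sequence.
 */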
1705
Brett Russ05b308e2005-10-05 17:08:53 -04001706/**
Mark Lordda142652009-01-30 18:51:54 -05001707 * mv_sff_irq_clear - Clear hardware interrupt after DMA.
1708 * @ap: Port associated with this ATA transaction.
1709 *
1710 * We need this only for ATAPI bmdma transactions,
1711 * as otherwise we experience spurious interrupts
1712 * after libata-sff handles the bmdma interrupts.
1713 */
1714static void mv_sff_irq_clear(struct ata_port *ap)
1715{
1716 mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
1717}
1718
1719/**
1720 * mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
1721 * @qc: queued command to check for chipset/DMA compatibility.
1722 *
1723 * The bmdma engines cannot handle speculative data sizes
1724 * (bytecount under/over flow). So only allow DMA for
1725 * data transfer commands with known data sizes.
1726 *
1727 * LOCKING:
1728 * Inherited from caller.
1729 */
1730static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
1731{
1732 struct scsi_cmnd *scmd = qc->scsicmd;
1733
1734 if (scmd) {
1735 switch (scmd->cmnd[0]) {
1736 case READ_6:
1737 case READ_10:
1738 case READ_12:
1739 case WRITE_6:
1740 case WRITE_10:
1741 case WRITE_12:
1742 case GPCMD_READ_CD:
1743 case GPCMD_SEND_DVD_STRUCTURE:
1744 case GPCMD_SEND_CUE_SHEET:
1745 return 0; /* DMA is safe */
1746 }
1747 }
1748 return -EOPNOTSUPP; /* use PIO instead */
1749}
1750
1751/**
1752 * mv_bmdma_setup - Set up BMDMA transaction
1753 * @qc: queued command to prepare DMA for.
1754 *
1755 * LOCKING:
1756 * Inherited from caller.
1757 */
1758static void mv_bmdma_setup(struct ata_queued_cmd *qc)
1759{
1760 struct ata_port *ap = qc->ap;
1761 void __iomem *port_mmio = mv_ap_base(ap);
1762 struct mv_port_priv *pp = ap->private_data;
1763
1764 mv_fill_sg(qc);
1765
1766 /* clear all DMA cmd bits */
1767 writel(0, port_mmio + BMDMA_CMD_OFS);
1768
1769 /* load PRD table addr. */
1770 writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16,
1771 port_mmio + BMDMA_PRD_HIGH_OFS);
1772 writelfl(pp->sg_tbl_dma[qc->tag],
1773 port_mmio + BMDMA_PRD_LOW_OFS);
1774
1775 /* issue r/w command */
1776 ap->ops->sff_exec_command(ap, &qc->tf);
1777}
1778
1779/**
1780 * mv_bmdma_start - Start a BMDMA transaction
1781 * @qc: queued command to start DMA on.
1782 *
1783 * LOCKING:
1784 * Inherited from caller.
1785 */
1786static void mv_bmdma_start(struct ata_queued_cmd *qc)
1787{
1788 struct ata_port *ap = qc->ap;
1789 void __iomem *port_mmio = mv_ap_base(ap);
1790 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
1791 u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;
1792
1793 /* start host DMA transaction */
1794 writelfl(cmd, port_mmio + BMDMA_CMD_OFS);
1795}
1796
1797/**
1798 * mv_bmdma_stop - Stop BMDMA transfer
1799 * @qc: queued command to stop DMA on.
1800 *
1801 * Clears the ATA_DMA_START flag in the bmdma control register
1802 *
1803 * LOCKING:
1804 * Inherited from caller.
1805 */
1806static void mv_bmdma_stop(struct ata_queued_cmd *qc)
1807{
1808 struct ata_port *ap = qc->ap;
1809 void __iomem *port_mmio = mv_ap_base(ap);
1810 u32 cmd;
1811
1812 /* clear start/stop bit */
1813 cmd = readl(port_mmio + BMDMA_CMD_OFS);
1814 cmd &= ~ATA_DMA_START;
1815 writelfl(cmd, port_mmio + BMDMA_CMD_OFS);
1816
1817 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
1818 ata_sff_dma_pause(ap);
1819}
1820
1821/**
1822 * mv_bmdma_status - Read BMDMA status
1823 * @ap: port for which to retrieve DMA status.
1824 *
1825 * Read and return equivalent of the sff BMDMA status register.
1826 *
1827 * LOCKING:
1828 * Inherited from caller.
1829 */
1830static u8 mv_bmdma_status(struct ata_port *ap)
1831{
1832 void __iomem *port_mmio = mv_ap_base(ap);
1833 u32 reg, status;
1834
1835 /*
1836 * Other bits are valid only if ATA_DMA_ACTIVE==0,
1837 * and the ATA_DMA_INTR bit doesn't exist.
1838 */
1839 reg = readl(port_mmio + BMDMA_STATUS_OFS);
1840 if (reg & ATA_DMA_ACTIVE)
1841 status = ATA_DMA_ACTIVE;
1842 else
1843 status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
1844 return status;
1845}
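
/*
 * Resulting mapping (from the logic above) onto sff-style status bits:
 *
 *	hardware BMDMA_STATUS		value returned
 *	ATA_DMA_ACTIVE set		ATA_DMA_ACTIVE
 *	idle, ATA_DMA_ERR clear		ATA_DMA_INTR
 *	idle, ATA_DMA_ERR set		ATA_DMA_INTR | ATA_DMA_ERR
 *
 * i.e. "no longer active" is reported as "interrupt asserted", which
 * is what the libata-sff bmdma paths expect after a completed transfer.
 */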
1846
1847/**
Brett Russ05b308e2005-10-05 17:08:53 -04001848 * mv_qc_prep - Host specific command preparation.
1849 * @qc: queued command to prepare
1850 *
1851 * This routine simply redirects to the general purpose routine
1852 * if command is not DMA. Else, it handles prep of the CRQB
1853 * (command request block), does some sanity checking, and calls
1854 * the SG load routine.
1855 *
1856 * LOCKING:
1857 * Inherited from caller.
1858 */
Brett Russ31961942005-09-30 01:36:00 -04001859static void mv_qc_prep(struct ata_queued_cmd *qc)
1860{
1861 struct ata_port *ap = qc->ap;
1862 struct mv_port_priv *pp = ap->private_data;
Mark Lorde1469872006-05-22 19:02:03 -04001863 __le16 *cw;
Brett Russ31961942005-09-30 01:36:00 -04001864 struct ata_taskfile *tf;
1865 u16 flags = 0;
Mark Lorda6432432006-05-19 16:36:36 -04001866 unsigned in_index;
Brett Russ31961942005-09-30 01:36:00 -04001867
Mark Lord138bfdd2008-01-26 18:33:18 -05001868 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1869 (qc->tf.protocol != ATA_PROT_NCQ))
Brett Russ31961942005-09-30 01:36:00 -04001870 return;
Brett Russ20f733e2005-09-01 18:26:17 -04001871
Brett Russ31961942005-09-30 01:36:00 -04001872 /* Fill in command request block
1873 */
Jeff Garzike4e7b892006-01-31 12:18:41 -05001874 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
Brett Russ31961942005-09-30 01:36:00 -04001875 flags |= CRQB_FLAG_READ;
Tejun Heobeec7db2006-02-11 19:11:13 +09001876 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
Brett Russ31961942005-09-30 01:36:00 -04001877 flags |= qc->tag << CRQB_TAG_SHIFT;
Mark Lorde49856d2008-04-16 14:59:07 -04001878 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
Brett Russ31961942005-09-30 01:36:00 -04001879
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001880 /* get current queue index from software */
Mark Lordfcfb1f72008-04-19 15:06:40 -04001881 in_index = pp->req_idx;
Brett Russ31961942005-09-30 01:36:00 -04001882
Mark Lorda6432432006-05-19 16:36:36 -04001883 pp->crqb[in_index].sg_addr =
Mark Lordeb73d552008-01-29 13:24:00 -05001884 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
Mark Lorda6432432006-05-19 16:36:36 -04001885 pp->crqb[in_index].sg_addr_hi =
Mark Lordeb73d552008-01-29 13:24:00 -05001886 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
Mark Lorda6432432006-05-19 16:36:36 -04001887 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1888
1889 cw = &pp->crqb[in_index].ata_cmd[0];
Brett Russ31961942005-09-30 01:36:00 -04001890 tf = &qc->tf;
1891
1892 /* Sadly, the CRQB cannot accommodate all registers--there are
1893 * only 11 bytes...so we must pick and choose required
1894 * registers based on the command. So, we drop feature and
1895 * hob_feature for [RW] DMA commands, but they are needed for
Mark Lordcd12e1f2009-01-19 18:06:28 -05001896 * NCQ. NCQ will drop hob_nsect, which is not needed there
1897 * (nsect is used only for the tag; feat/hob_feat hold true nsect).
Brett Russ31961942005-09-30 01:36:00 -04001898 */
1899 switch (tf->command) {
1900 case ATA_CMD_READ:
1901 case ATA_CMD_READ_EXT:
1902 case ATA_CMD_WRITE:
1903 case ATA_CMD_WRITE_EXT:
Jens Axboec15d85c2006-02-15 15:59:25 +01001904 case ATA_CMD_WRITE_FUA_EXT:
Brett Russ31961942005-09-30 01:36:00 -04001905 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1906 break;
Brett Russ31961942005-09-30 01:36:00 -04001907 case ATA_CMD_FPDMA_READ:
1908 case ATA_CMD_FPDMA_WRITE:
Jeff Garzik8b260242005-11-12 12:32:50 -05001909 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
Brett Russ31961942005-09-30 01:36:00 -04001910 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1911 break;
Brett Russ31961942005-09-30 01:36:00 -04001912 default:
1913 /* The only other commands EDMA supports in non-queued and
1914 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1915 * of which are defined/used by Linux. If we get here, this
1916 * driver needs work.
1917 *
1918 * FIXME: modify libata to give qc_prep a return value and
1919 * return error here.
1920 */
1921 BUG_ON(tf->command);
1922 break;
1923 }
1924 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1925 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1926 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1927 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1928 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1929 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1930 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1931 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1932 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1933
Jeff Garzike4e7b892006-01-31 12:18:41 -05001934 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
Brett Russ31961942005-09-30 01:36:00 -04001935 return;
Jeff Garzike4e7b892006-01-31 12:18:41 -05001936 mv_fill_sg(qc);
1937}
1938
1939/**
1940 * mv_qc_prep_iie - Host specific command preparation.
1941 * @qc: queued command to prepare
1942 *
1943 * This routine simply redirects to the general purpose routine
1944 * if command is not DMA. Else, it handles prep of the CRQB
1945 * (command request block), does some sanity checking, and calls
1946 * the SG load routine.
1947 *
1948 * LOCKING:
1949 * Inherited from caller.
1950 */
1951static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1952{
1953 struct ata_port *ap = qc->ap;
1954 struct mv_port_priv *pp = ap->private_data;
1955 struct mv_crqb_iie *crqb;
1956 struct ata_taskfile *tf;
Mark Lorda6432432006-05-19 16:36:36 -04001957 unsigned in_index;
Jeff Garzike4e7b892006-01-31 12:18:41 -05001958 u32 flags = 0;
1959
Mark Lord138bfdd2008-01-26 18:33:18 -05001960 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1961 (qc->tf.protocol != ATA_PROT_NCQ))
Jeff Garzike4e7b892006-01-31 12:18:41 -05001962 return;
1963
Mark Lorde12bef52008-03-31 19:33:56 -04001964 /* Fill in Gen IIE command request block */
Jeff Garzike4e7b892006-01-31 12:18:41 -05001965 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1966 flags |= CRQB_FLAG_READ;
1967
Tejun Heobeec7db2006-02-11 19:11:13 +09001968 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
Jeff Garzike4e7b892006-01-31 12:18:41 -05001969 flags |= qc->tag << CRQB_TAG_SHIFT;
Mark Lord8c0aeb42008-01-26 18:31:48 -05001970 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
Mark Lorde49856d2008-04-16 14:59:07 -04001971 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
Jeff Garzike4e7b892006-01-31 12:18:41 -05001972
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001973 /* get current queue index from software */
Mark Lordfcfb1f72008-04-19 15:06:40 -04001974 in_index = pp->req_idx;
Mark Lorda6432432006-05-19 16:36:36 -04001975
1976 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
Mark Lordeb73d552008-01-29 13:24:00 -05001977 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1978 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
Jeff Garzike4e7b892006-01-31 12:18:41 -05001979 crqb->flags = cpu_to_le32(flags);
1980
1981 tf = &qc->tf;
1982 crqb->ata_cmd[0] = cpu_to_le32(
1983 (tf->command << 16) |
1984 (tf->feature << 24)
1985 );
1986 crqb->ata_cmd[1] = cpu_to_le32(
1987 (tf->lbal << 0) |
1988 (tf->lbam << 8) |
1989 (tf->lbah << 16) |
1990 (tf->device << 24)
1991 );
1992 crqb->ata_cmd[2] = cpu_to_le32(
1993 (tf->hob_lbal << 0) |
1994 (tf->hob_lbam << 8) |
1995 (tf->hob_lbah << 16) |
1996 (tf->hob_feature << 24)
1997 );
1998 crqb->ata_cmd[3] = cpu_to_le32(
1999 (tf->nsect << 0) |
2000 (tf->hob_nsect << 8)
2001 );
2002
2003 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2004 return;
Brett Russ31961942005-09-30 01:36:00 -04002005 mv_fill_sg(qc);
2006}
2007
Brett Russ05b308e2005-10-05 17:08:53 -04002008/**
Mark Lordd16ab3f2009-02-25 15:17:43 -05002009 * mv_sff_check_status - fetch device status, if valid
2010 * @ap: ATA port to fetch status from
2011 *
2012 * When using command issue via mv_qc_issue_fis(),
2013 * the initial ATA_BUSY state does not show up in the
2014 * ATA status (shadow) register. This can confuse libata!
2015 *
2016 * So we have a hook here to fake ATA_BUSY for that situation,
2017 * until the first time a BUSY, DRQ, or ERR bit is seen.
2018 *
2019 * The rest of the time, it simply returns the ATA status register.
2020 */
2021static u8 mv_sff_check_status(struct ata_port *ap)
2022{
2023 u8 stat = ioread8(ap->ioaddr.status_addr);
2024 struct mv_port_priv *pp = ap->private_data;
2025
2026 if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
2027 if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
2028 pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
2029 else
2030 stat = ATA_BUSY;
2031 }
2032 return stat;
2033}
2034
2035/**
Mark Lord70f8b792009-02-25 15:19:20 -05002036 * mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
2037 * @fis: fis to be sent
2038 * @nwords: number of 32-bit words in the fis
2039 */
2040static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
2041{
2042 void __iomem *port_mmio = mv_ap_base(ap);
2043 u32 ifctl, old_ifctl, ifstat;
2044 int i, timeout = 200, final_word = nwords - 1;
2045
2046 /* Initiate FIS transmission mode */
2047 old_ifctl = readl(port_mmio + SATA_IFCTL_OFS);
2048 ifctl = 0x100 | (old_ifctl & 0xf);
2049 writelfl(ifctl, port_mmio + SATA_IFCTL_OFS);
2050
2051 /* Send all words of the FIS except for the final word */
2052 for (i = 0; i < final_word; ++i)
2053 writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS_OFS);
2054
2055 /* Flag end-of-transmission, and then send the final word */
2056 writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL_OFS);
2057 writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS_OFS);
2058
2059 /*
2060 * Wait for FIS transmission to complete.
2061 * This typically takes just a single iteration.
2062 */
2063 do {
2064 ifstat = readl(port_mmio + SATA_IFSTAT_OFS);
2065 } while (!(ifstat & 0x1000) && --timeout);
2066
2067 /* Restore original port configuration */
2068 writelfl(old_ifctl, port_mmio + SATA_IFCTL_OFS);
2069
2070 /* See if it worked */
2071 if ((ifstat & 0x3000) != 0x1000) {
2072 ata_port_printk(ap, KERN_WARNING,
2073 "%s transmission error, ifstat=%08x\n",
2074 __func__, ifstat);
2075 return AC_ERR_OTHER;
2076 }
2077 return 0;
2078}
2079
2080/**
2081 * mv_qc_issue_fis - Issue a command directly as a FIS
2082 * @qc: queued command to start
2083 *
2084 * Note that the ATA shadow registers are not updated
2085 * after command issue, so the device will appear "READY"
2086 * if polled, even while it is BUSY processing the command.
2087 *
2088 * So we use a status hook to fake ATA_BUSY until the drive changes state.
2089 *
2090 * Note: we don't get updated shadow regs on *completion*
2091 * of non-data commands. So avoid sending them via this function,
2092 * as they will appear to have completed immediately.
2093 *
2094 * GEN_IIE has special registers that we could get the result tf from,
2095 * but earlier chipsets do not. For now, we ignore those registers.
2096 */
2097static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
2098{
2099 struct ata_port *ap = qc->ap;
2100 struct mv_port_priv *pp = ap->private_data;
2101 struct ata_link *link = qc->dev->link;
2102 u32 fis[5];
2103 int err = 0;
2104
2105 ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
2106 err = mv_send_fis(ap, fis, sizeof(fis) / sizeof(fis[0]));
2107 if (err)
2108 return err;
2109
2110 switch (qc->tf.protocol) {
2111 case ATAPI_PROT_PIO:
2112 pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2113 /* fall through */
2114 case ATAPI_PROT_NODATA:
2115 ap->hsm_task_state = HSM_ST_FIRST;
2116 break;
2117 case ATA_PROT_PIO:
2118 pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2119 if (qc->tf.flags & ATA_TFLAG_WRITE)
2120 ap->hsm_task_state = HSM_ST_FIRST;
2121 else
2122 ap->hsm_task_state = HSM_ST;
2123 break;
2124 default:
2125 ap->hsm_task_state = HSM_ST_LAST;
2126 break;
2127 }
2128
2129 if (qc->tf.flags & ATA_TFLAG_POLLING)
2130 ata_pio_queue_task(ap, qc, 0);
2131 return 0;
2132}
2133
2134/**
Brett Russ05b308e2005-10-05 17:08:53 -04002135 * mv_qc_issue - Initiate a command to the host
2136 * @qc: queued command to start
2137 *
2138 * This routine simply redirects to the general purpose routine
2139 * if command is not DMA. Else, it sanity checks our local
2140 * caches of the request producer/consumer indices then enables
2141 * DMA and bumps the request producer index.
2142 *
2143 * LOCKING:
2144 * Inherited from caller.
2145 */
Tejun Heo9a3d9eb2006-01-23 13:09:36 +09002146static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
Brett Russ31961942005-09-30 01:36:00 -04002147{
Mark Lordf48765c2009-01-30 18:48:41 -05002148 static int limit_warnings = 10;
Jeff Garzikc5d3e452007-07-11 18:30:50 -04002149 struct ata_port *ap = qc->ap;
2150 void __iomem *port_mmio = mv_ap_base(ap);
2151 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002152 u32 in_index;
Mark Lord42ed8932009-02-25 15:15:39 -05002153 unsigned int port_irqs;
Brett Russ31961942005-09-30 01:36:00 -04002154
Mark Lordd16ab3f2009-02-25 15:17:43 -05002155 pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */
2156
Mark Lordf48765c2009-01-30 18:48:41 -05002157 switch (qc->tf.protocol) {
2158 case ATA_PROT_DMA:
2159 case ATA_PROT_NCQ:
2160 mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
2161 pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2162 in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
2163
2164 /* Write the request in pointer to kick the EDMA to life */
2165 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
2166 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
2167 return 0;
2168
2169 case ATA_PROT_PIO:
Mark Lordc6112bd2008-06-18 12:13:02 -04002170 /*
2171 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
2172 *
2173 * Someday, we might implement special polling workarounds
2174 * for these, but it all seems rather unnecessary since we
2175 * normally use only DMA for commands which transfer more
2176 * than a single block of data.
2177 *
2178 * Much of the time, this could just work regardless.
2179 * So for now, just log the incident, and allow the attempt.
2180 */
Mark Lordc7843e82008-06-18 21:57:42 -04002181 if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
Mark Lordc6112bd2008-06-18 12:13:02 -04002182 --limit_warnings;
2183 ata_link_printk(qc->dev->link, KERN_WARNING, DRV_NAME
2184 ": attempting PIO w/multiple DRQ: "
2185 "this may fail due to h/w errata\n");
2186 }
Mark Lordf48765c2009-01-30 18:48:41 -05002187 /* drop through */
Mark Lord42ed8932009-02-25 15:15:39 -05002188 case ATA_PROT_NODATA:
Mark Lordf48765c2009-01-30 18:48:41 -05002189 case ATAPI_PROT_PIO:
Mark Lord42ed8932009-02-25 15:15:39 -05002190 case ATAPI_PROT_NODATA:
2191 if (ap->flags & ATA_FLAG_PIO_POLLING)
2192 qc->tf.flags |= ATA_TFLAG_POLLING;
2193 break;
Brett Russ31961942005-09-30 01:36:00 -04002194 }
Mark Lord42ed8932009-02-25 15:15:39 -05002195
2196 if (qc->tf.flags & ATA_TFLAG_POLLING)
2197 port_irqs = ERR_IRQ; /* mask device interrupt when polling */
2198 else
2199 port_irqs = ERR_IRQ | DONE_IRQ; /* unmask all interrupts */
2200
2201 /*
2202 * We're about to send a non-EDMA capable command to the
2203 * port. Turn off EDMA so there won't be problems accessing
2204 * shadow block, etc registers.
2205 */
2206 mv_stop_edma(ap);
2207 mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
2208 mv_pmp_select(ap, qc->dev->link->pmp);
Mark Lord70f8b792009-02-25 15:19:20 -05002209
2210 if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
2211 struct mv_host_priv *hpriv = ap->host->private_data;
2212 /*
2213 * Workaround for 88SX60x1 FEr SATA#25 (part 2).
Mark Lord40f21b12009-03-10 18:51:04 -04002214 *
Mark Lord70f8b792009-02-25 15:19:20 -05002215 * After any NCQ error, the READ_LOG_EXT command
2216 * from libata-eh *must* use mv_qc_issue_fis().
2217 * Otherwise it might fail, due to chip errata.
2218 *
2219 * Rather than special-case it, we'll just *always*
2220 * use this method here for READ_LOG_EXT, making for
2221 * easier testing.
2222 */
2223 if (IS_GEN_II(hpriv))
2224 return mv_qc_issue_fis(qc);
2225 }
Mark Lord42ed8932009-02-25 15:15:39 -05002226 return ata_sff_qc_issue(qc);
Brett Russ31961942005-09-30 01:36:00 -04002227}
2228
Mark Lord8f767f82008-04-19 14:53:07 -04002229static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
2230{
2231 struct mv_port_priv *pp = ap->private_data;
2232 struct ata_queued_cmd *qc;
2233
2234 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
2235 return NULL;
2236 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Mark Lord95db5052009-01-30 18:49:29 -05002237 if (qc) {
2238 if (qc->tf.flags & ATA_TFLAG_POLLING)
2239 qc = NULL;
2240 else if (!(qc->flags & ATA_QCFLAG_ACTIVE))
2241 qc = NULL;
2242 }
Mark Lord8f767f82008-04-19 14:53:07 -04002243 return qc;
2244}
2245
Mark Lord29d187b2008-05-02 02:15:37 -04002246static void mv_pmp_error_handler(struct ata_port *ap)
2247{
2248 unsigned int pmp, pmp_map;
2249 struct mv_port_priv *pp = ap->private_data;
2250
2251 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
2252 /*
2253 * Perform NCQ error analysis on failed PMPs
2254 * before we freeze the port entirely.
2255 *
2256 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
2257 */
2258 pmp_map = pp->delayed_eh_pmp_map;
2259 pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
2260 for (pmp = 0; pmp_map != 0; pmp++) {
2261 unsigned int this_pmp = (1 << pmp);
2262 if (pmp_map & this_pmp) {
2263 struct ata_link *link = &ap->pmp_link[pmp];
2264 pmp_map &= ~this_pmp;
2265 ata_eh_analyze_ncq_error(link);
2266 }
2267 }
2268 ata_port_freeze(ap);
2269 }
2270 sata_pmp_error_handler(ap);
2271}
2272
Mark Lord4c299ca2008-05-02 02:16:20 -04002273static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
2274{
2275 void __iomem *port_mmio = mv_ap_base(ap);
2276
2277 return readl(port_mmio + SATA_TESTCTL_OFS) >> 16;
2278}
2279
Mark Lord4c299ca2008-05-02 02:16:20 -04002280static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
2281{
2282 struct ata_eh_info *ehi;
2283 unsigned int pmp;
2284
2285 /*
2286 * Initialize EH info for PMPs which saw device errors
2287 */
2288 ehi = &ap->link.eh_info;
2289 for (pmp = 0; pmp_map != 0; pmp++) {
2290 unsigned int this_pmp = (1 << pmp);
2291 if (pmp_map & this_pmp) {
2292 struct ata_link *link = &ap->pmp_link[pmp];
2293
2294 pmp_map &= ~this_pmp;
2295 ehi = &link->eh_info;
2296 ata_ehi_clear_desc(ehi);
2297 ata_ehi_push_desc(ehi, "dev err");
2298 ehi->err_mask |= AC_ERR_DEV;
2299 ehi->action |= ATA_EH_RESET;
2300 ata_link_abort(link);
2301 }
2302 }
2303}
2304
Mark Lord06aaca32008-05-19 09:01:24 -04002305static int mv_req_q_empty(struct ata_port *ap)
2306{
2307 void __iomem *port_mmio = mv_ap_base(ap);
2308 u32 in_ptr, out_ptr;
2309
2310 in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS)
2311 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2312 out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
2313 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2314 return (in_ptr == out_ptr); /* 1 == queue_is_empty */
2315}
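
/*
 * Note on the arithmetic above: after shifting and masking, the
 * producer/consumer values are simple modulo-queue-depth indices
 * (MV_MAX_Q_DEPTH_MASK wide), so equal indices mean every queued
 * request has been consumed and the request queue is fully drained.
 */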
2316
Mark Lord4c299ca2008-05-02 02:16:20 -04002317static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
2318{
2319 struct mv_port_priv *pp = ap->private_data;
2320 int failed_links;
2321 unsigned int old_map, new_map;
2322
2323 /*
2324 * Device error during FBS+NCQ operation:
2325 *
2326 * Set a port flag to prevent further I/O being enqueued.
2327 * Leave the EDMA running to drain outstanding commands from this port.
2328 * Perform the post-mortem/EH only when all responses are complete.
2329 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
2330 */
2331 if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
2332 pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
2333 pp->delayed_eh_pmp_map = 0;
2334 }
2335 old_map = pp->delayed_eh_pmp_map;
2336 new_map = old_map | mv_get_err_pmp_map(ap);
2337
2338 if (old_map != new_map) {
2339 pp->delayed_eh_pmp_map = new_map;
2340 mv_pmp_eh_prep(ap, new_map & ~old_map);
2341 }
Mark Lordc46938c2008-05-02 14:02:28 -04002342 failed_links = hweight16(new_map);
Mark Lord4c299ca2008-05-02 02:16:20 -04002343
2344 ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x "
2345 "failed_links=%d nr_active_links=%d\n",
2346 __func__, pp->delayed_eh_pmp_map,
2347 ap->qc_active, failed_links,
2348 ap->nr_active_links);
2349
Mark Lord06aaca32008-05-19 09:01:24 -04002350 if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
Mark Lord4c299ca2008-05-02 02:16:20 -04002351 mv_process_crpb_entries(ap, pp);
2352 mv_stop_edma(ap);
2353 mv_eh_freeze(ap);
2354 ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__);
2355 return 1; /* handled */
2356 }
2357 ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__);
2358 return 1; /* handled */
2359}
2360
2361static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
2362{
2363 /*
2364 * Possible future enhancement:
2365 *
2366 * FBS+non-NCQ operation is not yet implemented.
2367 * See related notes in mv_edma_cfg().
2368 *
2369 * Device error during FBS+non-NCQ operation:
2370 *
2371 * We need to snapshot the shadow registers for each failed command.
2372 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
2373 */
2374 return 0; /* not handled */
2375}
2376
2377static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
2378{
2379 struct mv_port_priv *pp = ap->private_data;
2380
2381 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
2382 return 0; /* EDMA was not active: not handled */
2383 if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
2384 return 0; /* FBS was not active: not handled */
2385
2386 if (!(edma_err_cause & EDMA_ERR_DEV))
2387 return 0; /* non DEV error: not handled */
2388 edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
2389 if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
2390 return 0; /* other problems: not handled */
2391
2392 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
2393 /*
2394 * EDMA should NOT have self-disabled for this case.
2395 * If it did, then something is wrong elsewhere,
2396 * and we cannot handle it here.
2397 */
2398 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2399 ata_port_printk(ap, KERN_WARNING,
2400 "%s: err_cause=0x%x pp_flags=0x%x\n",
2401 __func__, edma_err_cause, pp->pp_flags);
2402 return 0; /* not handled */
2403 }
2404 return mv_handle_fbs_ncq_dev_err(ap);
2405 } else {
2406 /*
2407 * EDMA should have self-disabled for this case.
2408 * If it did not, then something is wrong elsewhere,
2409 * and we cannot handle it here.
2410 */
2411 if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
2412 ata_port_printk(ap, KERN_WARNING,
2413 "%s: err_cause=0x%x pp_flags=0x%x\n",
2414 __func__, edma_err_cause, pp->pp_flags);
2415 return 0; /* not handled */
2416 }
2417 return mv_handle_fbs_non_ncq_dev_err(ap);
2418 }
2419 return 0; /* not handled */
2420}
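
/*
 * Summary of the gating above: this path handles only device errors
 * seen while both EDMA and FBS are active. In NCQ mode the EDMA
 * engine must still be running (no SELF_DIS); in non-NCQ mode it must
 * have self-disabled. Any other combination is left to the generic
 * recovery code in mv_err_intr().
 */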
2421
Mark Lorda9010322008-05-02 02:14:02 -04002422static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
Mark Lord8f767f82008-04-19 14:53:07 -04002423{
Mark Lord8f767f82008-04-19 14:53:07 -04002424 struct ata_eh_info *ehi = &ap->link.eh_info;
Mark Lorda9010322008-05-02 02:14:02 -04002425 char *when = "idle";
Mark Lord8f767f82008-04-19 14:53:07 -04002426
Mark Lord8f767f82008-04-19 14:53:07 -04002427 ata_ehi_clear_desc(ehi);
Mark Lorda9010322008-05-02 02:14:02 -04002428 if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
2429 when = "disabled";
2430 } else if (edma_was_enabled) {
2431 when = "EDMA enabled";
Mark Lord8f767f82008-04-19 14:53:07 -04002432 } else {
2433 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
2434 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
Mark Lorda9010322008-05-02 02:14:02 -04002435 when = "polling";
Mark Lord8f767f82008-04-19 14:53:07 -04002436 }
Mark Lorda9010322008-05-02 02:14:02 -04002437 ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
Mark Lord8f767f82008-04-19 14:53:07 -04002438 ehi->err_mask |= AC_ERR_OTHER;
2439 ehi->action |= ATA_EH_RESET;
2440 ata_port_freeze(ap);
2441}
2442
Brett Russ05b308e2005-10-05 17:08:53 -04002443/**
Brett Russ05b308e2005-10-05 17:08:53 -04002444 * mv_err_intr - Handle error interrupts on the port
2445 * @ap: ATA channel to manipulate
2446 *
Mark Lord8d073792008-04-19 15:07:49 -04002447 * Most cases require a full reset of the chip's state machine,
2448 * which also performs a COMRESET.
2449 * Also, if the port disabled DMA, update our cached copy to match.
Brett Russ05b308e2005-10-05 17:08:53 -04002450 *
2451 * LOCKING:
2452 * Inherited from caller.
2453 */
Mark Lord37b90462008-05-02 02:12:34 -04002454static void mv_err_intr(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04002455{
Brett Russ31961942005-09-30 01:36:00 -04002456 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002457 u32 edma_err_cause, eh_freeze_mask, serr = 0;
Mark Lorde4006072008-05-14 09:19:30 -04002458 u32 fis_cause = 0;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002459 struct mv_port_priv *pp = ap->private_data;
2460 struct mv_host_priv *hpriv = ap->host->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002461 unsigned int action = 0, err_mask = 0;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09002462 struct ata_eh_info *ehi = &ap->link.eh_info;
Mark Lord37b90462008-05-02 02:12:34 -04002463 struct ata_queued_cmd *qc;
2464 int abort = 0;
Brett Russ20f733e2005-09-01 18:26:17 -04002465
Mark Lord8d073792008-04-19 15:07:49 -04002466 /*
Mark Lord37b90462008-05-02 02:12:34 -04002467 * Read and clear the SError and err_cause bits.
Mark Lorde4006072008-05-14 09:19:30 -04002468 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
2469 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
Mark Lord8d073792008-04-19 15:07:49 -04002470 */
Mark Lord37b90462008-05-02 02:12:34 -04002471 sata_scr_read(&ap->link, SCR_ERROR, &serr);
2472 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
2473
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002474 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
Mark Lorde4006072008-05-14 09:19:30 -04002475 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2476 fis_cause = readl(port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
2477 writelfl(~fis_cause, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
2478 }
Mark Lord8d073792008-04-19 15:07:49 -04002479 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002480
Mark Lord4c299ca2008-05-02 02:16:20 -04002481 if (edma_err_cause & EDMA_ERR_DEV) {
2482 /*
2483 * Device errors during FIS-based switching operation
2484 * require special handling.
2485 */
2486 if (mv_handle_dev_err(ap, edma_err_cause))
2487 return;
2488 }
2489
Mark Lord37b90462008-05-02 02:12:34 -04002490 qc = mv_get_active_qc(ap);
2491 ata_ehi_clear_desc(ehi);
2492 ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
2493 edma_err_cause, pp->pp_flags);
Mark Lorde4006072008-05-14 09:19:30 -04002494
Mark Lordc443c502008-05-14 09:24:39 -04002495 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
Mark Lorde4006072008-05-14 09:19:30 -04002496 ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
Mark Lordc443c502008-05-14 09:24:39 -04002497 if (fis_cause & SATA_FIS_IRQ_AN) {
2498 u32 ec = edma_err_cause &
2499 ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
2500 sata_async_notification(ap);
2501 if (!ec)
2502 return; /* Just an AN; no need for the nukes */
2503 ata_ehi_push_desc(ehi, "SDB notify");
2504 }
2505 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002506 /*
Mark Lord352fab72008-04-19 14:43:42 -04002507 * All generations share these EDMA error cause bits:
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002508 */
Mark Lord37b90462008-05-02 02:12:34 -04002509 if (edma_err_cause & EDMA_ERR_DEV) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002510 err_mask |= AC_ERR_DEV;
Mark Lord37b90462008-05-02 02:12:34 -04002511 action |= ATA_EH_RESET;
2512 ata_ehi_push_desc(ehi, "dev error");
2513 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002514 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
Jeff Garzik6c1153e2007-07-13 15:20:15 -04002515 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002516 EDMA_ERR_INTRL_PAR)) {
2517 err_mask |= AC_ERR_ATA_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09002518 action |= ATA_EH_RESET;
Tejun Heob64bbc32007-07-16 14:29:39 +09002519 ata_ehi_push_desc(ehi, "parity error");
Brett Russafb0edd2005-10-05 17:08:42 -04002520 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002521 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
2522 ata_ehi_hotplugged(ehi);
2523 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
Tejun Heob64bbc32007-07-16 14:29:39 +09002524 "dev disconnect" : "dev connect");
Tejun Heocf480622008-01-24 00:05:14 +09002525 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002526 }
2527
Mark Lord352fab72008-04-19 14:43:42 -04002528 /*
2529 * Gen-I has a different SELF_DIS bit,
2530 * different FREEZE bits, and no SERR bit:
2531 */
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002532 if (IS_GEN_I(hpriv)) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002533 eh_freeze_mask = EDMA_EH_FREEZE_5;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002534 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002535 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09002536 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002537 }
2538 } else {
2539 eh_freeze_mask = EDMA_EH_FREEZE;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002540 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002541 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09002542 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002543 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002544 if (edma_err_cause & EDMA_ERR_SERR) {
Mark Lord8d073792008-04-19 15:07:49 -04002545 ata_ehi_push_desc(ehi, "SError=%08x", serr);
2546 err_mask |= AC_ERR_ATA_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09002547 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002548 }
2549 }
Brett Russ20f733e2005-09-01 18:26:17 -04002550
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002551 if (!err_mask) {
2552 err_mask = AC_ERR_OTHER;
Tejun Heocf480622008-01-24 00:05:14 +09002553 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002554 }
2555
2556 ehi->serror |= serr;
2557 ehi->action |= action;
2558
2559 if (qc)
2560 qc->err_mask |= err_mask;
2561 else
2562 ehi->err_mask |= err_mask;
2563
Mark Lord37b90462008-05-02 02:12:34 -04002564 if (err_mask == AC_ERR_DEV) {
2565 /*
2566 * Cannot do ata_port_freeze() here,
2567 * because it would kill PIO access,
2568 * which is needed for further diagnosis.
2569 */
2570 mv_eh_freeze(ap);
2571 abort = 1;
2572 } else if (edma_err_cause & eh_freeze_mask) {
2573 /*
2574 * Note to self: ata_port_freeze() calls ata_port_abort()
2575 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002576 ata_port_freeze(ap);
Mark Lord37b90462008-05-02 02:12:34 -04002577 } else {
2578 abort = 1;
2579 }
2580
2581 if (abort) {
2582 if (qc)
2583 ata_link_abort(qc->dev->link);
2584 else
2585 ata_port_abort(ap);
2586 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002587}
2588
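/*
 * Recovery policy implemented by mv_err_intr() above: a pure device
 * error (err_mask == AC_ERR_DEV) gets a local mv_eh_freeze() plus an
 * abort, keeping PIO alive for further diagnosis; any cause present
 * in eh_freeze_mask gets the full ata_port_freeze() (which aborts by
 * itself); anything else is aborted without freezing.
 */
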
Mark Lordfcfb1f72008-04-19 15:06:40 -04002589static void mv_process_crpb_response(struct ata_port *ap,
2590 struct mv_crpb *response, unsigned int tag, int ncq_enabled)
2591{
2592 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
2593
2594 if (qc) {
2595 u8 ata_status;
2596 u16 edma_status = le16_to_cpu(response->flags);
2597 /*
2598 * edma_status from a response queue entry:
2599 * LSB is from EDMA_ERR_IRQ_CAUSE_OFS (non-NCQ only).
2600 * MSB is saved ATA status from command completion.
2601 */
2602 if (!ncq_enabled) {
2603 u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
2604 if (err_cause) {
2605 /*
2606 * Error will be seen/handled by mv_err_intr().
2607 * So do nothing at all here.
2608 */
2609 return;
2610 }
2611 }
2612 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
Mark Lord37b90462008-05-02 02:12:34 -04002613 if (!ac_err_mask(ata_status))
2614 ata_qc_complete(qc);
2615 /* else: leave it for mv_err_intr() */
Mark Lordfcfb1f72008-04-19 15:06:40 -04002616 } else {
2617 ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
2618 __func__, tag);
2619 }
2620}
2621
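/*
 * CRPB response flags layout, an illustrative sketch derived from the
 * decoding in mv_process_crpb_response() above (not driver code):
 *
 *	u16 edma_status = le16_to_cpu(response->flags);
 *	u8  err_cause   = edma_status & 0xff;                     <- low byte
 *	u8  ata_status  = edma_status >> CRPB_FLAG_STATUS_SHIFT;  <- high byte
 *
 * The low byte mirrors EDMA_ERR_IRQ_CAUSE_OFS (meaningful for non-NCQ
 * only); the high byte is the ATA status saved at command completion.
 */
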
2622static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002623{
2624 void __iomem *port_mmio = mv_ap_base(ap);
2625 struct mv_host_priv *hpriv = ap->host->private_data;
Mark Lordfcfb1f72008-04-19 15:06:40 -04002626 u32 in_index;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002627 bool work_done = false;
Mark Lordfcfb1f72008-04-19 15:06:40 -04002628 int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002629
Mark Lordfcfb1f72008-04-19 15:06:40 -04002630 /* Get the hardware queue position index */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002631 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
2632 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2633
Mark Lordfcfb1f72008-04-19 15:06:40 -04002634	/* Process new responses since the last time we looked */
2635 while (in_index != pp->resp_idx) {
Jeff Garzik6c1153e2007-07-13 15:20:15 -04002636 unsigned int tag;
Mark Lordfcfb1f72008-04-19 15:06:40 -04002637 struct mv_crpb *response = &pp->crpb[pp->resp_idx];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002638
Mark Lordfcfb1f72008-04-19 15:06:40 -04002639 pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002640
Mark Lordfcfb1f72008-04-19 15:06:40 -04002641 if (IS_GEN_I(hpriv)) {
2642 /* 50xx: no NCQ, only one command active at a time */
Tejun Heo9af5c9c2007-08-06 18:36:22 +09002643 tag = ap->link.active_tag;
Mark Lordfcfb1f72008-04-19 15:06:40 -04002644 } else {
2645 /* Gen II/IIE: get command tag from CRPB entry */
2646 tag = le16_to_cpu(response->id) & 0x1f;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002647 }
Mark Lordfcfb1f72008-04-19 15:06:40 -04002648 mv_process_crpb_response(ap, response, tag, ncq_enabled);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002649 work_done = true;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002650 }
2651
Mark Lord352fab72008-04-19 14:43:42 -04002652 /* Update the software queue position index in hardware */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002653 if (work_done)
2654 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
Mark Lordfcfb1f72008-04-19 15:06:40 -04002655 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002656 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04002657}
2658
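/*
 * Response queue flow in mv_process_crpb_entries() above: the EDMA
 * engine is the producer, advancing the IN pointer as CRPBs complete;
 * the driver is the consumer, walking pp->resp_idx up to that point
 * and then publishing the new position via EDMA_RSP_Q_OUT_PTR_OFS.
 * Both indices wrap modulo the queue depth via MV_MAX_Q_DEPTH_MASK.
 */
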
Mark Lorda9010322008-05-02 02:14:02 -04002659static void mv_port_intr(struct ata_port *ap, u32 port_cause)
2660{
2661 struct mv_port_priv *pp;
2662 int edma_was_enabled;
2663
2664 if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
2665 mv_unexpected_intr(ap, 0);
2666 return;
2667 }
2668 /*
2669 * Grab a snapshot of the EDMA_EN flag setting,
2670 * so that we have a consistent view for this port,
2671	 * even if one of the routines we call changes it.
2672 */
2673 pp = ap->private_data;
2674 edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2675 /*
2676 * Process completed CRPB response(s) before other events.
2677 */
2678 if (edma_was_enabled && (port_cause & DONE_IRQ)) {
2679 mv_process_crpb_entries(ap, pp);
Mark Lord4c299ca2008-05-02 02:16:20 -04002680 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
2681 mv_handle_fbs_ncq_dev_err(ap);
Mark Lorda9010322008-05-02 02:14:02 -04002682 }
2683 /*
2684 * Handle chip-reported errors, or continue on to handle PIO.
2685 */
2686 if (unlikely(port_cause & ERR_IRQ)) {
2687 mv_err_intr(ap);
2688 } else if (!edma_was_enabled) {
2689 struct ata_queued_cmd *qc = mv_get_active_qc(ap);
2690 if (qc)
2691 ata_sff_host_intr(ap, qc);
2692 else
2693 mv_unexpected_intr(ap, edma_was_enabled);
2694 }
2695}
2696
Brett Russ05b308e2005-10-05 17:08:53 -04002697/**
2698 * mv_host_intr - Handle all interrupts on the given host controller
Jeff Garzikcca39742006-08-24 03:19:22 -04002699 * @host: host specific structure
Mark Lord7368f912008-04-25 11:24:24 -04002700 * @main_irq_cause: value of the main interrupt cause register for the chip.
Brett Russ05b308e2005-10-05 17:08:53 -04002701 *
2702 * LOCKING:
2703 * Inherited from caller.
2704 */
Mark Lord7368f912008-04-25 11:24:24 -04002705static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
Brett Russ20f733e2005-09-01 18:26:17 -04002706{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002707 struct mv_host_priv *hpriv = host->private_data;
Mark Lordeabd5eb2008-05-02 02:13:27 -04002708 void __iomem *mmio = hpriv->base, *hc_mmio;
Mark Lorda3718c12008-04-19 15:07:18 -04002709 unsigned int handled = 0, port;
Brett Russ20f733e2005-09-01 18:26:17 -04002710
Mark Lord2b748a02009-03-10 22:01:17 -04002711 /* If asserted, clear the "all ports" IRQ coalescing bit */
2712 if (main_irq_cause & ALL_PORTS_COAL_DONE)
2713 writel(~ALL_PORTS_COAL_IRQ, mmio + MV_IRQ_COAL_CAUSE);
2714
Mark Lorda3718c12008-04-19 15:07:18 -04002715 for (port = 0; port < hpriv->n_ports; port++) {
Jeff Garzikcca39742006-08-24 03:19:22 -04002716 struct ata_port *ap = host->ports[port];
Mark Lordeabd5eb2008-05-02 02:13:27 -04002717 unsigned int p, shift, hardport, port_cause;
2718
Mark Lorda3718c12008-04-19 15:07:18 -04002719 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
Mark Lorda3718c12008-04-19 15:07:18 -04002720 /*
Mark Lordeabd5eb2008-05-02 02:13:27 -04002721 * Each hc within the host has its own hc_irq_cause register,
2722	 * where the interrupting ports' bits get ack'd.
Mark Lorda3718c12008-04-19 15:07:18 -04002723 */
Mark Lordeabd5eb2008-05-02 02:13:27 -04002724 if (hardport == 0) { /* first port on this hc ? */
2725 u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
2726 u32 port_mask, ack_irqs;
2727 /*
2728 * Skip this entire hc if nothing pending for any ports
2729 */
2730 if (!hc_cause) {
2731 port += MV_PORTS_PER_HC - 1;
2732 continue;
2733 }
2734 /*
2735 * We don't need/want to read the hc_irq_cause register,
2736 * because doing so hurts performance, and
2737 * main_irq_cause already gives us everything we need.
2738 *
2739 * But we do have to *write* to the hc_irq_cause to ack
2740 * the ports that we are handling this time through.
2741 *
2742 * This requires that we create a bitmap for those
2743 * ports which interrupted us, and use that bitmap
2744 * to ack (only) those ports via hc_irq_cause.
2745 */
2746 ack_irqs = 0;
Mark Lord2b748a02009-03-10 22:01:17 -04002747 if (hc_cause & PORTS_0_3_COAL_DONE)
2748 ack_irqs = HC_COAL_IRQ;
Mark Lordeabd5eb2008-05-02 02:13:27 -04002749 for (p = 0; p < MV_PORTS_PER_HC; ++p) {
2750 if ((port + p) >= hpriv->n_ports)
2751 break;
2752 port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
2753 if (hc_cause & port_mask)
2754 ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
2755 }
Mark Lorda3718c12008-04-19 15:07:18 -04002756 hc_mmio = mv_hc_base_from_port(mmio, port);
Mark Lordeabd5eb2008-05-02 02:13:27 -04002757 writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE_OFS);
Mark Lorda3718c12008-04-19 15:07:18 -04002758 handled = 1;
2759 }
Mark Lorda9010322008-05-02 02:14:02 -04002760 /*
2761 * Handle interrupts signalled for this port:
2762 */
Mark Lordeabd5eb2008-05-02 02:13:27 -04002763 port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
Mark Lorda9010322008-05-02 02:14:02 -04002764 if (port_cause)
2765 mv_port_intr(ap, port_cause);
Brett Russ20f733e2005-09-01 18:26:17 -04002766 }
Mark Lorda3718c12008-04-19 15:07:18 -04002767 return handled;
Brett Russ20f733e2005-09-01 18:26:17 -04002768}
2769
Mark Lorda3718c12008-04-19 15:07:18 -04002770static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002771{
Mark Lord02a121d2007-12-01 13:07:22 -05002772 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002773 struct ata_port *ap;
2774 struct ata_queued_cmd *qc;
2775 struct ata_eh_info *ehi;
2776 unsigned int i, err_mask, printed = 0;
2777 u32 err_cause;
2778
Mark Lord02a121d2007-12-01 13:07:22 -05002779 err_cause = readl(mmio + hpriv->irq_cause_ofs);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002780
2781 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
2782 err_cause);
2783
2784 DPRINTK("All regs @ PCI error\n");
2785 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
2786
Mark Lord02a121d2007-12-01 13:07:22 -05002787 writelfl(0, mmio + hpriv->irq_cause_ofs);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002788
2789 for (i = 0; i < host->n_ports; i++) {
2790 ap = host->ports[i];
Tejun Heo936fd732007-08-06 18:36:23 +09002791 if (!ata_link_offline(&ap->link)) {
Tejun Heo9af5c9c2007-08-06 18:36:22 +09002792 ehi = &ap->link.eh_info;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002793 ata_ehi_clear_desc(ehi);
2794 if (!printed++)
2795 ata_ehi_push_desc(ehi,
2796 "PCI err cause 0x%08x", err_cause);
2797 err_mask = AC_ERR_HOST_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09002798 ehi->action = ATA_EH_RESET;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09002799 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002800 if (qc)
2801 qc->err_mask |= err_mask;
2802 else
2803 ehi->err_mask |= err_mask;
2804
2805 ata_port_freeze(ap);
2806 }
2807 }
Mark Lorda3718c12008-04-19 15:07:18 -04002808 return 1; /* handled */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002809}
2810
Brett Russ05b308e2005-10-05 17:08:53 -04002811/**
Jeff Garzikc5d3e452007-07-11 18:30:50 -04002812 * mv_interrupt - Main interrupt event handler
Brett Russ05b308e2005-10-05 17:08:53 -04002813 * @irq: unused
2814 * @dev_instance: private data; in this case the host structure
Brett Russ05b308e2005-10-05 17:08:53 -04002815 *
2816 * Read the read-only main interrupt cause register to determine
2817 * if any host controllers have pending interrupts. If so, call
2818 * the lower level routine to handle them. Also check for PCI
2819 * errors, which are reported only here.
2820 *
Jeff Garzik8b260242005-11-12 12:32:50 -05002821 * LOCKING:
Jeff Garzikcca39742006-08-24 03:19:22 -04002822 * This routine holds the host lock while processing pending
Brett Russ05b308e2005-10-05 17:08:53 -04002823 * interrupts.
2824 */
David Howells7d12e782006-10-05 14:55:46 +01002825static irqreturn_t mv_interrupt(int irq, void *dev_instance)
Brett Russ20f733e2005-09-01 18:26:17 -04002826{
Jeff Garzikcca39742006-08-24 03:19:22 -04002827 struct ata_host *host = dev_instance;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002828 struct mv_host_priv *hpriv = host->private_data;
Mark Lorda3718c12008-04-19 15:07:18 -04002829 unsigned int handled = 0;
Mark Lord6d3c30e2009-01-21 10:31:29 -05002830 int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
Mark Lord96e2c4872008-05-17 13:38:00 -04002831 u32 main_irq_cause, pending_irqs;
Brett Russ20f733e2005-09-01 18:26:17 -04002832
Mark Lord646a4da2008-01-26 18:30:37 -05002833 spin_lock(&host->lock);
Mark Lord6d3c30e2009-01-21 10:31:29 -05002834
2835 /* for MSI: block new interrupts while in here */
2836 if (using_msi)
Mark Lord2b748a02009-03-10 22:01:17 -04002837 mv_write_main_irq_mask(0, hpriv);
Mark Lord6d3c30e2009-01-21 10:31:29 -05002838
Mark Lord7368f912008-04-25 11:24:24 -04002839 main_irq_cause = readl(hpriv->main_irq_cause_addr);
Mark Lord96e2c4872008-05-17 13:38:00 -04002840 pending_irqs = main_irq_cause & hpriv->main_irq_mask;
Mark Lord352fab72008-04-19 14:43:42 -04002841 /*
2842 * Deal with cases where we either have nothing pending, or have read
2843 * a bogus register value which can indicate HW removal or PCI fault.
Brett Russ20f733e2005-09-01 18:26:17 -04002844 */
Mark Lorda44253d2008-05-17 13:37:07 -04002845 if (pending_irqs && main_irq_cause != 0xffffffffU) {
Mark Lord1f398472008-05-27 17:54:48 -04002846 if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
Mark Lorda3718c12008-04-19 15:07:18 -04002847 handled = mv_pci_error(host, hpriv->base);
2848 else
Mark Lorda44253d2008-05-17 13:37:07 -04002849 handled = mv_host_intr(host, pending_irqs);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002850 }
Mark Lord6d3c30e2009-01-21 10:31:29 -05002851
2852 /* for MSI: unmask; interrupt cause bits will retrigger now */
2853 if (using_msi)
Mark Lord2b748a02009-03-10 22:01:17 -04002854 mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);
Mark Lord6d3c30e2009-01-21 10:31:29 -05002855
Mark Lord9d51af72009-03-10 16:28:51 -04002856 spin_unlock(&host->lock);
2857
Brett Russ20f733e2005-09-01 18:26:17 -04002858 return IRQ_RETVAL(handled);
2859}
2860
Jeff Garzikc9d39132005-11-13 17:47:51 -05002861static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
2862{
2863 unsigned int ofs;
2864
2865 switch (sc_reg_in) {
2866 case SCR_STATUS:
2867 case SCR_ERROR:
2868 case SCR_CONTROL:
2869 ofs = sc_reg_in * sizeof(u32);
2870 break;
2871 default:
2872 ofs = 0xffffffffU;
2873 break;
2874 }
2875 return ofs;
2876}
2877
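/*
 * Example of the mapping above (values assume libata's standard SCR
 * numbering: SCR_STATUS=0, SCR_ERROR=1, SCR_CONTROL=2):
 *
 *	mv5_scr_offset(SCR_STATUS)  -> 0x0
 *	mv5_scr_offset(SCR_ERROR)   -> 0x4
 *	mv5_scr_offset(SCR_CONTROL) -> 0x8
 *
 * Any other register yields 0xffffffffU, which mv5_scr_read() and
 * mv5_scr_write() below turn into -EINVAL.
 */
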
Tejun Heo82ef04f2008-07-31 17:02:40 +09002878static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05002879{
Tejun Heo82ef04f2008-07-31 17:02:40 +09002880 struct mv_host_priv *hpriv = link->ap->host->private_data;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002881 void __iomem *mmio = hpriv->base;
Tejun Heo82ef04f2008-07-31 17:02:40 +09002882 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05002883 unsigned int ofs = mv5_scr_offset(sc_reg_in);
2884
Tejun Heoda3dbb12007-07-16 14:29:40 +09002885 if (ofs != 0xffffffffU) {
2886 *val = readl(addr + ofs);
2887 return 0;
2888 } else
2889 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05002890}
2891
Tejun Heo82ef04f2008-07-31 17:02:40 +09002892static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05002893{
Tejun Heo82ef04f2008-07-31 17:02:40 +09002894 struct mv_host_priv *hpriv = link->ap->host->private_data;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002895 void __iomem *mmio = hpriv->base;
Tejun Heo82ef04f2008-07-31 17:02:40 +09002896 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05002897 unsigned int ofs = mv5_scr_offset(sc_reg_in);
2898
Tejun Heoda3dbb12007-07-16 14:29:40 +09002899 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09002900 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09002901 return 0;
2902 } else
2903 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05002904}
2905
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002906static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
Jeff Garzik522479f2005-11-12 22:14:02 -05002907{
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002908 struct pci_dev *pdev = to_pci_dev(host->dev);
Jeff Garzik522479f2005-11-12 22:14:02 -05002909 int early_5080;
2910
Auke Kok44c10132007-06-08 15:46:36 -07002911 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
Jeff Garzik522479f2005-11-12 22:14:02 -05002912
2913 if (!early_5080) {
2914 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
2915 tmp |= (1 << 0);
2916 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
2917 }
2918
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002919 mv_reset_pci_bus(host, mmio);
Jeff Garzik522479f2005-11-12 22:14:02 -05002920}
2921
2922static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2923{
Mark Lord8e7decd2008-05-02 02:07:51 -04002924 writel(0x0fcfffff, mmio + MV_FLASH_CTL_OFS);
Jeff Garzik522479f2005-11-12 22:14:02 -05002925}
2926
Jeff Garzik47c2b672005-11-12 21:13:17 -05002927static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002928 void __iomem *mmio)
2929{
Jeff Garzikc9d39132005-11-13 17:47:51 -05002930 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
2931 u32 tmp;
2932
2933 tmp = readl(phy_mmio + MV5_PHY_MODE);
2934
2935 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
2936 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002937}
2938
Jeff Garzik47c2b672005-11-12 21:13:17 -05002939static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002940{
Jeff Garzik522479f2005-11-12 22:14:02 -05002941 u32 tmp;
2942
Mark Lord8e7decd2008-05-02 02:07:51 -04002943 writel(0, mmio + MV_GPIO_PORT_CTL_OFS);
Jeff Garzik522479f2005-11-12 22:14:02 -05002944
2945 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
2946
2947 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
2948 tmp |= ~(1 << 0);
2949 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002950}
2951
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002952static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2953 unsigned int port)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002954{
Jeff Garzikc9d39132005-11-13 17:47:51 -05002955 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
2956 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
2957 u32 tmp;
2958 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
2959
2960 if (fix_apm_sq) {
Mark Lord8e7decd2008-05-02 02:07:51 -04002961 tmp = readl(phy_mmio + MV5_LTMODE_OFS);
Jeff Garzikc9d39132005-11-13 17:47:51 -05002962 tmp |= (1 << 19);
Mark Lord8e7decd2008-05-02 02:07:51 -04002963 writel(tmp, phy_mmio + MV5_LTMODE_OFS);
Jeff Garzikc9d39132005-11-13 17:47:51 -05002964
Mark Lord8e7decd2008-05-02 02:07:51 -04002965 tmp = readl(phy_mmio + MV5_PHY_CTL_OFS);
Jeff Garzikc9d39132005-11-13 17:47:51 -05002966 tmp &= ~0x3;
2967 tmp |= 0x1;
Mark Lord8e7decd2008-05-02 02:07:51 -04002968 writel(tmp, phy_mmio + MV5_PHY_CTL_OFS);
Jeff Garzikc9d39132005-11-13 17:47:51 -05002969 }
2970
2971 tmp = readl(phy_mmio + MV5_PHY_MODE);
2972 tmp &= ~mask;
2973 tmp |= hpriv->signal[port].pre;
2974 tmp |= hpriv->signal[port].amps;
2975 writel(tmp, phy_mmio + MV5_PHY_MODE);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002976}
2977
Jeff Garzikc9d39132005-11-13 17:47:51 -05002978
2979#undef ZERO
2980#define ZERO(reg) writel(0, port_mmio + (reg))
2981static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
2982 unsigned int port)
Jeff Garzik47c2b672005-11-12 21:13:17 -05002983{
Jeff Garzikc9d39132005-11-13 17:47:51 -05002984 void __iomem *port_mmio = mv_port_base(mmio, port);
2985
Mark Lorde12bef52008-03-31 19:33:56 -04002986 mv_reset_channel(hpriv, mmio, port);
Jeff Garzikc9d39132005-11-13 17:47:51 -05002987
2988 ZERO(0x028); /* command */
2989 writel(0x11f, port_mmio + EDMA_CFG_OFS);
2990 ZERO(0x004); /* timer */
2991 ZERO(0x008); /* irq err cause */
2992 ZERO(0x00c); /* irq err mask */
2993 ZERO(0x010); /* rq bah */
2994 ZERO(0x014); /* rq inp */
2995 ZERO(0x018); /* rq outp */
2996 ZERO(0x01c); /* respq bah */
2997 ZERO(0x024); /* respq outp */
2998 ZERO(0x020); /* respq inp */
2999 ZERO(0x02c); /* test control */
Mark Lord8e7decd2008-05-02 02:07:51 -04003000 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
Jeff Garzikc9d39132005-11-13 17:47:51 -05003001}
3002#undef ZERO
3003
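/*
 * The ZERO() one-liner is #undef'd and redefined before each group of
 * register clears, so the same macro name can write zero relative to
 * whichever base (port_mmio, hc_mmio, or chip mmio) the surrounding
 * function operates on.
 */
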
3004#define ZERO(reg) writel(0, hc_mmio + (reg))
3005static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3006 unsigned int hc)
3007{
3008 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3009 u32 tmp;
3010
3011 ZERO(0x00c);
3012 ZERO(0x010);
3013 ZERO(0x014);
3014 ZERO(0x018);
3015
3016 tmp = readl(hc_mmio + 0x20);
3017 tmp &= 0x1c1c1c1c;
3018 tmp |= 0x03030303;
3019 writel(tmp, hc_mmio + 0x20);
3020}
3021#undef ZERO
3022
3023static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3024 unsigned int n_hc)
3025{
3026 unsigned int hc, port;
3027
3028 for (hc = 0; hc < n_hc; hc++) {
3029 for (port = 0; port < MV_PORTS_PER_HC; port++)
3030 mv5_reset_hc_port(hpriv, mmio,
3031 (hc * MV_PORTS_PER_HC) + port);
3032
3033 mv5_reset_one_hc(hpriv, mmio, hc);
3034 }
3035
3036 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05003037}
3038
Jeff Garzik101ffae2005-11-12 22:17:49 -05003039#undef ZERO
3040#define ZERO(reg) writel(0, mmio + (reg))
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003041static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
Jeff Garzik101ffae2005-11-12 22:17:49 -05003042{
Mark Lord02a121d2007-12-01 13:07:22 -05003043 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzik101ffae2005-11-12 22:17:49 -05003044 u32 tmp;
3045
Mark Lord8e7decd2008-05-02 02:07:51 -04003046 tmp = readl(mmio + MV_PCI_MODE_OFS);
Jeff Garzik101ffae2005-11-12 22:17:49 -05003047 tmp &= 0xff00ffff;
Mark Lord8e7decd2008-05-02 02:07:51 -04003048 writel(tmp, mmio + MV_PCI_MODE_OFS);
Jeff Garzik101ffae2005-11-12 22:17:49 -05003049
3050 ZERO(MV_PCI_DISC_TIMER);
3051 ZERO(MV_PCI_MSI_TRIGGER);
Mark Lord8e7decd2008-05-02 02:07:51 -04003052 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT_OFS);
Jeff Garzik101ffae2005-11-12 22:17:49 -05003053 ZERO(MV_PCI_SERR_MASK);
Mark Lord02a121d2007-12-01 13:07:22 -05003054 ZERO(hpriv->irq_cause_ofs);
3055 ZERO(hpriv->irq_mask_ofs);
Jeff Garzik101ffae2005-11-12 22:17:49 -05003056 ZERO(MV_PCI_ERR_LOW_ADDRESS);
3057 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
3058 ZERO(MV_PCI_ERR_ATTRIBUTE);
3059 ZERO(MV_PCI_ERR_COMMAND);
3060}
3061#undef ZERO
3062
3063static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3064{
3065 u32 tmp;
3066
3067 mv5_reset_flash(hpriv, mmio);
3068
Mark Lord8e7decd2008-05-02 02:07:51 -04003069 tmp = readl(mmio + MV_GPIO_PORT_CTL_OFS);
Jeff Garzik101ffae2005-11-12 22:17:49 -05003070 tmp &= 0x3;
3071 tmp |= (1 << 5) | (1 << 6);
Mark Lord8e7decd2008-05-02 02:07:51 -04003072 writel(tmp, mmio + MV_GPIO_PORT_CTL_OFS);
Jeff Garzik101ffae2005-11-12 22:17:49 -05003073}
3074
3075/**
3076 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @hpriv: host private data
3077 * @mmio: base address of the HBA
 * @n_hc: number of host controllers (unused here; the reset is global)
3078 *
3079 * This routine only applies to 6xxx parts.
3080 *
3081 * LOCKING:
3082 * Inherited from caller.
3083 */
Jeff Garzikc9d39132005-11-13 17:47:51 -05003084static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3085 unsigned int n_hc)
Jeff Garzik101ffae2005-11-12 22:17:49 -05003086{
3087 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
3088 int i, rc = 0;
3089 u32 t;
3090
3091	/* Follow the reset procedure defined in the PCI "main command and
3092	 * status register" table.
3093 */
3094 t = readl(reg);
3095 writel(t | STOP_PCI_MASTER, reg);
3096
3097 for (i = 0; i < 1000; i++) {
3098 udelay(1);
3099 t = readl(reg);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04003100 if (PCI_MASTER_EMPTY & t)
Jeff Garzik101ffae2005-11-12 22:17:49 -05003101 break;
Jeff Garzik101ffae2005-11-12 22:17:49 -05003102 }
3103 if (!(PCI_MASTER_EMPTY & t)) {
3104 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
3105 rc = 1;
3106 goto done;
3107 }
3108
3109 /* set reset */
3110 i = 5;
3111 do {
3112 writel(t | GLOB_SFT_RST, reg);
3113 t = readl(reg);
3114 udelay(1);
3115 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
3116
3117 if (!(GLOB_SFT_RST & t)) {
3118 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
3119 rc = 1;
3120 goto done;
3121 }
3122
3123 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
3124 i = 5;
3125 do {
3126 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
3127 t = readl(reg);
3128 udelay(1);
3129 } while ((GLOB_SFT_RST & t) && (i-- > 0));
3130
3131 if (GLOB_SFT_RST & t) {
3132 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
3133 rc = 1;
3134 }
3135done:
3136 return rc;
3137}
3138
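/*
 * Summary of the mv6_reset_hc() handshake above: stop the PCI master,
 * poll until PCI_MASTER_EMPTY is seen, strobe GLOB_SFT_RST on, then
 * clear it again together with STOP_PCI_MASTER (re-enabling the PCI
 * master, which the spec omits). Each wait is bounded, so a wedged
 * chip produces a printk and rc=1 instead of a hang.
 */
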
Jeff Garzik47c2b672005-11-12 21:13:17 -05003139static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05003140 void __iomem *mmio)
3141{
3142 void __iomem *port_mmio;
3143 u32 tmp;
3144
Mark Lord8e7decd2008-05-02 02:07:51 -04003145 tmp = readl(mmio + MV_RESET_CFG_OFS);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05003146 if ((tmp & (1 << 0)) == 0) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05003147 hpriv->signal[idx].amps = 0x7 << 8;
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05003148 hpriv->signal[idx].pre = 0x1 << 5;
3149 return;
3150 }
3151
3152 port_mmio = mv_port_base(mmio, idx);
3153 tmp = readl(port_mmio + PHY_MODE2);
3154
3155 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
3156 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
3157}
3158
Jeff Garzik47c2b672005-11-12 21:13:17 -05003159static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05003160{
Mark Lord8e7decd2008-05-02 02:07:51 -04003161 writel(0x00000060, mmio + MV_GPIO_PORT_CTL_OFS);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05003162}
3163
Jeff Garzikc9d39132005-11-13 17:47:51 -05003164static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
Jeff Garzik2a47ce02005-11-12 23:05:14 -05003165 unsigned int port)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003166{
Jeff Garzikc9d39132005-11-13 17:47:51 -05003167 void __iomem *port_mmio = mv_port_base(mmio, port);
3168
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003169 u32 hp_flags = hpriv->hp_flags;
Jeff Garzik47c2b672005-11-12 21:13:17 -05003170 int fix_phy_mode2 =
3171 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003172 int fix_phy_mode4 =
Jeff Garzik47c2b672005-11-12 21:13:17 -05003173 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
Mark Lord8c30a8b2008-05-27 17:56:31 -04003174 u32 m2, m3;
Jeff Garzik47c2b672005-11-12 21:13:17 -05003175
3176 if (fix_phy_mode2) {
3177 m2 = readl(port_mmio + PHY_MODE2);
3178 m2 &= ~(1 << 16);
3179 m2 |= (1 << 31);
3180 writel(m2, port_mmio + PHY_MODE2);
3181
3182 udelay(200);
3183
3184 m2 = readl(port_mmio + PHY_MODE2);
3185 m2 &= ~((1 << 16) | (1 << 31));
3186 writel(m2, port_mmio + PHY_MODE2);
3187
3188 udelay(200);
3189 }
3190
Mark Lord8c30a8b2008-05-27 17:56:31 -04003191 /*
3192 * Gen-II/IIe PHY_MODE3 errata RM#2:
3193 * Achieves better receiver noise performance than the h/w default:
3194 */
3195 m3 = readl(port_mmio + PHY_MODE3);
3196 m3 = (m3 & 0x1f) | (0x5555601 << 5);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003197
Mark Lord0388a8c2008-05-28 13:41:52 -04003198 /* Guideline 88F5182 (GL# SATA-S11) */
3199 if (IS_SOC(hpriv))
3200 m3 &= ~0x1c;
3201
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003202 if (fix_phy_mode4) {
Mark Lordba069e32008-05-31 16:46:34 -04003203 u32 m4 = readl(port_mmio + PHY_MODE4);
3204 /*
3205 * Enforce reserved-bit restrictions on GenIIe devices only.
3206 * For earlier chipsets, force only the internal config field
3207 * (workaround for errata FEr SATA#10 part 1).
3208 */
Mark Lord8c30a8b2008-05-27 17:56:31 -04003209 if (IS_GEN_IIE(hpriv))
Mark Lordba069e32008-05-31 16:46:34 -04003210 m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
3211 else
3212 m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
Mark Lord8c30a8b2008-05-27 17:56:31 -04003213 writel(m4, port_mmio + PHY_MODE4);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003214 }
Mark Lordb406c7a2008-05-28 12:01:12 -04003215 /*
3216 * Workaround for 60x1-B2 errata SATA#13:
3217 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
3218 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
3219 */
3220 writel(m3, port_mmio + PHY_MODE3);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003221
3222 /* Revert values of pre-emphasis and signal amps to the saved ones */
3223 m2 = readl(port_mmio + PHY_MODE2);
3224
3225 m2 &= ~MV_M2_PREAMP_MASK;
Jeff Garzik2a47ce02005-11-12 23:05:14 -05003226 m2 |= hpriv->signal[port].amps;
3227 m2 |= hpriv->signal[port].pre;
Jeff Garzik47c2b672005-11-12 21:13:17 -05003228 m2 &= ~(1 << 16);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003229
Jeff Garzike4e7b892006-01-31 12:18:41 -05003230 /* according to mvSata 3.6.1, some IIE values are fixed */
3231 if (IS_GEN_IIE(hpriv)) {
3232 m2 &= ~0xC30FF01F;
3233 m2 |= 0x0000900F;
3234 }
3235
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003236 writel(m2, port_mmio + PHY_MODE2);
3237}
3238
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003239/* TODO: use the generic LED interface to configure the SATA Presence */
3240/* & Activity LEDs on the board */
3241static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
3242 void __iomem *mmio)
3243{
3244 return;
3245}
3246
3247static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
3248 void __iomem *mmio)
3249{
3250 void __iomem *port_mmio;
3251 u32 tmp;
3252
3253 port_mmio = mv_port_base(mmio, idx);
3254 tmp = readl(port_mmio + PHY_MODE2);
3255
3256 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
3257 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
3258}
3259
3260#undef ZERO
3261#define ZERO(reg) writel(0, port_mmio + (reg))
3262static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
3263 void __iomem *mmio, unsigned int port)
3264{
3265 void __iomem *port_mmio = mv_port_base(mmio, port);
3266
Mark Lorde12bef52008-03-31 19:33:56 -04003267 mv_reset_channel(hpriv, mmio, port);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003268
3269 ZERO(0x028); /* command */
3270 writel(0x101f, port_mmio + EDMA_CFG_OFS);
3271 ZERO(0x004); /* timer */
3272 ZERO(0x008); /* irq err cause */
3273 ZERO(0x00c); /* irq err mask */
3274 ZERO(0x010); /* rq bah */
3275 ZERO(0x014); /* rq inp */
3276 ZERO(0x018); /* rq outp */
3277 ZERO(0x01c); /* respq bah */
3278 ZERO(0x024); /* respq outp */
3279 ZERO(0x020); /* respq inp */
3280 ZERO(0x02c); /* test control */
Mark Lord8e7decd2008-05-02 02:07:51 -04003281 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003282}
3283
3284#undef ZERO
3285
3286#define ZERO(reg) writel(0, hc_mmio + (reg))
3287static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
3288 void __iomem *mmio)
3289{
3290 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
3291
3292 ZERO(0x00c);
3293 ZERO(0x010);
3294 ZERO(0x014);
3296}
3297
3298#undef ZERO
3299
3300static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
3301 void __iomem *mmio, unsigned int n_hc)
3302{
3303 unsigned int port;
3304
3305 for (port = 0; port < hpriv->n_ports; port++)
3306 mv_soc_reset_hc_port(hpriv, mmio, port);
3307
3308 mv_soc_reset_one_hc(hpriv, mmio);
3309
3310 return 0;
3311}
3312
3313static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
3314 void __iomem *mmio)
3315{
3316 return;
3317}
3318
3319static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
3320{
3321 return;
3322}
3323
Mark Lord8e7decd2008-05-02 02:07:51 -04003324static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
Mark Lordb67a1062008-03-31 19:35:13 -04003325{
Mark Lord8e7decd2008-05-02 02:07:51 -04003326 u32 ifcfg = readl(port_mmio + SATA_INTERFACE_CFG_OFS);
Mark Lordb67a1062008-03-31 19:35:13 -04003327
Mark Lord8e7decd2008-05-02 02:07:51 -04003328 ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */
Mark Lordb67a1062008-03-31 19:35:13 -04003329 if (want_gen2i)
Mark Lord8e7decd2008-05-02 02:07:51 -04003330 ifcfg |= (1 << 7); /* enable gen2i speed */
3331 writelfl(ifcfg, port_mmio + SATA_INTERFACE_CFG_OFS);
Mark Lordb67a1062008-03-31 19:35:13 -04003332}
3333
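/*
 * Usage of mv_setup_ifcfg(): mv_reset_channel() below calls it with
 * want_gen2i=1 on non-Gen-I chips to allow 3.0 Gb/s (the setting
 * survives EDMA_RESET), while mv_hardreset() retries with
 * want_gen2i=0 to force 1.5 Gb/s on links that repeatedly fail to
 * come up (errata FEr SATA#10, part 2).
 */
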
Mark Lorde12bef52008-03-31 19:33:56 -04003334static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
Jeff Garzikc9d39132005-11-13 17:47:51 -05003335 unsigned int port_no)
Brett Russ20f733e2005-09-01 18:26:17 -04003336{
Jeff Garzikc9d39132005-11-13 17:47:51 -05003337 void __iomem *port_mmio = mv_port_base(mmio, port_no);
Brett Russ20f733e2005-09-01 18:26:17 -04003338
Mark Lord8e7decd2008-05-02 02:07:51 -04003339 /*
3340 * The datasheet warns against setting EDMA_RESET when EDMA is active
3341 * (but doesn't say what the problem might be). So we first try
3342 * to disable the EDMA engine before doing the EDMA_RESET operation.
3343 */
Mark Lord0d8be5c2008-04-16 14:56:12 -04003344 mv_stop_edma_engine(port_mmio);
Mark Lord8e7decd2008-05-02 02:07:51 -04003345 writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003346
Mark Lordb67a1062008-03-31 19:35:13 -04003347 if (!IS_GEN_I(hpriv)) {
Mark Lord8e7decd2008-05-02 02:07:51 -04003348 /* Enable 3.0gb/s link speed: this survives EDMA_RESET */
3349 mv_setup_ifcfg(port_mmio, 1);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003350 }
Mark Lordb67a1062008-03-31 19:35:13 -04003351 /*
Mark Lord8e7decd2008-05-02 02:07:51 -04003352 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
Mark Lordb67a1062008-03-31 19:35:13 -04003353 * link, and physical layers. It resets all SATA interface registers
3354 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
Brett Russ20f733e2005-09-01 18:26:17 -04003355 */
Mark Lord8e7decd2008-05-02 02:07:51 -04003356 writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);
Mark Lordb67a1062008-03-31 19:35:13 -04003357 udelay(25); /* allow reset propagation */
Brett Russ31961942005-09-30 01:36:00 -04003358 writelfl(0, port_mmio + EDMA_CMD_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04003359
Jeff Garzikc9d39132005-11-13 17:47:51 -05003360 hpriv->ops->phy_errata(hpriv, mmio, port_no);
3361
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04003362 if (IS_GEN_I(hpriv))
Jeff Garzikc9d39132005-11-13 17:47:51 -05003363 mdelay(1);
3364}
3365
Mark Lorde49856d2008-04-16 14:59:07 -04003366static void mv_pmp_select(struct ata_port *ap, int pmp)
Jeff Garzikc9d39132005-11-13 17:47:51 -05003367{
Mark Lorde49856d2008-04-16 14:59:07 -04003368 if (sata_pmp_supported(ap)) {
3369 void __iomem *port_mmio = mv_ap_base(ap);
3370 u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
3371 int old = reg & 0xf;
Jeff Garzikc9d39132005-11-13 17:47:51 -05003372
Mark Lorde49856d2008-04-16 14:59:07 -04003373 if (old != pmp) {
3374 reg = (reg & ~0xf) | pmp;
3375 writelfl(reg, port_mmio + SATA_IFCTL_OFS);
3376 }
Tejun Heoda3dbb12007-07-16 14:29:40 +09003377 }
Brett Russ20f733e2005-09-01 18:26:17 -04003378}
3379
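/*
 * The low nibble of SATA_IFCTL selects which port-multiplier port the
 * transport layer addresses; mv_pmp_hardreset() and mv_softreset()
 * below use mv_pmp_select() to steer the COMRESET or SRST at the
 * intended device behind the PMP.
 */
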
Mark Lorde49856d2008-04-16 14:59:07 -04003380static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
3381 unsigned long deadline)
Jeff Garzik22374672005-11-17 10:59:48 -05003382{
Mark Lorde49856d2008-04-16 14:59:07 -04003383 mv_pmp_select(link->ap, sata_srst_pmp(link));
3384 return sata_std_hardreset(link, class, deadline);
3385}
Jeff Garzik0ea9e172007-07-13 17:06:45 -04003386
Mark Lorde49856d2008-04-16 14:59:07 -04003387static int mv_softreset(struct ata_link *link, unsigned int *class,
3388 unsigned long deadline)
3389{
3390 mv_pmp_select(link->ap, sata_srst_pmp(link));
3391 return ata_sff_softreset(link, class, deadline);
Jeff Garzik22374672005-11-17 10:59:48 -05003392}
3393
Tejun Heocc0680a2007-08-06 18:36:23 +09003394static int mv_hardreset(struct ata_link *link, unsigned int *class,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003395 unsigned long deadline)
3396{
Tejun Heocc0680a2007-08-06 18:36:23 +09003397 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003398 struct mv_host_priv *hpriv = ap->host->private_data;
Mark Lordb5624682008-03-31 19:34:40 -04003399 struct mv_port_priv *pp = ap->private_data;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003400 void __iomem *mmio = hpriv->base;
Mark Lord0d8be5c2008-04-16 14:56:12 -04003401 int rc, attempts = 0, extra = 0;
3402 u32 sstatus;
3403 bool online;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003404
Mark Lorde12bef52008-03-31 19:33:56 -04003405 mv_reset_channel(hpriv, mmio, ap->port_no);
Mark Lordb5624682008-03-31 19:34:40 -04003406 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Mark Lordd16ab3f2009-02-25 15:17:43 -05003407 pp->pp_flags &=
3408 ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003409
Mark Lord0d8be5c2008-04-16 14:56:12 -04003410 /* Workaround for errata FEr SATA#10 (part 2) */
3411 do {
Mark Lord17c5aab2008-04-16 14:56:51 -04003412 const unsigned long *timing =
3413 sata_ehc_deb_timing(&link->eh_context);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003414
Mark Lord17c5aab2008-04-16 14:56:51 -04003415 rc = sata_link_hardreset(link, timing, deadline + extra,
3416 &online, NULL);
Mark Lord9dcffd92008-05-14 09:18:12 -04003417 rc = online ? -EAGAIN : rc;
Mark Lord17c5aab2008-04-16 14:56:51 -04003418 if (rc)
Mark Lord0d8be5c2008-04-16 14:56:12 -04003419 return rc;
Mark Lord0d8be5c2008-04-16 14:56:12 -04003420 sata_scr_read(link, SCR_STATUS, &sstatus);
3421 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
3422 /* Force 1.5gb/s link speed and try again */
Mark Lord8e7decd2008-05-02 02:07:51 -04003423 mv_setup_ifcfg(mv_ap_base(ap), 0);
Mark Lord0d8be5c2008-04-16 14:56:12 -04003424 if (time_after(jiffies + HZ, deadline))
3425 extra = HZ; /* only extend it once, max */
3426 }
3427 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
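	/*
	 * SStatus decode for the loop above (standard DET/SPD/IPM
	 * nibbles): 0x0 means no device; 0x113/0x123 mean DET=3
	 * (device present, phy communication up) at Gen1/Gen2 speed
	 * with IPM=1 (active). The 0x121 retry case is DET=1: a
	 * device was sensed but phy communication never came up.
	 */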
Mark Lord08da1752009-02-25 15:13:03 -05003428 mv_save_cached_regs(ap);
Mark Lord66e57a22009-01-30 18:52:58 -05003429 mv_edma_cfg(ap, 0, 0);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003430
Mark Lord17c5aab2008-04-16 14:56:51 -04003431 return rc;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003432}
3433
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003434static void mv_eh_freeze(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04003435{
Mark Lord1cfd19a2008-04-19 15:05:50 -04003436 mv_stop_edma(ap);
Mark Lordc4de5732008-05-17 13:35:21 -04003437 mv_enable_port_irqs(ap, 0);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003438}
3439
3440static void mv_eh_thaw(struct ata_port *ap)
3441{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003442 struct mv_host_priv *hpriv = ap->host->private_data;
Mark Lordc4de5732008-05-17 13:35:21 -04003443 unsigned int port = ap->port_no;
3444 unsigned int hardport = mv_hardport_from_port(port);
Mark Lord1cfd19a2008-04-19 15:05:50 -04003445 void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003446 void __iomem *port_mmio = mv_ap_base(ap);
Mark Lordc4de5732008-05-17 13:35:21 -04003447 u32 hc_irq_cause;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003448
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003449 /* clear EDMA errors on this port */
3450 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
3451
3452 /* clear pending irq events */
Mark Lordcae6edc2009-01-19 18:05:42 -05003453 hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
Mark Lord1cfd19a2008-04-19 15:05:50 -04003454 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003455
Mark Lord88e675e2008-05-17 13:36:30 -04003456 mv_enable_port_irqs(ap, ERR_IRQ);
Brett Russ31961942005-09-30 01:36:00 -04003457}
3458
Brett Russ05b308e2005-10-05 17:08:53 -04003459/**
3460 * mv_port_init - Perform some early initialization on a single port.
3461 * @port: libata data structure storing shadow register addresses
3462 * @port_mmio: base address of the port
3463 *
3464 * Initialize shadow register mmio addresses, clear outstanding
3465 * interrupts on the port, and unmask interrupts for the future
3466 * start of the port.
3467 *
3468 * LOCKING:
3469 * Inherited from caller.
3470 */
Brett Russ31961942005-09-30 01:36:00 -04003471static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
3472{
Tejun Heo0d5ff562007-02-01 15:06:36 +09003473 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
Brett Russ31961942005-09-30 01:36:00 -04003474 unsigned serr_ofs;
3475
Jeff Garzik8b260242005-11-12 12:32:50 -05003476 /* PIO related setup
Brett Russ31961942005-09-30 01:36:00 -04003477 */
3478 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
Jeff Garzik8b260242005-11-12 12:32:50 -05003479 port->error_addr =
Brett Russ31961942005-09-30 01:36:00 -04003480 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
3481 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
3482 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
3483 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
3484 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
3485 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
Jeff Garzik8b260242005-11-12 12:32:50 -05003486 port->status_addr =
Brett Russ31961942005-09-30 01:36:00 -04003487 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
3488 /* special case: control/altstatus doesn't have ATA_REG_ address */
3489 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
3490
3491 /* unused: */
Randy Dunlap8d9db2d2007-02-16 01:40:06 -08003492 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
Brett Russ20f733e2005-09-01 18:26:17 -04003493
Brett Russ31961942005-09-30 01:36:00 -04003494 /* Clear any currently outstanding port interrupt conditions */
3495 serr_ofs = mv_scr_offset(SCR_ERROR);
3496 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
3497 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
3498
Mark Lord646a4da2008-01-26 18:30:37 -05003499 /* unmask all non-transient EDMA error interrupts */
3500 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04003501
Jeff Garzik8b260242005-11-12 12:32:50 -05003502 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
Brett Russ31961942005-09-30 01:36:00 -04003503 readl(port_mmio + EDMA_CFG_OFS),
3504 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
3505 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
Brett Russ20f733e2005-09-01 18:26:17 -04003506}
3507
Mark Lord616d4a92008-05-02 02:08:32 -04003508static unsigned int mv_in_pcix_mode(struct ata_host *host)
3509{
3510 struct mv_host_priv *hpriv = host->private_data;
3511 void __iomem *mmio = hpriv->base;
3512 u32 reg;
3513
Mark Lord1f398472008-05-27 17:54:48 -04003514 if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
Mark Lord616d4a92008-05-02 02:08:32 -04003515 return 0; /* not PCI-X capable */
3516 reg = readl(mmio + MV_PCI_MODE_OFS);
3517 if ((reg & MV_PCI_MODE_MASK) == 0)
3518 return 0; /* conventional PCI mode */
3519 return 1; /* chip is in PCI-X mode */
3520}
3521
3522static int mv_pci_cut_through_okay(struct ata_host *host)
3523{
3524 struct mv_host_priv *hpriv = host->private_data;
3525 void __iomem *mmio = hpriv->base;
3526 u32 reg;
3527
3528 if (!mv_in_pcix_mode(host)) {
3529 reg = readl(mmio + PCI_COMMAND_OFS);
3530 if (reg & PCI_COMMAND_MRDTRIG)
3531 return 0; /* not okay */
3532 }
3533 return 1; /* okay */
3534}
3535
Mark Lord65ad7fef2009-04-06 15:24:14 -04003536static void mv_60x1b2_errata_pci7(struct ata_host *host)
3537{
3538 struct mv_host_priv *hpriv = host->private_data;
3539 void __iomem *mmio = hpriv->base;
3540
3541 /* workaround for 60x1-B2 errata PCI#7 */
3542 if (mv_in_pcix_mode(host)) {
3543 u32 reg = readl(mmio + PCI_COMMAND_OFS);
3544 writelfl(reg & ~PCI_COMMAND_MWRCOM, mmio + PCI_COMMAND_OFS);
3545 }
3546}
3547
Tejun Heo4447d352007-04-17 23:44:08 +09003548static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003549{
Tejun Heo4447d352007-04-17 23:44:08 +09003550 struct pci_dev *pdev = to_pci_dev(host->dev);
3551 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003552 u32 hp_flags = hpriv->hp_flags;
3553
Jeff Garzik5796d1c2007-10-26 00:03:37 -04003554 switch (board_idx) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05003555 case chip_5080:
3556 hpriv->ops = &mv5xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04003557 hp_flags |= MV_HP_GEN_I;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003558
Auke Kok44c10132007-06-08 15:46:36 -07003559 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05003560 case 0x1:
3561 hp_flags |= MV_HP_ERRATA_50XXB0;
3562 break;
3563 case 0x3:
3564 hp_flags |= MV_HP_ERRATA_50XXB2;
3565 break;
3566 default:
3567 dev_printk(KERN_WARNING, &pdev->dev,
3568 "Applying 50XXB2 workarounds to unknown rev\n");
3569 hp_flags |= MV_HP_ERRATA_50XXB2;
3570 break;
3571 }
3572 break;
3573
3574 case chip_504x:
3575 case chip_508x:
3576 hpriv->ops = &mv5xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04003577 hp_flags |= MV_HP_GEN_I;
Jeff Garzik47c2b672005-11-12 21:13:17 -05003578
Auke Kok44c10132007-06-08 15:46:36 -07003579 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05003580 case 0x0:
3581 hp_flags |= MV_HP_ERRATA_50XXB0;
3582 break;
3583 case 0x3:
3584 hp_flags |= MV_HP_ERRATA_50XXB2;
3585 break;
3586 default:
3587 dev_printk(KERN_WARNING, &pdev->dev,
3588 "Applying B2 workarounds to unknown rev\n");
3589 hp_flags |= MV_HP_ERRATA_50XXB2;
3590 break;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003591 }
3592 break;
3593
3594 case chip_604x:
3595 case chip_608x:
Jeff Garzik47c2b672005-11-12 21:13:17 -05003596 hpriv->ops = &mv6xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04003597 hp_flags |= MV_HP_GEN_II;
Jeff Garzik47c2b672005-11-12 21:13:17 -05003598
Auke Kok44c10132007-06-08 15:46:36 -07003599 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05003600 case 0x7:
Mark Lord65ad7fef2009-04-06 15:24:14 -04003601 mv_60x1b2_errata_pci7(host);
Jeff Garzik47c2b672005-11-12 21:13:17 -05003602 hp_flags |= MV_HP_ERRATA_60X1B2;
3603 break;
3604 case 0x9:
3605 hp_flags |= MV_HP_ERRATA_60X1C0;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003606 break;
3607 default:
3608 dev_printk(KERN_WARNING, &pdev->dev,
Jeff Garzik47c2b672005-11-12 21:13:17 -05003609 "Applying B2 workarounds to unknown rev\n");
3610 hp_flags |= MV_HP_ERRATA_60X1B2;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003611 break;
3612 }
3613 break;
3614
Jeff Garzike4e7b892006-01-31 12:18:41 -05003615 case chip_7042:
Mark Lord616d4a92008-05-02 02:08:32 -04003616 hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
Mark Lord306b30f2007-12-04 14:07:52 -05003617 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
3618 (pdev->device == 0x2300 || pdev->device == 0x2310))
3619 {
Mark Lord4e520032007-12-11 12:58:05 -05003620 /*
3621 * Highpoint RocketRAID PCIe 23xx series cards:
3622 *
3623 * Unconfigured drives are treated as "Legacy"
3624 * by the BIOS, and it overwrites sector 8 with
3625 * a "Lgcy" metadata block prior to Linux boot.
3626 *
3627 * Configured drives (RAID or JBOD) leave sector 8
3628 * alone, but instead overwrite a high numbered
3629 * sector for the RAID metadata. This sector can
3630 * be determined exactly, by truncating the physical
3631 * drive capacity to a nice even GB value.
3632 *
3633 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
3634 *
3635 * Warn the user, lest they think we're just buggy.
3636 */
3637 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
3638 " BIOS CORRUPTS DATA on all attached drives,"
3639 " regardless of if/how they are configured."
3640 " BEWARE!\n");
3641 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
3642 " use sectors 8-9 on \"Legacy\" drives,"
3643 " and avoid the final two gigabytes on"
3644 " all RocketRAID BIOS initialized drives.\n");
Mark Lord306b30f2007-12-04 14:07:52 -05003645 }
Mark Lord8e7decd2008-05-02 02:07:51 -04003646 /* drop through */
Jeff Garzike4e7b892006-01-31 12:18:41 -05003647 case chip_6042:
3648 hpriv->ops = &mv6xxx_ops;
Jeff Garzike4e7b892006-01-31 12:18:41 -05003649 hp_flags |= MV_HP_GEN_IIE;
Mark Lord616d4a92008-05-02 02:08:32 -04003650 if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
3651 hp_flags |= MV_HP_CUT_THROUGH;
Jeff Garzike4e7b892006-01-31 12:18:41 -05003652
Auke Kok44c10132007-06-08 15:46:36 -07003653 switch (pdev->revision) {
Mark Lord5cf73bf2008-05-27 17:58:56 -04003654 case 0x2: /* Rev.B0: the first/only public release */
Jeff Garzike4e7b892006-01-31 12:18:41 -05003655 hp_flags |= MV_HP_ERRATA_60X1C0;
3656 break;
3657 default:
3658 dev_printk(KERN_WARNING, &pdev->dev,
3659 "Applying 60X1C0 workarounds to unknown rev\n");
3660 hp_flags |= MV_HP_ERRATA_60X1C0;
3661 break;
3662 }
3663 break;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003664 case chip_soc:
3665 hpriv->ops = &mv_soc_ops;
Saeed Bisharaeb3a55a2008-08-04 00:52:55 -11003666 hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
3667 MV_HP_ERRATA_60X1C0;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003668 break;
Jeff Garzike4e7b892006-01-31 12:18:41 -05003669
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003670 default:
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003671 dev_printk(KERN_ERR, host->dev,
Jeff Garzik5796d1c2007-10-26 00:03:37 -04003672 "BUG: invalid board index %u\n", board_idx);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003673 return 1;
3674 }
3675
3676 hpriv->hp_flags = hp_flags;
Mark Lord02a121d2007-12-01 13:07:22 -05003677 if (hp_flags & MV_HP_PCIE) {
3678 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
3679 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
3680 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
3681 } else {
3682 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
3683 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
3684 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
3685 }
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003686
3687 return 0;
3688}
3689
Brett Russ05b308e2005-10-05 17:08:53 -04003690/**
Jeff Garzik47c2b672005-11-12 21:13:17 -05003691 * mv_init_host - Perform some early initialization of the host.
Tejun Heo4447d352007-04-17 23:44:08 +09003692 * @host: ATA host to initialize
3693 * @board_idx: controller index
Brett Russ05b308e2005-10-05 17:08:53 -04003694 *
3695 * If possible, do an early global reset of the host. Then do
3696 * our port init and clear/unmask all/relevant host interrupts.
3697 *
3698 * LOCKING:
3699 * Inherited from caller.
3700 */
Tejun Heo4447d352007-04-17 23:44:08 +09003701static int mv_init_host(struct ata_host *host, unsigned int board_idx)
Brett Russ20f733e2005-09-01 18:26:17 -04003702{
3703 int rc = 0, n_hc, port, hc;
Tejun Heo4447d352007-04-17 23:44:08 +09003704 struct mv_host_priv *hpriv = host->private_data;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003705 void __iomem *mmio = hpriv->base;
Jeff Garzik47c2b672005-11-12 21:13:17 -05003706
Tejun Heo4447d352007-04-17 23:44:08 +09003707 rc = mv_chip_id(host, board_idx);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003708 if (rc)
Mark Lord352fab72008-04-19 14:43:42 -04003709 goto done;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003710
Mark Lord1f398472008-05-27 17:54:48 -04003711 if (IS_SOC(hpriv)) {
Mark Lord7368f912008-04-25 11:24:24 -04003712 hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE_OFS;
3713 hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK_OFS;
Mark Lord1f398472008-05-27 17:54:48 -04003714 } else {
3715 hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS;
3716 hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK_OFS;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003717 }
Mark Lord352fab72008-04-19 14:43:42 -04003718
Thomas Reitmayr5d0fb2e2009-01-24 20:24:58 +01003719 /* initialize shadow irq mask with register's value */
3720 hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);
3721
Mark Lord352fab72008-04-19 14:43:42 -04003722 /* global interrupt mask: 0 == mask everything */
Mark Lordc4de5732008-05-17 13:35:21 -04003723 mv_set_main_irq_mask(host, ~0, 0);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (!IS_SOC(hpriv)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (!IS_SOC(hpriv)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
	}

	/*
	 * enable only global host interrupts for now.
	 * The per-port interrupts get done later as ports are set up.
	 */
	mv_set_main_irq_mask(host, 0, PCI_ERR);
	mv_set_irq_coalescing(host, irq_coalescing_io_count,
			      irq_coalescing_usecs);
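	/*
	 * irq_coalescing_io_count and irq_coalescing_usecs are module
	 * parameters: the completion interrupt is held back until that
	 * many responses have accumulated or that many microseconds
	 * have elapsed, whichever comes first; 0/0 disables coalescing.
	 */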
done:
	return rc;
}

static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
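	/*
	 * dmam_pool_create() is the device-managed (devres) variant of
	 * dma_pool_create(), so the pools below are destroyed
	 * automatically when the device detaches; the error paths can
	 * therefore return -ENOMEM without unwinding earlier allocations.
	 */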
	hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
					    MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
					    MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
					      MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;

	return 0;
}

static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
				 struct mbus_dram_target_info *dram)
{
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, hpriv->base + WINDOW_CTRL(i));
		writel(0, hpriv->base + WINDOW_BASE(i));
	}

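	/*
	 * Each enabled window-control word below is encoded as:
	 *   bits 31:16  (size - 1), in 64 KB units
	 *   bits 15:8   mbus attribute of the chip select
	 *   bits  7:4   mbus DRAM target id
	 *   bit   0     window enable
	 * For example (illustrative values only), a 256 MB chip select
	 * with attribute 0x0e on target 0 would be programmed as
	 * ((0x10000000 - 1) & 0xffff0000) | (0x0e << 8) | (0 << 4) | 1
	 * == 0x0fff0e01.
	 */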
	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       (dram->mbus_dram_target_id << 4) | 1,
		       hpriv->base + WINDOW_CTRL(i));
		writel(cs->base, hpriv->base + WINDOW_BASE(i));
	}
}

/**
 * mv_platform_probe - handle a positive probe of an SoC Marvell host
 * @pdev: platform device found
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_platform_probe(struct platform_device *pdev)
{
	static int printed_version;
	const struct mv_sata_platform_data *mv_platform_data;
	const struct ata_port_info *ppi[] =
	    { &mv_port_info[chip_soc], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	struct resource *res;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/*
	 * Simple resource validation ..
	 */
	if (unlikely(pdev->num_resources != 2)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * Get the register base first
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;

	/* allocate host */
	mv_platform_data = pdev->dev.platform_data;
	n_ports = mv_platform_data->n_ports;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);

	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	host->iomap = NULL;
	hpriv->base = devm_ioremap(&pdev->dev, res->start,
				   resource_size(res));
	if (!hpriv->base)
		return -ENOMEM;
	hpriv->base -= MV_SATAHC0_REG_BASE;
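	/*
	 * The driver computes all register offsets relative to the start
	 * of the chip's full register space, while the platform resource
	 * maps only the SATAHC region; biasing the base down by
	 * MV_SATAHC0_REG_BASE keeps the shared offset arithmetic valid
	 * (this reading of the subtraction is inferred from the mapping
	 * above, not documented here).
	 */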

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (mv_platform_data->dram != NULL)
		mv_conf_mbus_windows(hpriv, mv_platform_data->dram);

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, chip_soc);
	if (rc)
		return rc;

	dev_printk(KERN_INFO, &pdev->dev,
		   "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
		   host->n_ports);

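	/*
	 * ata_host_activate() below both requests the (shared) interrupt
	 * and registers the ports with the SCSI midlayer in one step.
	 */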
	return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
				 IRQF_SHARED, &mv6_sht);
}

/**
 * mv_platform_remove - unplug a platform interface
 * @pdev: platform device
 *
 * A platform bus SATA device has been unplugged. Perform the needed
 * cleanup. Also called on module unload for any active devices.
 */
static int __devexit mv_platform_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
	return 0;
}

static struct platform_driver mv_platform_driver = {
	.probe		= mv_platform_probe,
	.remove		= __devexit_p(mv_platform_remove),
	.driver		= {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
	},
};


#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);


static struct pci_driver mv_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= mv_pci_tbl,
	.probe		= mv_pci_init_one,
	.remove		= ata_pci_remove_one,
};

/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
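	/*
	 * Fallback ladder: prefer a 64-bit streaming mask with a 64-bit
	 * (or, failing that, 32-bit) coherent mask; otherwise fall back
	 * to 32-bit for both.  A newer kernel could express the same
	 * intent with dma_set_mask_and_coherent(&pdev->dev,
	 * DMA_BIT_MASK(64)) plus a 32-bit retry.
	 */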
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

/**
 * mv_print_info - Dump key info to kernel log for perusal.
 * @host: ATA host to print info about
 *
 * FIXME: complete this.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/*
	 * The PCI class code tells us whether the chip presents itself
	 * as a SCSI or a RAID controller.
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

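	/*
	 * Example of the resulting log line (values are illustrative):
	 *   sata_mv 0000:02:00.0: Gen-IIE 32 slots 8 ports SCSI mode
	 *   IRQ via MSI
	 */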
	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
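
	/*
	 * The pcim_* calls above are managed (devres) variants: the
	 * device enable and the BAR mapping are released automatically
	 * on unbind, so the early returns in this function need no
	 * explicit cleanup.
	 */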
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable message-signaled interrupts (MSI), if requested */
	if (msi && pci_enable_msi(pdev) == 0)
		hpriv->hp_flags |= MV_HP_FLAG_MSI;

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
#endif

static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);

static int __init mv_init(void)
{
	int rc = -ENODEV;
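	/*
	 * Register the PCI driver first and the platform driver second;
	 * if the platform registration fails, roll back the PCI
	 * registration so a failed load leaves nothing behind.
	 */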
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif
	return rc;
}

static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

module_init(mv_init);
module_exit(mv_exit);