/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008-2009: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Originally written by Brett Russ.
 * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * sata_mv TODO list:
 *
 * --> Develop a low-power-consumption strategy, and implement it.
 *
 * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
 *
 * --> [Experiment, Marvell value added] Is it possible to use target
 *       mode to cross-connect two Linux boxes with Marvell cards?  If so,
 *       creating LibATA target mode support would be very interesting.
 *
 *       Target mode, for those without docs, is the ability to directly
 *       connect two SATA ports.
 */

/*
 * 80x1-B2 errata PCI#11:
 *
 * Users of the 6041/6081 Rev.B2 chips (current is C0)
 * should be careful to insert those cards only onto PCI-X bus #0,
 * and only in device slots 0..7, not higher.  The chips may not
 * work correctly otherwise (note: this is a pretty rare condition).
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.28"

/*
 * module options
 */

static int msi;
#ifdef CONFIG_PCI
module_param(msi, int, S_IRUGO);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

static int irq_coalescing_io_count;
module_param(irq_coalescing_io_count, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_io_count,
		 "IRQ coalescing I/O count threshold (0..255)");

static int irq_coalescing_usecs;
module_param(irq_coalescing_usecs, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_usecs,
		 "IRQ coalescing time threshold in usecs");

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	/* For use with both IRQ coalescing methods ("all ports" or "per-HC") */
	COAL_CLOCKS_PER_USEC	= 150,		/* for calculating COAL_TIMEs */
	MAX_COAL_TIME_THRESHOLD	= ((1 << 24) - 1), /* internal clocks count */
	MAX_COAL_IO_COUNT	= 255,		/* completed I/O count */

	MV_PCI_REG_BASE		= 0,

	/*
	 * Per-chip ("all ports") interrupt coalescing feature.
	 * This is only for GEN_II / GEN_IIE hardware.
	 *
	 * Coalescing defers the interrupt until either the IO_THRESHOLD
	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
	 */
	COAL_REG_BASE		= 0x18000,
	IRQ_COAL_CAUSE		= (COAL_REG_BASE + 0x08),
	ALL_PORTS_COAL_IRQ	= (1 << 4),	/* all ports irq event */

	IRQ_COAL_IO_THRESHOLD	= (COAL_REG_BASE + 0xcc),
	IRQ_COAL_TIME_THRESHOLD	= (COAL_REG_BASE + 0xd0),

	/*
	 * Registers for the (unused here) transaction coalescing feature:
	 */
	TRAN_COAL_CAUSE_LO	= (COAL_REG_BASE + 0x88),
	TRAN_COAL_CAUSE_HI	= (COAL_REG_BASE + 0x8c),

	SATAHC0_REG_BASE	= 0x20000,
	FLASH_CTL		= 0x1046c,
	GPIO_PORT_CTL		= 0x104f0,
	RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),

	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
	MV_PORT_HC_SHIFT	= 2,
	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1), /* 3 */

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,

	MV_GEN_I_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,

	MV_GEN_II_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NCQ |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,

	MV_GEN_IIE_FLAGS	= MV_GEN_II_FLAGS | ATA_FLAG_AN,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	MV_PCI_COMMAND		= 0xc00,
	MV_PCI_COMMAND_MWRCOM	= (1 << 4),	/* PCI Master Write Combining */
	MV_PCI_COMMAND_MRDTRIG	= (1 << 7),	/* PCI Master Read Trigger */

	PCI_MAIN_CMD_STS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_MODE_MASK	= 0x30,

	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE		= 0x1d58,
	PCI_IRQ_MASK		= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE		= 0x1900,
	PCIE_IRQ_MASK		= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
	PCI_HC_MAIN_IRQ_CAUSE	= 0x1d60,
	PCI_HC_MAIN_IRQ_MASK	= 0x1d64,
	SOC_HC_MAIN_IRQ_CAUSE	= 0x20020,
	SOC_HC_MAIN_IRQ_MASK	= 0x20024,
	ERR_IRQ			= (1 << 0),	/* shift by (2 * port #) */
	DONE_IRQ		= (1 << 1),	/* shift by (2 * port #) */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	DONE_IRQ_0_3		= 0x000000aa,	/* DONE_IRQ ports 0,1,2,3 */
	DONE_IRQ_4_7		= (DONE_IRQ_0_3 << HC_SHIFT),  /* 4,5,6,7 */
	PCI_ERR			= (1 << 18),
	TRAN_COAL_LO_DONE	= (1 << 19),	/* transaction coalescing */
	TRAN_COAL_HI_DONE	= (1 << 20),	/* transaction coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),	/* HC0 IRQ coalescing */
	PORTS_4_7_COAL_DONE	= (1 << 17),	/* HC1 IRQ coalescing */
	ALL_PORTS_COAL_DONE	= (1 << 21),	/* GEN_II(E) IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),     /* bits 31-9, 7-6 */

	/* SATAHC registers */
	HC_CFG			= 0x00,

	HC_IRQ_CAUSE		= 0x14,
	DMA_IRQ			= (1 << 0),	/* shift by port # */
	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/*
	 * Per-HC (Host-Controller) interrupt coalescing feature.
	 * This is present on all chip generations.
	 *
	 * Coalescing defers the interrupt until either the IO_THRESHOLD
	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
	 */
	HC_IRQ_COAL_IO_THRESHOLD	= 0x000c,
	HC_IRQ_COAL_TIME_THRESHOLD	= 0x0010,

	SOC_LED_CTRL		= 0x2c,
	SOC_LED_CTRL_BLINK	= (1 << 0),	/* Active LED blink */
	SOC_LED_CTRL_ACT_PRESENCE = (1 << 2),	/* Multiplex dev presence */
						/*  with dev activity LED */

	/* Shadow block registers */
	SHD_BLK			= 0x100,
	SHD_CTL_AST		= 0x20,		/* ofs from SHD_BLK */

	/* SATA registers */
	SATA_STATUS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE		= 0x350,
	FIS_IRQ_CAUSE		= 0x364,
	FIS_IRQ_CAUSE_AN	= (1 << 9),	/* async notification */

	LTMODE			= 0x30c,	/* requires read-after-write */
	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */

	PHY_MODE2		= 0x330,
	PHY_MODE3		= 0x310,

	PHY_MODE4		= 0x314,	/* requires read-after-write */
	PHY_MODE4_CFG_MASK	= 0x00000003,	/* phy internal config field */
	PHY_MODE4_CFG_VALUE	= 0x00000001,	/* phy internal config field */
	PHY_MODE4_RSVD_ZEROS	= 0x5de3fffa,	/* Gen2e always write zeros */
	PHY_MODE4_RSVD_ONES	= 0x00000005,	/* Gen2e always write ones */

	SATA_IFCTL		= 0x344,
	SATA_TESTCTL		= 0x348,
	SATA_IFSTAT		= 0x34c,
	VENDOR_UNIQUE_FIS	= 0x35c,

	FISCFG			= 0x360,
	FISCFG_WAIT_DEV_ERR	= (1 << 8),	/* wait for host on DevErr */
	FISCFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */

	PHY_MODE9_GEN2		= 0x398,
	PHY_MODE9_GEN1		= 0x39c,
	PHYCFG_OFS		= 0x3a0,	/* only in 65n devices */

	MV5_PHY_MODE		= 0x74,
	MV5_LTMODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_IFCFG		= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */

	EDMA_ERR_IRQ_CAUSE	= 0x8,
	EDMA_ERR_IRQ_MASK	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI	= 0x10,
	EDMA_REQ_Q_IN_PTR	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI	= 0x1c,
	EDMA_RSP_Q_IN_PTR	= 0x20,
	EDMA_RSP_Q_OUT_PTR	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	EDMA_RESET		= (1 << 2),	/* reset eng/trans/link/phy */

	EDMA_STATUS		= 0x30,		/* EDMA engine status */
	EDMA_STATUS_CACHE_EMPTY	= (1 << 6),	/* GenIIe command cache empty */
	EDMA_STATUS_IDLE	= (1 << 7),	/* GenIIe EDMA enabled/idle */

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	EDMA_HALTCOND		= 0x60,		/* GenIIe halt conditions */
	EDMA_UNKNOWN_RSVD	= 0x6C,		/* GenIIe unknown/reserved */

	BMDMA_CMD		= 0x224,	/* bmdma command register */
	BMDMA_STATUS		= 0x228,	/* bmdma status register */
	BMDMA_PRD_LOW		= 0x22c,	/* bmdma PRD addr 31:0 */
	BMDMA_PRD_HIGH		= 0x230,	/* bmdma PRD addr 63:32 */

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */
	MV_HP_CUT_THROUGH	= (1 << 10),	/* can use EDMA cut-through */
	MV_HP_FLAG_SOC		= (1 << 11),	/* SystemOnChip, no PCI */
	MV_HP_QUIRK_LED_BLINK_EN = (1 << 12),	/* is led blinking enabled? */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_FBS_EN	= (1 << 2),	/* is EDMA set up for FBS? */
	MV_PP_FLAG_DELAYED_EH	= (1 << 3),	/* delayed dev err handling */
	MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4),	/* ignore initial ATA_DRDY */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)

#define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
#define WINDOW_BASE(i)		(0x20034 + ((i) << 4))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

/*
 * We keep a local cache of a few frequently accessed port
 * registers here, to avoid having to read them (very slow)
 * when switching between EDMA and non-EDMA modes.
 */
struct mv_cached_regs {
	u32			fiscfg;
	u32			ltmode;
	u32			haltcond;
	u32			unknown_rsvd;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
	struct mv_cached_regs	cached;
	unsigned int		delayed_eh_pmp_map;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	unsigned int		board_idx;
	u32			main_irq_mask;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_irq_cause_addr;
	void __iomem		*main_irq_mask_addr;
	u32			irq_cause_offset;
	u32			irq_mask_offset;
	u32			unmask_all_irqs;

#if defined(CONFIG_HAVE_CLK)
	struct clk		*clk;
#endif
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
				      void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int port);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);

static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline);
static int  mv_softreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline);
static void mv_pmp_error_handler(struct ata_port *ap);
static void mv_process_crpb_entries(struct ata_port *ap,
					struct mv_port_priv *pp);

static void mv_sff_irq_clear(struct ata_port *ap);
static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
static void mv_bmdma_setup(struct ata_queued_cmd *qc);
static void mv_bmdma_start(struct ata_queued_cmd *qc);
static void mv_bmdma_stop(struct ata_queued_cmd *qc);
static u8   mv_bmdma_status(struct ata_port *ap);
static u8 mv_sff_check_status(struct ata_port *ap);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.lost_interrupt		= ATA_OP_NULL,

	.qc_defer		= mv_qc_defer,
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits		= &ata_bmdma_port_ops,

	.lost_interrupt		= ATA_OP_NULL,

	.qc_defer		= mv_qc_defer,
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.dev_config             = mv6_dev_config,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.softreset		= mv_softreset,
	.pmp_hardreset		= mv_pmp_hardreset,
	.pmp_softreset		= mv_softreset,
	.error_handler		= mv_pmp_error_handler,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.sff_check_status	= mv_sff_check_status,
	.sff_irq_clear		= mv_sff_irq_clear,
	.check_atapi_dma	= mv_check_atapi_dma,
	.bmdma_setup		= mv_bmdma_setup,
	.bmdma_start		= mv_bmdma_start,
	.bmdma_stop		= mv_bmdma_stop,
	.bmdma_status		= mv_bmdma_status,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_GEN_I_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_GEN_II_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1720/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

static const struct mv_hw_ops mv_soc_65n_ops = {
	.phy_errata		= mv_soc_65n_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

/*
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 *
 * port is the sole input, in range 0..7.
 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 * hardport is the other output, in range 0..3.
 *
 * Note that port and hardport may be the same variable in some cases.
 */
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
{								\
	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
	hardport = mv_hardport_from_port(port);			\
	shift   += hardport * 2;				\
}
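/*
 * Worked example (illustrative): for port 6, mv_hc_from_port() yields hc 1,
 * so shift starts at HC_SHIFT (9); hardport is 6 & 3 = 2, and shift becomes
 * 9 + 2 * 2 = 13, i.e. the ERR_IRQ/DONE_IRQ pair for port 6 occupies
 * bits 14:13 of the main_irq_cause / main_irq_mask registers.
 */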

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return  mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

/**
 *      mv_save_cached_regs - (re-)initialize cached port registers
 *      @ap: the port whose registers we are caching
 *
 *	Initialize the local cache of port registers,
 *	so that reading them over and over again can
 *	be avoided on the hotter paths of this driver.
 *	This saves a few microseconds each time we switch
 *	to/from EDMA mode to perform (eg.) a drive cache flush.
 */
static void mv_save_cached_regs(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	pp->cached.fiscfg = readl(port_mmio + FISCFG);
	pp->cached.ltmode = readl(port_mmio + LTMODE);
	pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
	pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
}

/**
 *      mv_write_cached_reg - write to a cached port register
 *      @addr: hardware address of the register
 *      @old: pointer to cached value of the register
 *      @new: new value for the register
 *
 *	Write a new value to a cached register,
 *	but only if the value is different from before.
 */
static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
{
	if (new != *old) {
		unsigned long laddr;
		*old = new;
		/*
		 * Workaround for 88SX60x1-B2 FEr SATA#13:
		 * Read-after-write is needed to prevent generating 64-bit
		 * write cycles on the PCI bus for SATA interface registers
		 * at offsets ending in 0x4 or 0xc.
		 *
		 * Looks like a lot of fuss, but it avoids an unnecessary
		 * +1 usec read-after-write delay for unaffected registers.
		 */
		laddr = (long)addr & 0xffff;
		if (laddr >= 0x300 && laddr <= 0x33c) {
			laddr &= 0x000f;
			if (laddr == 0x4 || laddr == 0xc) {
				writelfl(new, addr); /* read after write */
				return;
			}
		}
		writel(new, addr); /* unaffected by the errata */
	}
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR);
	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);

	/*
	 * initialize response queue
	 */
	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR);
}

static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
{
	/*
	 * When writing to the main_irq_mask in hardware,
	 * we must ensure exclusivity between the interrupt coalescing bits
	 * and the corresponding individual port DONE_IRQ bits.
	 *
	 * Note that this register is really an "IRQ enable" register,
	 * not an "IRQ mask" register as Marvell's naming might suggest.
	 */
	if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
		mask &= ~DONE_IRQ_0_3;
	if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
		mask &= ~DONE_IRQ_4_7;
	writelfl(mask, hpriv->main_irq_mask_addr);
}

static void mv_set_main_irq_mask(struct ata_host *host,
				 u32 disable_bits, u32 enable_bits)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 old_mask, new_mask;

	old_mask = hpriv->main_irq_mask;
	new_mask = (old_mask & ~disable_bits) | enable_bits;
	if (new_mask != old_mask) {
		hpriv->main_irq_mask = new_mask;
		mv_write_main_irq_mask(new_mask, hpriv);
	}
}

static void mv_enable_port_irqs(struct ata_port *ap,
				     unsigned int port_bits)
{
	unsigned int shift, hardport, port = ap->port_no;
	u32 disable_bits, enable_bits;

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
	enable_bits  = port_bits << shift;
	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
}

static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
					  void __iomem *port_mmio,
					  unsigned int port_irqs)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	int hardport = mv_hardport_from_port(ap->port_no);
	void __iomem *hc_mmio = mv_hc_base_from_port(
				mv_host_base(ap->host), ap->port_no);
	u32 hc_irq_cause;

	/* clear EDMA event indicators, if any */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);

	/* clear pending irq events */
	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);

	/* clear FIS IRQ Cause */
	if (IS_GEN_IIE(hpriv))
		writelfl(0, port_mmio + FIS_IRQ_CAUSE);

	mv_enable_port_irqs(ap, port_irqs);
}

static void mv_set_irq_coalescing(struct ata_host *host,
				  unsigned int count, unsigned int usecs)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base, *hc_mmio;
	u32 coal_enable = 0;
	unsigned long flags;
	unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
	const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
							ALL_PORTS_COAL_DONE;

	/* Disable IRQ coalescing if either threshold is zero */
	if (!usecs || !count) {
		clks = count = 0;
	} else {
		/* Respect maximum limits of the hardware */
		clks = usecs * COAL_CLOCKS_PER_USEC;
		if (clks > MAX_COAL_TIME_THRESHOLD)
			clks = MAX_COAL_TIME_THRESHOLD;
		if (count > MAX_COAL_IO_COUNT)
			count = MAX_COAL_IO_COUNT;
	}
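	/*
	 * Illustrative example: irq_coalescing_usecs=100 yields
	 * clks = 100 * 150 = 15000 internal clocks, well under the
	 * MAX_COAL_TIME_THRESHOLD cap of ((1 << 24) - 1).
	 */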
1100
1101 spin_lock_irqsave(&host->lock, flags);
Mark Lord6abf4672009-03-11 00:56:00 -04001102 mv_set_main_irq_mask(host, coal_disable, 0);
Mark Lord2b748a02009-03-10 22:01:17 -04001103
Mark Lord6abf4672009-03-11 00:56:00 -04001104 if (is_dual_hc && !IS_GEN_I(hpriv)) {
Mark Lord2b748a02009-03-10 22:01:17 -04001105 /*
Mark Lord6abf4672009-03-11 00:56:00 -04001106 * GEN_II/GEN_IIE with dual host controllers:
1107 * one set of global thresholds for the entire chip.
Mark Lord2b748a02009-03-10 22:01:17 -04001108 */
Mark Lordcae5a292009-04-06 16:43:45 -04001109 writel(clks, mmio + IRQ_COAL_TIME_THRESHOLD);
1110 writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
Mark Lord2b748a02009-03-10 22:01:17 -04001111 /* clear leftover coal IRQ bit */
Mark Lordcae5a292009-04-06 16:43:45 -04001112 writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
Mark Lord6abf4672009-03-11 00:56:00 -04001113 if (count)
1114 coal_enable = ALL_PORTS_COAL_DONE;
1115 clks = count = 0; /* force clearing of regular regs below */
Mark Lord2b748a02009-03-10 22:01:17 -04001116 }
Mark Lord6abf4672009-03-11 00:56:00 -04001117
Mark Lord2b748a02009-03-10 22:01:17 -04001118 /*
1119 * All chips: independent thresholds for each HC on the chip.
1120 */
1121 hc_mmio = mv_hc_base_from_port(mmio, 0);
Mark Lordcae5a292009-04-06 16:43:45 -04001122 writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
1123 writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
1124 writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
Mark Lord6abf4672009-03-11 00:56:00 -04001125 if (count)
1126 coal_enable |= PORTS_0_3_COAL_DONE;
1127 if (is_dual_hc) {
Mark Lord2b748a02009-03-10 22:01:17 -04001128 hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
Mark Lordcae5a292009-04-06 16:43:45 -04001129 writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
1130 writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
1131 writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
Mark Lord6abf4672009-03-11 00:56:00 -04001132 if (count)
1133 coal_enable |= PORTS_4_7_COAL_DONE;
Mark Lord2b748a02009-03-10 22:01:17 -04001134 }
Mark Lord2b748a02009-03-10 22:01:17 -04001135
Mark Lord6abf4672009-03-11 00:56:00 -04001136 mv_set_main_irq_mask(host, 0, coal_enable);
Mark Lord2b748a02009-03-10 22:01:17 -04001137 spin_unlock_irqrestore(&host->lock, flags);
1138}
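/*
 * Illustrative use of the above (values are examples only):
 *
 *	mv_set_irq_coalescing(host, 4, 100);
 *
 * requests completion-interrupt coalescing with thresholds of 4
 * completed I/Os and 100 usecs; passing zero for either threshold
 * disables coalescing altogether.
 */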
1139
Brett Russ05b308e2005-10-05 17:08:53 -04001140/**
Mark Lord00b81232009-01-30 18:47:51 -05001141 * mv_start_edma - Enable eDMA engine
Brett Russ05b308e2005-10-05 17:08:53 -04001142 * @port_mmio: port base address
1143 * @pp: port private data
1144 *
Tejun Heobeec7db2006-02-11 19:11:13 +09001145 * Verify the local cache of the eDMA state is accurate with a
1146 * WARN_ON.
Brett Russ05b308e2005-10-05 17:08:53 -04001147 *
1148 * LOCKING:
1149 * Inherited from caller.
1150 */
Mark Lord00b81232009-01-30 18:47:51 -05001151static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
Mark Lord72109162008-01-26 18:31:33 -05001152 struct mv_port_priv *pp, u8 protocol)
Brett Russ31961942005-09-30 01:36:00 -04001153{
Mark Lord72109162008-01-26 18:31:33 -05001154 int want_ncq = (protocol == ATA_PROT_NCQ);
1155
1156 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1157 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
1158 if (want_ncq != using_ncq)
Mark Lordb5624682008-03-31 19:34:40 -04001159 mv_stop_edma(ap);
Mark Lord72109162008-01-26 18:31:33 -05001160 }
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001161 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
Mark Lord0c589122008-01-26 18:31:16 -05001162 struct mv_host_priv *hpriv = ap->host->private_data;
Mark Lord0c589122008-01-26 18:31:16 -05001163
Mark Lord00b81232009-01-30 18:47:51 -05001164 mv_edma_cfg(ap, want_ncq, 1);
Mark Lord0c589122008-01-26 18:31:16 -05001165
Mark Lordf630d562008-01-26 18:31:00 -05001166 mv_set_edma_ptrs(port_mmio, hpriv, pp);
Mark Lord00b81232009-01-30 18:47:51 -05001167 mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001168
Mark Lordcae5a292009-04-06 16:43:45 -04001169 writelfl(EDMA_EN, port_mmio + EDMA_CMD);
Brett Russafb0edd2005-10-05 17:08:42 -04001170 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
1171 }
Brett Russ31961942005-09-30 01:36:00 -04001172}
1173
Mark Lord9b2c4e02008-05-02 02:09:14 -04001174static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
1175{
1176 void __iomem *port_mmio = mv_ap_base(ap);
1177 const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
1178 const int per_loop = 5, timeout = (15 * 1000 / per_loop);
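	/* i.e. up to 3000 polls of 5 usecs each, ~15 msec in total */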
1179 int i;
1180
1181 /*
1182 * Wait for the EDMA engine to finish transactions in progress.
Mark Lordc46938c2008-05-02 14:02:28 -04001183 * No idea what a good "timeout" value might be, but measurements
1184 * indicate that it often requires hundreds of microseconds
1185 * with two drives in use. So we use the 15msec value above
1186 * as a rough guess at what even more drives might require.
Mark Lord9b2c4e02008-05-02 02:09:14 -04001187 */
1188 for (i = 0; i < timeout; ++i) {
Mark Lordcae5a292009-04-06 16:43:45 -04001189 u32 edma_stat = readl(port_mmio + EDMA_STATUS);
Mark Lord9b2c4e02008-05-02 02:09:14 -04001190 if ((edma_stat & empty_idle) == empty_idle)
1191 break;
1192 udelay(per_loop);
1193 }
1194 /* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */
1195}
1196
Brett Russ05b308e2005-10-05 17:08:53 -04001197/**
Mark Lorde12bef52008-03-31 19:33:56 -04001198 * mv_stop_edma_engine - Disable eDMA engine
Mark Lordb5624682008-03-31 19:34:40 -04001199 * @port_mmio: io base address
Brett Russ05b308e2005-10-05 17:08:53 -04001200 *
1201 * LOCKING:
1202 * Inherited from caller.
1203 */
Mark Lordb5624682008-03-31 19:34:40 -04001204static int mv_stop_edma_engine(void __iomem *port_mmio)
Brett Russ31961942005-09-30 01:36:00 -04001205{
Mark Lordb5624682008-03-31 19:34:40 -04001206 int i;
Brett Russ31961942005-09-30 01:36:00 -04001207
Mark Lordb5624682008-03-31 19:34:40 -04001208 /* Disable eDMA. The disable bit auto clears. */
Mark Lordcae5a292009-04-06 16:43:45 -04001209 writelfl(EDMA_DS, port_mmio + EDMA_CMD);
Jeff Garzik8b260242005-11-12 12:32:50 -05001210
Mark Lordb5624682008-03-31 19:34:40 -04001211 /* Wait for the chip to confirm eDMA is off. */
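	/* Poll up to 10000 times at 10 usec intervals (~100 msec worst case). */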
1212 for (i = 10000; i > 0; i--) {
Mark Lordcae5a292009-04-06 16:43:45 -04001213 u32 reg = readl(port_mmio + EDMA_CMD);
Jeff Garzik4537deb2007-07-12 14:30:19 -04001214 if (!(reg & EDMA_EN))
Mark Lordb5624682008-03-31 19:34:40 -04001215 return 0;
1216 udelay(10);
Brett Russ31961942005-09-30 01:36:00 -04001217 }
Mark Lordb5624682008-03-31 19:34:40 -04001218 return -EIO;
Brett Russ31961942005-09-30 01:36:00 -04001219}
1220
Mark Lorde12bef52008-03-31 19:33:56 -04001221static int mv_stop_edma(struct ata_port *ap)
Jeff Garzik0ea9e172007-07-13 17:06:45 -04001222{
Mark Lordb5624682008-03-31 19:34:40 -04001223 void __iomem *port_mmio = mv_ap_base(ap);
1224 struct mv_port_priv *pp = ap->private_data;
Mark Lord66e57a22009-01-30 18:52:58 -05001225 int err = 0;
Jeff Garzik0ea9e172007-07-13 17:06:45 -04001226
Mark Lordb5624682008-03-31 19:34:40 -04001227 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
1228 return 0;
1229 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Mark Lord9b2c4e02008-05-02 02:09:14 -04001230 mv_wait_for_edma_empty_idle(ap);
Mark Lordb5624682008-03-31 19:34:40 -04001231 if (mv_stop_edma_engine(port_mmio)) {
1232 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
Mark Lord66e57a22009-01-30 18:52:58 -05001233 err = -EIO;
Mark Lordb5624682008-03-31 19:34:40 -04001234 }
Mark Lord66e57a22009-01-30 18:52:58 -05001235 mv_edma_cfg(ap, 0, 0);
1236 return err;
Jeff Garzik0ea9e172007-07-13 17:06:45 -04001237}
1238
Jeff Garzik8a70f8d2005-10-05 17:19:47 -04001239#ifdef ATA_DEBUG
Brett Russ31961942005-09-30 01:36:00 -04001240static void mv_dump_mem(void __iomem *start, unsigned bytes)
1241{
Brett Russ31961942005-09-30 01:36:00 -04001242 int b, w;
1243 for (b = 0; b < bytes; ) {
1244 DPRINTK("%p: ", start + b);
1245 for (w = 0; b < bytes && w < 4; w++) {
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001246 printk("%08x ", readl(start + b));
Brett Russ31961942005-09-30 01:36:00 -04001247 b += sizeof(u32);
1248 }
1249 printk("\n");
1250 }
Brett Russ31961942005-09-30 01:36:00 -04001251}
Jeff Garzik8a70f8d2005-10-05 17:19:47 -04001252#endif
1253
Brett Russ31961942005-09-30 01:36:00 -04001254static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
1255{
1256#ifdef ATA_DEBUG
1257 int b, w;
1258 u32 dw;
1259 for (b = 0; b < bytes; ) {
1260 DPRINTK("%02x: ", b);
1261 for (w = 0; b < bytes && w < 4; w++) {
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001262 (void) pci_read_config_dword(pdev, b, &dw);
1263 printk("%08x ", dw);
Brett Russ31961942005-09-30 01:36:00 -04001264 b += sizeof(u32);
1265 }
1266 printk("\n");
1267 }
1268#endif
1269}
1270static void mv_dump_all_regs(void __iomem *mmio_base, int port,
1271 struct pci_dev *pdev)
1272{
1273#ifdef ATA_DEBUG
Jeff Garzik8b260242005-11-12 12:32:50 -05001274 void __iomem *hc_base = mv_hc_base(mmio_base,
Brett Russ31961942005-09-30 01:36:00 -04001275 port >> MV_PORT_HC_SHIFT);
1276 void __iomem *port_base;
1277 int start_port, num_ports, p, start_hc, num_hcs, hc;
1278
1279 if (0 > port) {
1280 start_hc = start_port = 0;
1281 num_ports = 8; /* should be benign for 4-port devs */
1282 num_hcs = 2;
1283 } else {
1284 start_hc = port >> MV_PORT_HC_SHIFT;
1285 start_port = port;
1286 num_ports = num_hcs = 1;
1287 }
Jeff Garzik8b260242005-11-12 12:32:50 -05001288 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
Brett Russ31961942005-09-30 01:36:00 -04001289 num_ports > 1 ? num_ports - 1 : start_port);
1290
1291 if (NULL != pdev) {
1292 DPRINTK("PCI config space regs:\n");
1293 mv_dump_pci_cfg(pdev, 0x68);
1294 }
1295 DPRINTK("PCI regs:\n");
1296 mv_dump_mem(mmio_base+0xc00, 0x3c);
1297 mv_dump_mem(mmio_base+0xd00, 0x34);
1298 mv_dump_mem(mmio_base+0xf00, 0x4);
1299 mv_dump_mem(mmio_base+0x1d00, 0x6c);
1300 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
Dan Alonid220c372006-04-10 23:20:22 -07001301 hc_base = mv_hc_base(mmio_base, hc);
Brett Russ31961942005-09-30 01:36:00 -04001302 DPRINTK("HC regs (HC %i):\n", hc);
1303 mv_dump_mem(hc_base, 0x1c);
1304 }
1305 for (p = start_port; p < start_port + num_ports; p++) {
1306 port_base = mv_port_base(mmio_base, p);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001307 DPRINTK("EDMA regs (port %i):\n", p);
Brett Russ31961942005-09-30 01:36:00 -04001308 mv_dump_mem(port_base, 0x54);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001309 DPRINTK("SATA regs (port %i):\n", p);
Brett Russ31961942005-09-30 01:36:00 -04001310 mv_dump_mem(port_base+0x300, 0x60);
1311 }
1312#endif
1313}
1314
Brett Russ20f733e2005-09-01 18:26:17 -04001315static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1316{
1317 unsigned int ofs;
1318
1319 switch (sc_reg_in) {
1320 case SCR_STATUS:
1321 case SCR_CONTROL:
1322 case SCR_ERROR:
Mark Lordcae5a292009-04-06 16:43:45 -04001323 ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
Brett Russ20f733e2005-09-01 18:26:17 -04001324 break;
1325 case SCR_ACTIVE:
Mark Lordcae5a292009-04-06 16:43:45 -04001326 ofs = SATA_ACTIVE; /* active is not with the others */
Brett Russ20f733e2005-09-01 18:26:17 -04001327 break;
1328 default:
1329 ofs = 0xffffffffU;
1330 break;
1331 }
1332 return ofs;
1333}
1334
Tejun Heo82ef04f2008-07-31 17:02:40 +09001335static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -04001336{
1337 unsigned int ofs = mv_scr_offset(sc_reg_in);
1338
Tejun Heoda3dbb12007-07-16 14:29:40 +09001339 if (ofs != 0xffffffffU) {
Tejun Heo82ef04f2008-07-31 17:02:40 +09001340 *val = readl(mv_ap_base(link->ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001341 return 0;
1342 } else
1343 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001344}
1345
Tejun Heo82ef04f2008-07-31 17:02:40 +09001346static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -04001347{
1348 unsigned int ofs = mv_scr_offset(sc_reg_in);
1349
Tejun Heoda3dbb12007-07-16 14:29:40 +09001350 if (ofs != 0xffffffffU) {
Mark Lord20091772009-04-06 15:24:57 -04001351 void __iomem *addr = mv_ap_base(link->ap) + ofs;
1352 if (sc_reg_in == SCR_CONTROL) {
1353 /*
1354 * Workaround for 88SX60x1 FEr SATA#26:
1355 *
1356 * COMRESETs have to take care not to accidentally
1357 * put the drive to sleep when writing SCR_CONTROL.
1358 * Setting bits 12..15 prevents this problem.
1359 *
1360 * So if we see an outbound COMRESET, set those bits.
1361 * Ditto for the followup write that clears the reset.
1362 *
1363 * The proprietary driver does this for
1364 * all chip versions, and so do we.
1365 */
1366 if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
1367 val |= 0xf000;
1368 }
1369 writelfl(val, addr);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001370 return 0;
1371 } else
1372 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001373}
1374
Mark Lordf2738272008-01-26 18:32:29 -05001375static void mv6_dev_config(struct ata_device *adev)
1376{
1377 /*
Mark Lorde49856d2008-04-16 14:59:07 -04001378 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
1379 *
1380 * Gen-II does not support NCQ over a port multiplier
1381 * (no FIS-based switching).
Mark Lordf2738272008-01-26 18:32:29 -05001382 */
Mark Lorde49856d2008-04-16 14:59:07 -04001383 if (adev->flags & ATA_DFLAG_NCQ) {
Mark Lord352fab72008-04-19 14:43:42 -04001384 if (sata_pmp_attached(adev->link->ap)) {
Mark Lorde49856d2008-04-16 14:59:07 -04001385 adev->flags &= ~ATA_DFLAG_NCQ;
Mark Lord352fab72008-04-19 14:43:42 -04001386 ata_dev_printk(adev, KERN_INFO,
1387 "NCQ disabled for command-based switching\n");
Mark Lord352fab72008-04-19 14:43:42 -04001388 }
Mark Lorde49856d2008-04-16 14:59:07 -04001389 }
Mark Lordf2738272008-01-26 18:32:29 -05001390}
1391
Mark Lord3e4a1392008-05-02 02:10:02 -04001392static int mv_qc_defer(struct ata_queued_cmd *qc)
1393{
1394 struct ata_link *link = qc->dev->link;
1395 struct ata_port *ap = link->ap;
1396 struct mv_port_priv *pp = ap->private_data;
1397
1398 /*
Mark Lord29d187b2008-05-02 02:15:37 -04001399 * Don't allow new commands if we're in a delayed EH state
1400 * for NCQ and/or FIS-based switching.
1401 */
1402 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
1403 return ATA_DEFER_PORT;
Gwendal Grignou159a7ff2009-10-12 15:44:00 -07001404
1405 /* PIO commands need exclusive link: no other commands [DMA or PIO]
1406 * can run concurrently.
1407 * Set excl_link when we want to send a PIO command in DMA mode
1408 * or a non-NCQ command in NCQ mode.
1409 * When we receive a command from that link, and there are no
1410 * outstanding commands, mark a flag to clear excl_link and let
1411 * the command go through.
1412 */
1413 if (unlikely(ap->excl_link)) {
1414 if (link == ap->excl_link) {
1415 if (ap->nr_active_links)
1416 return ATA_DEFER_PORT;
1417 qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
1418 return 0;
1419 } else
1420 return ATA_DEFER_PORT;
1421 }
1422
Mark Lord29d187b2008-05-02 02:15:37 -04001423 /*
Mark Lord3e4a1392008-05-02 02:10:02 -04001424 * If the port is completely idle, then allow the new qc.
1425 */
1426 if (ap->nr_active_links == 0)
1427 return 0;
1428
Tejun Heo4bdee6c2008-08-13 20:24:16 +09001429 /*
1430 * The port is operating in host queuing mode (EDMA) with NCQ
1431 * enabled, allow multiple NCQ commands. EDMA also allows
1432 * queueing multiple DMA commands but libata core currently
1433 * doesn't allow it.
1434 */
1435 if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
Gwendal Grignou159a7ff2009-10-12 15:44:00 -07001436 (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1437 if (ata_is_ncq(qc->tf.protocol))
1438 return 0;
1439 else {
1440 ap->excl_link = link;
1441 return ATA_DEFER_PORT;
1442 }
1443 }
Tejun Heo4bdee6c2008-08-13 20:24:16 +09001444
Mark Lord3e4a1392008-05-02 02:10:02 -04001445 return ATA_DEFER_PORT;
1446}
1447
Mark Lord08da1752009-02-25 15:13:03 -05001448static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
Mark Lorde49856d2008-04-16 14:59:07 -04001449{
Mark Lord08da1752009-02-25 15:13:03 -05001450 struct mv_port_priv *pp = ap->private_data;
1451 void __iomem *port_mmio;
Mark Lord00f42ea2008-05-02 02:11:45 -04001452
Mark Lord08da1752009-02-25 15:13:03 -05001453 u32 fiscfg, *old_fiscfg = &pp->cached.fiscfg;
1454 u32 ltmode, *old_ltmode = &pp->cached.ltmode;
1455 u32 haltcond, *old_haltcond = &pp->cached.haltcond;
Mark Lord00f42ea2008-05-02 02:11:45 -04001456
Mark Lord08da1752009-02-25 15:13:03 -05001457 ltmode = *old_ltmode & ~LTMODE_BIT8;
1458 haltcond = *old_haltcond | EDMA_ERR_DEV;
Mark Lord00f42ea2008-05-02 02:11:45 -04001459
1460 if (want_fbs) {
Mark Lord08da1752009-02-25 15:13:03 -05001461 fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
1462 ltmode = *old_ltmode | LTMODE_BIT8;
Mark Lord4c299ca2008-05-02 02:16:20 -04001463 if (want_ncq)
Mark Lord08da1752009-02-25 15:13:03 -05001464 haltcond &= ~EDMA_ERR_DEV;
Mark Lord4c299ca2008-05-02 02:16:20 -04001465 else
Mark Lord08da1752009-02-25 15:13:03 -05001466 fiscfg |= FISCFG_WAIT_DEV_ERR;
1467 } else {
1468 fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
Mark Lorde49856d2008-04-16 14:59:07 -04001469 }
Mark Lord00f42ea2008-05-02 02:11:45 -04001470
Mark Lord08da1752009-02-25 15:13:03 -05001471 port_mmio = mv_ap_base(ap);
Mark Lordcae5a292009-04-06 16:43:45 -04001472 mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
1473 mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
1474 mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
Mark Lord0c589122008-01-26 18:31:16 -05001475}
Jeff Garzike4e7b892006-01-31 12:18:41 -05001476
Mark Lorddd2890f2008-05-02 02:10:56 -04001477static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
1478{
1479 struct mv_host_priv *hpriv = ap->host->private_data;
1480 u32 old, new;
1481
1482 /* workaround for 88SX60x1 FEr SATA#25 (part 1) */
Mark Lordcae5a292009-04-06 16:43:45 -04001483 old = readl(hpriv->base + GPIO_PORT_CTL);
Mark Lorddd2890f2008-05-02 02:10:56 -04001484 if (want_ncq)
1485 new = old | (1 << 22);
1486 else
1487 new = old & ~(1 << 22);
1488 if (new != old)
Mark Lordcae5a292009-04-06 16:43:45 -04001489 writel(new, hpriv->base + GPIO_PORT_CTL);
Mark Lorddd2890f2008-05-02 02:10:56 -04001490}
1491
Mark Lordc01e8a22009-02-25 15:14:48 -05001492/**
Mark Lord40f21b12009-03-10 18:51:04 -04001493 * mv_bmdma_enable - set a magic bit on GEN_IIE to allow bmdma
1494 * @ap: Port being initialized
Mark Lordc01e8a22009-02-25 15:14:48 -05001495 *
1496 * There are two DMA modes on these chips: basic DMA, and EDMA.
1497 *
1498 * Bit-0 of the "EDMA RESERVED" register enables/disables use
1499 * of basic DMA on the GEN_IIE versions of the chips.
1500 *
1501 * This bit survives EDMA resets, and must be set for basic DMA
1502 * to function, and should be cleared when EDMA is active.
1503 */
1504static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
1505{
1506 struct mv_port_priv *pp = ap->private_data;
1507 u32 new, *old = &pp->cached.unknown_rsvd;
1508
1509 if (enable_bmdma)
1510 new = *old | 1;
1511 else
1512 new = *old & ~1;
Mark Lordcae5a292009-04-06 16:43:45 -04001513 mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
Mark Lordc01e8a22009-02-25 15:14:48 -05001514}
1515
Mark Lord000b3442009-03-15 11:33:19 -04001516/*
1517 * SOC chips have an issue whereby the HDD LEDs don't always blink
1518 * during I/O when NCQ is enabled. Enabling a special "LED blink" mode
1519 * of the SOC takes care of it, generating a steady blink rate when
1520 * any drive on the chip is active.
1521 *
1522 * Unfortunately, the blink mode is a global hardware setting for the SOC,
1523 * so we must use it whenever at least one port on the SOC has NCQ enabled.
1524 *
1525 * We turn "LED blink" off when NCQ is not in use anywhere, because the normal
1526 * LED operation works then, and provides better (more accurate) feedback.
1527 *
1528 * Note that this code assumes that an SOC never has more than one HC onboard.
1529 */
1530static void mv_soc_led_blink_enable(struct ata_port *ap)
1531{
1532 struct ata_host *host = ap->host;
1533 struct mv_host_priv *hpriv = host->private_data;
1534 void __iomem *hc_mmio;
1535 u32 led_ctrl;
1536
1537 if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
1538 return;
1539 hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
1540 hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
Mark Lordcae5a292009-04-06 16:43:45 -04001541 led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
1542 writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
Mark Lord000b3442009-03-15 11:33:19 -04001543}
1544
1545static void mv_soc_led_blink_disable(struct ata_port *ap)
1546{
1547 struct ata_host *host = ap->host;
1548 struct mv_host_priv *hpriv = host->private_data;
1549 void __iomem *hc_mmio;
1550 u32 led_ctrl;
1551 unsigned int port;
1552
1553 if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
1554 return;
1555
1556 /* disable led-blink only if no ports are using NCQ */
1557 for (port = 0; port < hpriv->n_ports; port++) {
1558 struct ata_port *this_ap = host->ports[port];
1559 struct mv_port_priv *pp = this_ap->private_data;
1560
1561 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
1562 return;
1563 }
1564
1565 hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
1566 hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
Mark Lordcae5a292009-04-06 16:43:45 -04001567 led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
1568 writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
Mark Lord000b3442009-03-15 11:33:19 -04001569}
1570
Mark Lord00b81232009-01-30 18:47:51 -05001571static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
Jeff Garzike4e7b892006-01-31 12:18:41 -05001572{
1573 u32 cfg;
Mark Lorde12bef52008-03-31 19:33:56 -04001574 struct mv_port_priv *pp = ap->private_data;
1575 struct mv_host_priv *hpriv = ap->host->private_data;
1576 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzike4e7b892006-01-31 12:18:41 -05001577
1578 /* set up non-NCQ EDMA configuration */
1579 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
Mark Lordd16ab3f2009-02-25 15:17:43 -05001580 pp->pp_flags &=
1581 ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
Jeff Garzike4e7b892006-01-31 12:18:41 -05001582
1583 if (IS_GEN_I(hpriv))
1584 cfg |= (1 << 8); /* enab config burst size mask */
1585
Mark Lorddd2890f2008-05-02 02:10:56 -04001586 else if (IS_GEN_II(hpriv)) {
Jeff Garzike4e7b892006-01-31 12:18:41 -05001587 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
Mark Lorddd2890f2008-05-02 02:10:56 -04001588 mv_60x1_errata_sata25(ap, want_ncq);
Jeff Garzike4e7b892006-01-31 12:18:41 -05001589
Mark Lorddd2890f2008-05-02 02:10:56 -04001590 } else if (IS_GEN_IIE(hpriv)) {
Mark Lord00f42ea2008-05-02 02:11:45 -04001591 int want_fbs = sata_pmp_attached(ap);
1592 /*
1593 * Possible future enhancement:
1594 *
1595 * The chip can use FBS with non-NCQ, if we allow it,
1596 * But first we need to have the error handling in place
1597 * for this mode (datasheet section 7.3.15.4.2.3).
1598 * So disallow non-NCQ FBS for now.
1599 */
1600 want_fbs &= want_ncq;
1601
Mark Lord08da1752009-02-25 15:13:03 -05001602 mv_config_fbs(ap, want_ncq, want_fbs);
Mark Lord00f42ea2008-05-02 02:11:45 -04001603
1604 if (want_fbs) {
1605 pp->pp_flags |= MV_PP_FLAG_FBS_EN;
1606 cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
1607 }
1608
Jeff Garzike728eab2007-02-25 02:53:41 -05001609 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
Mark Lord00b81232009-01-30 18:47:51 -05001610 if (want_edma) {
1611 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1612 if (!IS_SOC(hpriv))
1613 cfg |= (1 << 18); /* enab early completion */
1614 }
Mark Lord616d4a92008-05-02 02:08:32 -04001615 if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
1616 cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
Mark Lordc01e8a22009-02-25 15:14:48 -05001617 mv_bmdma_enable_iie(ap, !want_edma);
Mark Lord000b3442009-03-15 11:33:19 -04001618
1619 if (IS_SOC(hpriv)) {
1620 if (want_ncq)
1621 mv_soc_led_blink_enable(ap);
1622 else
1623 mv_soc_led_blink_disable(ap);
1624 }
Jeff Garzike4e7b892006-01-31 12:18:41 -05001625 }
1626
Mark Lord72109162008-01-26 18:31:33 -05001627 if (want_ncq) {
1628 cfg |= EDMA_CFG_NCQ;
1629 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
Mark Lord00b81232009-01-30 18:47:51 -05001630 }
Mark Lord72109162008-01-26 18:31:33 -05001631
Mark Lordcae5a292009-04-06 16:43:45 -04001632 writelfl(cfg, port_mmio + EDMA_CFG);
Jeff Garzike4e7b892006-01-31 12:18:41 -05001633}
1634
Mark Lordda2fa9b2008-01-26 18:32:45 -05001635static void mv_port_free_dma_mem(struct ata_port *ap)
1636{
1637 struct mv_host_priv *hpriv = ap->host->private_data;
1638 struct mv_port_priv *pp = ap->private_data;
Mark Lordeb73d552008-01-29 13:24:00 -05001639 int tag;
Mark Lordda2fa9b2008-01-26 18:32:45 -05001640
1641 if (pp->crqb) {
1642 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1643 pp->crqb = NULL;
1644 }
1645 if (pp->crpb) {
1646 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1647 pp->crpb = NULL;
1648 }
Mark Lordeb73d552008-01-29 13:24:00 -05001649 /*
1650 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1651 * For later hardware, we have one unique sg_tbl per NCQ tag.
1652 */
1653 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1654 if (pp->sg_tbl[tag]) {
1655 if (tag == 0 || !IS_GEN_I(hpriv))
1656 dma_pool_free(hpriv->sg_tbl_pool,
1657 pp->sg_tbl[tag],
1658 pp->sg_tbl_dma[tag]);
1659 pp->sg_tbl[tag] = NULL;
1660 }
Mark Lordda2fa9b2008-01-26 18:32:45 -05001661 }
1662}
1663
Brett Russ05b308e2005-10-05 17:08:53 -04001664/**
1665 * mv_port_start - Port specific init/start routine.
1666 * @ap: ATA channel to manipulate
1667 *
1668 * Allocate and point to DMA memory, init port private memory,
1669 * zero indices.
1670 *
1671 * LOCKING:
1672 * Inherited from caller.
1673 */
Brett Russ31961942005-09-30 01:36:00 -04001674static int mv_port_start(struct ata_port *ap)
1675{
Jeff Garzikcca39742006-08-24 03:19:22 -04001676 struct device *dev = ap->host->dev;
1677 struct mv_host_priv *hpriv = ap->host->private_data;
Brett Russ31961942005-09-30 01:36:00 -04001678 struct mv_port_priv *pp;
Mark Lord933cb8e2009-04-06 12:30:43 -04001679 unsigned long flags;
James Bottomleydde20202008-02-19 11:36:56 +01001680 int tag;
Brett Russ31961942005-09-30 01:36:00 -04001681
Tejun Heo24dc5f32007-01-20 16:00:28 +09001682 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
Jeff Garzik6037d6b2005-11-04 22:08:00 -05001683 if (!pp)
Tejun Heo24dc5f32007-01-20 16:00:28 +09001684 return -ENOMEM;
Mark Lordda2fa9b2008-01-26 18:32:45 -05001685 ap->private_data = pp;
Brett Russ31961942005-09-30 01:36:00 -04001686
Mark Lordda2fa9b2008-01-26 18:32:45 -05001687 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1688 if (!pp->crqb)
1689 return -ENOMEM;
1690 memset(pp->crqb, 0, MV_CRQB_Q_SZ);
Brett Russ31961942005-09-30 01:36:00 -04001691
Mark Lordda2fa9b2008-01-26 18:32:45 -05001692 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1693 if (!pp->crpb)
1694 goto out_port_free_dma_mem;
1695 memset(pp->crpb, 0, MV_CRPB_Q_SZ);
Brett Russ31961942005-09-30 01:36:00 -04001696
Mark Lord3bd0a702008-06-18 12:11:16 -04001697 /* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
1698 if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
1699 ap->flags |= ATA_FLAG_AN;
Mark Lordeb73d552008-01-29 13:24:00 -05001700 /*
1701 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1702 * For later hardware, we need one unique sg_tbl per NCQ tag.
1703 */
1704 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1705 if (tag == 0 || !IS_GEN_I(hpriv)) {
1706 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1707 GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1708 if (!pp->sg_tbl[tag])
1709 goto out_port_free_dma_mem;
1710 } else {
1711 pp->sg_tbl[tag] = pp->sg_tbl[0];
1712 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1713 }
1714 }
Mark Lord933cb8e2009-04-06 12:30:43 -04001715
1716 spin_lock_irqsave(ap->lock, flags);
Mark Lord08da1752009-02-25 15:13:03 -05001717 mv_save_cached_regs(ap);
Mark Lord66e57a22009-01-30 18:52:58 -05001718 mv_edma_cfg(ap, 0, 0);
Mark Lord933cb8e2009-04-06 12:30:43 -04001719 spin_unlock_irqrestore(ap->lock, flags);
1720
Brett Russ31961942005-09-30 01:36:00 -04001721 return 0;
Mark Lordda2fa9b2008-01-26 18:32:45 -05001722
1723out_port_free_dma_mem:
1724 mv_port_free_dma_mem(ap);
1725 return -ENOMEM;
Brett Russ31961942005-09-30 01:36:00 -04001726}
1727
Brett Russ05b308e2005-10-05 17:08:53 -04001728/**
1729 * mv_port_stop - Port specific cleanup/stop routine.
1730 * @ap: ATA channel to manipulate
1731 *
1732 * Stop DMA, cleanup port memory.
1733 *
1734 * LOCKING:
Jeff Garzikcca39742006-08-24 03:19:22 -04001735 * This routine uses the host lock to protect the DMA stop.
Brett Russ05b308e2005-10-05 17:08:53 -04001736 */
Brett Russ31961942005-09-30 01:36:00 -04001737static void mv_port_stop(struct ata_port *ap)
1738{
Mark Lord933cb8e2009-04-06 12:30:43 -04001739 unsigned long flags;
1740
1741 spin_lock_irqsave(ap->lock, flags);
Mark Lorde12bef52008-03-31 19:33:56 -04001742 mv_stop_edma(ap);
Mark Lord88e675e2008-05-17 13:36:30 -04001743 mv_enable_port_irqs(ap, 0);
Mark Lord933cb8e2009-04-06 12:30:43 -04001744 spin_unlock_irqrestore(ap->lock, flags);
Mark Lordda2fa9b2008-01-26 18:32:45 -05001745 mv_port_free_dma_mem(ap);
Brett Russ31961942005-09-30 01:36:00 -04001746}
1747
Brett Russ05b308e2005-10-05 17:08:53 -04001748/**
1749 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1750 * @qc: queued command whose SG list to source from
1751 *
1752 * Populate the SG list and mark the last entry.
1753 *
1754 * LOCKING:
1755 * Inherited from caller.
1756 */
Jeff Garzik6c087722007-10-12 00:16:23 -04001757static void mv_fill_sg(struct ata_queued_cmd *qc)
Brett Russ31961942005-09-30 01:36:00 -04001758{
1759 struct mv_port_priv *pp = qc->ap->private_data;
Jeff Garzik972c26b2005-10-18 22:14:54 -04001760 struct scatterlist *sg;
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04001761 struct mv_sg *mv_sg, *last_sg = NULL;
Tejun Heoff2aeb12007-12-05 16:43:11 +09001762 unsigned int si;
Brett Russ31961942005-09-30 01:36:00 -04001763
Mark Lordeb73d552008-01-29 13:24:00 -05001764 mv_sg = pp->sg_tbl[qc->tag];
Tejun Heoff2aeb12007-12-05 16:43:11 +09001765 for_each_sg(qc->sg, sg, qc->n_elem, si) {
Jeff Garzikd88184f2007-02-26 01:26:06 -05001766 dma_addr_t addr = sg_dma_address(sg);
1767 u32 sg_len = sg_dma_len(sg);
Brett Russ31961942005-09-30 01:36:00 -04001768
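		/*
		 * Split the segment so that no single ePRD entry crosses
		 * a 64 KiB boundary; any remainder is emitted as further
		 * entries on subsequent loop iterations.
		 */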
Olof Johansson4007b492007-10-02 20:45:27 -05001769 while (sg_len) {
1770 u32 offset = addr & 0xffff;
1771 u32 len = sg_len;
Brett Russ31961942005-09-30 01:36:00 -04001772
Mark Lord32cd11a2009-02-01 16:50:32 -05001773 if (offset + len > 0x10000)
Olof Johansson4007b492007-10-02 20:45:27 -05001774 len = 0x10000 - offset;
Jeff Garzik972c26b2005-10-18 22:14:54 -04001775
Olof Johansson4007b492007-10-02 20:45:27 -05001776 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1777 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
Jeff Garzik6c087722007-10-12 00:16:23 -04001778 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
Mark Lord32cd11a2009-02-01 16:50:32 -05001779 mv_sg->reserved = 0;
Olof Johansson4007b492007-10-02 20:45:27 -05001780
1781 sg_len -= len;
1782 addr += len;
1783
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04001784 last_sg = mv_sg;
Olof Johansson4007b492007-10-02 20:45:27 -05001785 mv_sg++;
Olof Johansson4007b492007-10-02 20:45:27 -05001786 }
Brett Russ31961942005-09-30 01:36:00 -04001787 }
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04001788
1789 if (likely(last_sg))
1790 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
Mark Lord32cd11a2009-02-01 16:50:32 -05001791 mb(); /* ensure data structure is visible to the chipset */
Brett Russ31961942005-09-30 01:36:00 -04001792}
1793
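/*
 * Pack one shadow-register write into a 16-bit CRQB command word:
 * the data byte, the register address, the control-select bit, and
 * (optionally) the "last command word" flag.
 */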
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001794static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001795{
Mark Lord559eeda2006-05-19 16:40:15 -04001796 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001797 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001798 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001799}
1800
Brett Russ05b308e2005-10-05 17:08:53 -04001801/**
Mark Lordda142652009-01-30 18:51:54 -05001802 * mv_sff_irq_clear - Clear hardware interrupt after DMA.
1803 * @ap: Port associated with this ATA transaction.
1804 *
1805 * We need this only for ATAPI bmdma transactions,
1806 * as otherwise we experience spurious interrupts
1807 * after libata-sff handles the bmdma interrupts.
1808 */
1809static void mv_sff_irq_clear(struct ata_port *ap)
1810{
1811 mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
1812}
1813
1814/**
1815 * mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
1816 * @qc: queued command to check for chipset/DMA compatibility.
1817 *
1818 * The bmdma engines cannot handle speculative data sizes
1819 * (bytecount under/over flow). So only allow DMA for
1820 * data transfer commands with known data sizes.
1821 *
1822 * LOCKING:
1823 * Inherited from caller.
1824 */
1825static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
1826{
1827 struct scsi_cmnd *scmd = qc->scsicmd;
1828
1829 if (scmd) {
1830 switch (scmd->cmnd[0]) {
1831 case READ_6:
1832 case READ_10:
1833 case READ_12:
1834 case WRITE_6:
1835 case WRITE_10:
1836 case WRITE_12:
1837 case GPCMD_READ_CD:
1838 case GPCMD_SEND_DVD_STRUCTURE:
1839 case GPCMD_SEND_CUE_SHEET:
1840 return 0; /* DMA is safe */
1841 }
1842 }
1843 return -EOPNOTSUPP; /* use PIO instead */
1844}
1845
1846/**
1847 * mv_bmdma_setup - Set up BMDMA transaction
1848 * @qc: queued command to prepare DMA for.
1849 *
1850 * LOCKING:
1851 * Inherited from caller.
1852 */
1853static void mv_bmdma_setup(struct ata_queued_cmd *qc)
1854{
1855 struct ata_port *ap = qc->ap;
1856 void __iomem *port_mmio = mv_ap_base(ap);
1857 struct mv_port_priv *pp = ap->private_data;
1858
1859 mv_fill_sg(qc);
1860
1861 /* clear all DMA cmd bits */
Mark Lordcae5a292009-04-06 16:43:45 -04001862 writel(0, port_mmio + BMDMA_CMD);
Mark Lordda142652009-01-30 18:51:54 -05001863
1864 /* load PRD table addr. */
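	/*
	 * The PRD table address is split across two registers; the double
	 * 16-bit shift extracts the upper half without performing a 32-bit
	 * shift on what may be a 32-bit dma_addr_t.
	 */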
1865 writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16,
Mark Lordcae5a292009-04-06 16:43:45 -04001866 port_mmio + BMDMA_PRD_HIGH);
Mark Lordda142652009-01-30 18:51:54 -05001867 writelfl(pp->sg_tbl_dma[qc->tag],
Mark Lordcae5a292009-04-06 16:43:45 -04001868 port_mmio + BMDMA_PRD_LOW);
Mark Lordda142652009-01-30 18:51:54 -05001869
1870 /* issue r/w command */
1871 ap->ops->sff_exec_command(ap, &qc->tf);
1872}
1873
1874/**
1875 * mv_bmdma_start - Start a BMDMA transaction
1876 * @qc: queued command to start DMA on.
1877 *
1878 * LOCKING:
1879 * Inherited from caller.
1880 */
1881static void mv_bmdma_start(struct ata_queued_cmd *qc)
1882{
1883 struct ata_port *ap = qc->ap;
1884 void __iomem *port_mmio = mv_ap_base(ap);
1885 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
1886 u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;
1887
1888 /* start host DMA transaction */
Mark Lordcae5a292009-04-06 16:43:45 -04001889 writelfl(cmd, port_mmio + BMDMA_CMD);
Mark Lordda142652009-01-30 18:51:54 -05001890}
1891
1892/**
1893 * mv_bmdma_stop - Stop BMDMA transfer
1894 * @qc: queued command to stop DMA on.
1895 *
1896 * Clears the ATA_DMA_START flag in the bmdma control register
1897 *
1898 * LOCKING:
1899 * Inherited from caller.
1900 */
Mark Lord44b73382010-08-19 21:40:44 -04001901static void mv_bmdma_stop_ap(struct ata_port *ap)
Mark Lordda142652009-01-30 18:51:54 -05001902{
Mark Lordda142652009-01-30 18:51:54 -05001903 void __iomem *port_mmio = mv_ap_base(ap);
1904 u32 cmd;
1905
1906 /* clear start/stop bit */
Mark Lordcae5a292009-04-06 16:43:45 -04001907 cmd = readl(port_mmio + BMDMA_CMD);
Mark Lord44b73382010-08-19 21:40:44 -04001908 if (cmd & ATA_DMA_START) {
1909 cmd &= ~ATA_DMA_START;
1910 writelfl(cmd, port_mmio + BMDMA_CMD);
Mark Lordda142652009-01-30 18:51:54 -05001911
Mark Lord44b73382010-08-19 21:40:44 -04001912 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
1913 ata_sff_dma_pause(ap);
1914 }
1915}
1916
1917static void mv_bmdma_stop(struct ata_queued_cmd *qc)
1918{
1919 mv_bmdma_stop_ap(qc->ap);
Mark Lordda142652009-01-30 18:51:54 -05001920}
1921
1922/**
1923 * mv_bmdma_status - Read BMDMA status
1924 * @ap: port for which to retrieve DMA status.
1925 *
1926 * Read and return equivalent of the sff BMDMA status register.
1927 *
1928 * LOCKING:
1929 * Inherited from caller.
1930 */
1931static u8 mv_bmdma_status(struct ata_port *ap)
1932{
1933 void __iomem *port_mmio = mv_ap_base(ap);
1934 u32 reg, status;
1935
1936 /*
1937 * Other bits are valid only if ATA_DMA_ACTIVE==0,
1938 * and the ATA_DMA_INTR bit doesn't exist.
1939 */
Mark Lordcae5a292009-04-06 16:43:45 -04001940 reg = readl(port_mmio + BMDMA_STATUS);
Mark Lordda142652009-01-30 18:51:54 -05001941 if (reg & ATA_DMA_ACTIVE)
1942 status = ATA_DMA_ACTIVE;
Mark Lord44b73382010-08-19 21:40:44 -04001943 else if (reg & ATA_DMA_ERR)
Mark Lordda142652009-01-30 18:51:54 -05001944 status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
Mark Lord44b73382010-08-19 21:40:44 -04001945 else {
1946 /*
1947 * Just because DMA_ACTIVE is 0 (DMA completed),
1948 * this does _not_ mean the device is "done".
1949 * So we should not yet be signalling ATA_DMA_INTR
1950 * in some cases. E.g. DSM/TRIM, and perhaps others.
1951 */
1952 mv_bmdma_stop_ap(ap);
1953 if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
1954 status = 0;
1955 else
1956 status = ATA_DMA_INTR;
1957 }
Mark Lordda142652009-01-30 18:51:54 -05001958 return status;
1959}
1960
Mark Lord299b3f82009-04-13 11:29:34 -04001961static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
1962{
1963 struct ata_taskfile *tf = &qc->tf;
1964 /*
1965 * Workaround for 88SX60x1 FEr SATA#24.
1966 *
1967 * Chip may corrupt WRITEs if multi_count >= 4kB.
1968 * Note that READs are unaffected.
1969 *
1970 * It's not clear if this errata really means "4K bytes",
1971 * or if it always happens for multi_count > 7
1972 * regardless of device sector_size.
1973 *
1974 * So, for safety, any write with multi_count > 7
1975 * gets converted here into a regular PIO write instead:
1976 */
1977 if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) {
1978 if (qc->dev->multi_count > 7) {
1979 switch (tf->command) {
1980 case ATA_CMD_WRITE_MULTI:
1981 tf->command = ATA_CMD_PIO_WRITE;
1982 break;
1983 case ATA_CMD_WRITE_MULTI_FUA_EXT:
1984 tf->flags &= ~ATA_TFLAG_FUA; /* ugh */
1985 /* fall through */
1986 case ATA_CMD_WRITE_MULTI_EXT:
1987 tf->command = ATA_CMD_PIO_WRITE_EXT;
1988 break;
1989 }
1990 }
1991 }
1992}
1993
Mark Lordda142652009-01-30 18:51:54 -05001994/**
Brett Russ05b308e2005-10-05 17:08:53 -04001995 * mv_qc_prep - Host specific command preparation.
1996 * @qc: queued command to prepare
1997 *
1998 * This routine simply redirects to the general purpose routine
1999 * if command is not DMA. Else, it handles prep of the CRQB
2000 * (command request block), does some sanity checking, and calls
2001 * the SG load routine.
2002 *
2003 * LOCKING:
2004 * Inherited from caller.
2005 */
Brett Russ31961942005-09-30 01:36:00 -04002006static void mv_qc_prep(struct ata_queued_cmd *qc)
2007{
2008 struct ata_port *ap = qc->ap;
2009 struct mv_port_priv *pp = ap->private_data;
Mark Lorde1469872006-05-22 19:02:03 -04002010 __le16 *cw;
Mark Lord8d2b4502009-04-13 11:27:18 -04002011 struct ata_taskfile *tf = &qc->tf;
Brett Russ31961942005-09-30 01:36:00 -04002012 u16 flags = 0;
Mark Lorda6432432006-05-19 16:36:36 -04002013 unsigned in_index;
Brett Russ31961942005-09-30 01:36:00 -04002014
Mark Lord299b3f82009-04-13 11:29:34 -04002015 switch (tf->protocol) {
2016 case ATA_PROT_DMA:
Mark Lord44b73382010-08-19 21:40:44 -04002017 if (tf->command == ATA_CMD_DSM)
2018 return;
2019 /* fall-thru */
Mark Lord299b3f82009-04-13 11:29:34 -04002020 case ATA_PROT_NCQ:
2021 break; /* continue below */
2022 case ATA_PROT_PIO:
2023 mv_rw_multi_errata_sata24(qc);
Brett Russ31961942005-09-30 01:36:00 -04002024 return;
Mark Lord299b3f82009-04-13 11:29:34 -04002025 default:
2026 return;
2027 }
Brett Russ20f733e2005-09-01 18:26:17 -04002028
Brett Russ31961942005-09-30 01:36:00 -04002029 /* Fill in command request block
2030 */
Mark Lord8d2b4502009-04-13 11:27:18 -04002031 if (!(tf->flags & ATA_TFLAG_WRITE))
Brett Russ31961942005-09-30 01:36:00 -04002032 flags |= CRQB_FLAG_READ;
Tejun Heobeec7db2006-02-11 19:11:13 +09002033 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
Brett Russ31961942005-09-30 01:36:00 -04002034 flags |= qc->tag << CRQB_TAG_SHIFT;
Mark Lorde49856d2008-04-16 14:59:07 -04002035 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
Brett Russ31961942005-09-30 01:36:00 -04002036
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002037 /* get current queue index from software */
Mark Lordfcfb1f72008-04-19 15:06:40 -04002038 in_index = pp->req_idx;
Brett Russ31961942005-09-30 01:36:00 -04002039
Mark Lorda6432432006-05-19 16:36:36 -04002040 pp->crqb[in_index].sg_addr =
Mark Lordeb73d552008-01-29 13:24:00 -05002041 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
Mark Lorda6432432006-05-19 16:36:36 -04002042 pp->crqb[in_index].sg_addr_hi =
Mark Lordeb73d552008-01-29 13:24:00 -05002043 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
Mark Lorda6432432006-05-19 16:36:36 -04002044 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
2045
2046 cw = &pp->crqb[in_index].ata_cmd[0];
Brett Russ31961942005-09-30 01:36:00 -04002047
2048 /* Sadly, the CRQB cannot accommodate all registers--there are
2049 * only 11 bytes...so we must pick and choose required
2050 * registers based on the command. So, we drop feature and
2051 * hob_feature for [RW] DMA commands, but they are needed for
Mark Lordcd12e1f2009-01-19 18:06:28 -05002052 * NCQ. NCQ will drop hob_nsect, which is not needed there
2053 * (nsect is used only for the tag; feat/hob_feat hold true nsect).
Brett Russ31961942005-09-30 01:36:00 -04002054 */
2055 switch (tf->command) {
2056 case ATA_CMD_READ:
2057 case ATA_CMD_READ_EXT:
2058 case ATA_CMD_WRITE:
2059 case ATA_CMD_WRITE_EXT:
Jens Axboec15d85c2006-02-15 15:59:25 +01002060 case ATA_CMD_WRITE_FUA_EXT:
Brett Russ31961942005-09-30 01:36:00 -04002061 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
2062 break;
Brett Russ31961942005-09-30 01:36:00 -04002063 case ATA_CMD_FPDMA_READ:
2064 case ATA_CMD_FPDMA_WRITE:
Jeff Garzik8b260242005-11-12 12:32:50 -05002065 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
Brett Russ31961942005-09-30 01:36:00 -04002066 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
2067 break;
Brett Russ31961942005-09-30 01:36:00 -04002068 default:
2069 /* The only other commands EDMA supports in non-queued and
2070 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
2071 * of which are defined/used by Linux. If we get here, this
2072 * driver needs work.
2073 *
2074 * FIXME: modify libata to give qc_prep a return value and
2075 * return error here.
2076 */
2077 BUG_ON(tf->command);
2078 break;
2079 }
2080 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
2081 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
2082 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
2083 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
2084 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
2085 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
2086 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
2087 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
2088 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
2089
Jeff Garzike4e7b892006-01-31 12:18:41 -05002090 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
Brett Russ31961942005-09-30 01:36:00 -04002091 return;
Jeff Garzike4e7b892006-01-31 12:18:41 -05002092 mv_fill_sg(qc);
2093}
2094
2095/**
2096 * mv_qc_prep_iie - Host specific command preparation.
2097 * @qc: queued command to prepare
2098 *
2099 * This routine simply redirects to the general purpose routine
2100 * if command is not DMA. Else, it handles prep of the CRQB
2101 * (command request block), does some sanity checking, and calls
2102 * the SG load routine.
2103 *
2104 * LOCKING:
2105 * Inherited from caller.
2106 */
2107static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
2108{
2109 struct ata_port *ap = qc->ap;
2110 struct mv_port_priv *pp = ap->private_data;
2111 struct mv_crqb_iie *crqb;
Mark Lord8d2b4502009-04-13 11:27:18 -04002112 struct ata_taskfile *tf = &qc->tf;
Mark Lorda6432432006-05-19 16:36:36 -04002113 unsigned in_index;
Jeff Garzike4e7b892006-01-31 12:18:41 -05002114 u32 flags = 0;
2115
Mark Lord8d2b4502009-04-13 11:27:18 -04002116 if ((tf->protocol != ATA_PROT_DMA) &&
2117 (tf->protocol != ATA_PROT_NCQ))
Jeff Garzike4e7b892006-01-31 12:18:41 -05002118 return;
Mark Lord44b73382010-08-19 21:40:44 -04002119 if (tf->command == ATA_CMD_DSM)
2120 return; /* use bmdma for this */
Jeff Garzike4e7b892006-01-31 12:18:41 -05002121
Mark Lorde12bef52008-03-31 19:33:56 -04002122 /* Fill in Gen IIE command request block */
Mark Lord8d2b4502009-04-13 11:27:18 -04002123 if (!(tf->flags & ATA_TFLAG_WRITE))
Jeff Garzike4e7b892006-01-31 12:18:41 -05002124 flags |= CRQB_FLAG_READ;
2125
Tejun Heobeec7db2006-02-11 19:11:13 +09002126 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
Jeff Garzike4e7b892006-01-31 12:18:41 -05002127 flags |= qc->tag << CRQB_TAG_SHIFT;
Mark Lord8c0aeb42008-01-26 18:31:48 -05002128 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
Mark Lorde49856d2008-04-16 14:59:07 -04002129 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
Jeff Garzike4e7b892006-01-31 12:18:41 -05002130
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002131 /* get current queue index from software */
Mark Lordfcfb1f72008-04-19 15:06:40 -04002132 in_index = pp->req_idx;
Mark Lorda6432432006-05-19 16:36:36 -04002133
2134 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
Mark Lordeb73d552008-01-29 13:24:00 -05002135 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
2136 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
Jeff Garzike4e7b892006-01-31 12:18:41 -05002137 crqb->flags = cpu_to_le32(flags);
2138
Jeff Garzike4e7b892006-01-31 12:18:41 -05002139 crqb->ata_cmd[0] = cpu_to_le32(
2140 (tf->command << 16) |
2141 (tf->feature << 24)
2142 );
2143 crqb->ata_cmd[1] = cpu_to_le32(
2144 (tf->lbal << 0) |
2145 (tf->lbam << 8) |
2146 (tf->lbah << 16) |
2147 (tf->device << 24)
2148 );
2149 crqb->ata_cmd[2] = cpu_to_le32(
2150 (tf->hob_lbal << 0) |
2151 (tf->hob_lbam << 8) |
2152 (tf->hob_lbah << 16) |
2153 (tf->hob_feature << 24)
2154 );
2155 crqb->ata_cmd[3] = cpu_to_le32(
2156 (tf->nsect << 0) |
2157 (tf->hob_nsect << 8)
2158 );
2159
2160 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2161 return;
Brett Russ31961942005-09-30 01:36:00 -04002162 mv_fill_sg(qc);
2163}
2164
Brett Russ05b308e2005-10-05 17:08:53 -04002165/**
Mark Lordd16ab3f2009-02-25 15:17:43 -05002166 * mv_sff_check_status - fetch device status, if valid
2167 * @ap: ATA port to fetch status from
2168 *
2169 * When using command issue via mv_qc_issue_fis(),
2170 * the initial ATA_BUSY state does not show up in the
2171 * ATA status (shadow) register. This can confuse libata!
2172 *
2173 * So we have a hook here to fake ATA_BUSY for that situation,
2174 * until the first time a BUSY, DRQ, or ERR bit is seen.
2175 *
2176 * The rest of the time, it simply returns the ATA status register.
2177 */
2178static u8 mv_sff_check_status(struct ata_port *ap)
2179{
2180 u8 stat = ioread8(ap->ioaddr.status_addr);
2181 struct mv_port_priv *pp = ap->private_data;
2182
2183 if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
2184 if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
2185 pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
2186 else
2187 stat = ATA_BUSY;
2188 }
2189 return stat;
2190}
2191
2192/**
Mark Lord70f8b792009-02-25 15:19:20 -05002193 * mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
2194 * @fis: fis to be sent
2195 * @nwords: number of 32-bit words in the fis
2196 */
2197static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
2198{
2199 void __iomem *port_mmio = mv_ap_base(ap);
2200 u32 ifctl, old_ifctl, ifstat;
2201 int i, timeout = 200, final_word = nwords - 1;
2202
2203 /* Initiate FIS transmission mode */
Mark Lordcae5a292009-04-06 16:43:45 -04002204 old_ifctl = readl(port_mmio + SATA_IFCTL);
Mark Lord70f8b792009-02-25 15:19:20 -05002205 ifctl = 0x100 | (old_ifctl & 0xf);
Mark Lordcae5a292009-04-06 16:43:45 -04002206 writelfl(ifctl, port_mmio + SATA_IFCTL);
Mark Lord70f8b792009-02-25 15:19:20 -05002207
2208 /* Send all words of the FIS except for the final word */
2209 for (i = 0; i < final_word; ++i)
Mark Lordcae5a292009-04-06 16:43:45 -04002210 writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS);
Mark Lord70f8b792009-02-25 15:19:20 -05002211
2212 /* Flag end-of-transmission, and then send the final word */
Mark Lordcae5a292009-04-06 16:43:45 -04002213 writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);
2214 writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS);
Mark Lord70f8b792009-02-25 15:19:20 -05002215
2216 /*
2217 * Wait for FIS transmission to complete.
2218 * This typically takes just a single iteration.
2219 */
2220 do {
Mark Lordcae5a292009-04-06 16:43:45 -04002221 ifstat = readl(port_mmio + SATA_IFSTAT);
Mark Lord70f8b792009-02-25 15:19:20 -05002222 } while (!(ifstat & 0x1000) && --timeout);
2223
2224 /* Restore original port configuration */
Mark Lordcae5a292009-04-06 16:43:45 -04002225 writelfl(old_ifctl, port_mmio + SATA_IFCTL);
Mark Lord70f8b792009-02-25 15:19:20 -05002226
2227 /* See if it worked */
2228 if ((ifstat & 0x3000) != 0x1000) {
2229 ata_port_printk(ap, KERN_WARNING,
2230 "%s transmission error, ifstat=%08x\n",
2231 __func__, ifstat);
2232 return AC_ERR_OTHER;
2233 }
2234 return 0;
2235}
2236
2237/**
2238 * mv_qc_issue_fis - Issue a command directly as a FIS
2239 * @qc: queued command to start
2240 *
2241 * Note that the ATA shadow registers are not updated
2242 * after command issue, so the device will appear "READY"
2243 * if polled, even while it is BUSY processing the command.
2244 *
2245 * So we use a status hook to fake ATA_BUSY until the drive changes state.
2246 *
2247 * Note: we don't get updated shadow regs on *completion*
2248 * of non-data commands. So avoid sending them via this function,
2249 * as they will appear to have completed immediately.
2250 *
2251 * GEN_IIE has special registers that we could get the result tf from,
2252 * but earlier chipsets do not. For now, we ignore those registers.
2253 */
2254static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
2255{
2256 struct ata_port *ap = qc->ap;
2257 struct mv_port_priv *pp = ap->private_data;
2258 struct ata_link *link = qc->dev->link;
2259 u32 fis[5];
2260 int err = 0;
2261
2262 ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
Thiago Farina4c4a90f2009-11-08 14:30:57 -05002263 err = mv_send_fis(ap, fis, ARRAY_SIZE(fis));
Mark Lord70f8b792009-02-25 15:19:20 -05002264 if (err)
2265 return err;
2266
2267 switch (qc->tf.protocol) {
2268 case ATAPI_PROT_PIO:
2269 pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2270 /* fall through */
2271 case ATAPI_PROT_NODATA:
2272 ap->hsm_task_state = HSM_ST_FIRST;
2273 break;
2274 case ATA_PROT_PIO:
2275 pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2276 if (qc->tf.flags & ATA_TFLAG_WRITE)
2277 ap->hsm_task_state = HSM_ST_FIRST;
2278 else
2279 ap->hsm_task_state = HSM_ST;
2280 break;
2281 default:
2282 ap->hsm_task_state = HSM_ST_LAST;
2283 break;
2284 }
2285
2286 if (qc->tf.flags & ATA_TFLAG_POLLING)
Tejun Heoc4291372010-05-10 21:41:38 +02002287 ata_sff_queue_pio_task(ap, 0);
Mark Lord70f8b792009-02-25 15:19:20 -05002288 return 0;
2289}
2290
2291/**
Brett Russ05b308e2005-10-05 17:08:53 -04002292 * mv_qc_issue - Initiate a command to the host
2293 * @qc: queued command to start
2294 *
2295 * This routine simply redirects to the general purpose routine
2296 * if command is not DMA. Else, it sanity checks our local
2297 * caches of the request producer/consumer indices then enables
2298 * DMA and bumps the request producer index.
2299 *
2300 * LOCKING:
2301 * Inherited from caller.
2302 */
Tejun Heo9a3d9eb2006-01-23 13:09:36 +09002303static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
Brett Russ31961942005-09-30 01:36:00 -04002304{
Mark Lordf48765c2009-01-30 18:48:41 -05002305 static int limit_warnings = 10;
Jeff Garzikc5d3e452007-07-11 18:30:50 -04002306 struct ata_port *ap = qc->ap;
2307 void __iomem *port_mmio = mv_ap_base(ap);
2308 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002309 u32 in_index;
Mark Lord42ed8932009-02-25 15:15:39 -05002310 unsigned int port_irqs;
Brett Russ31961942005-09-30 01:36:00 -04002311
Mark Lordd16ab3f2009-02-25 15:17:43 -05002312 pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */
2313
Mark Lordf48765c2009-01-30 18:48:41 -05002314 switch (qc->tf.protocol) {
2315 case ATA_PROT_DMA:
Mark Lord44b73382010-08-19 21:40:44 -04002316 if (qc->tf.command == ATA_CMD_DSM) {
2317 if (!ap->ops->bmdma_setup) /* no bmdma on GEN_I */
2318 return AC_ERR_OTHER;
2319 break; /* use bmdma for this */
2320 }
2321 /* fall thru */
Mark Lordf48765c2009-01-30 18:48:41 -05002322 case ATA_PROT_NCQ:
2323 mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
2324 pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2325 in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
2326
2327 /* Write the request in pointer to kick the EDMA to life */
2328 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
Mark Lordcae5a292009-04-06 16:43:45 -04002329 port_mmio + EDMA_REQ_Q_IN_PTR);
Mark Lordf48765c2009-01-30 18:48:41 -05002330 return 0;
2331
2332 case ATA_PROT_PIO:
Mark Lordc6112bd2008-06-18 12:13:02 -04002333 /*
2334 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
2335 *
2336 * Someday, we might implement special polling workarounds
2337 * for these, but it all seems rather unnecessary since we
2338 * normally use only DMA for commands which transfer more
2339 * than a single block of data.
2340 *
2341 * Much of the time, this could just work regardless.
2342 * So for now, just log the incident, and allow the attempt.
2343 */
Mark Lordc7843e82008-06-18 21:57:42 -04002344 if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
Mark Lordc6112bd2008-06-18 12:13:02 -04002345 --limit_warnings;
2346 ata_link_printk(qc->dev->link, KERN_WARNING, DRV_NAME
2347 ": attempting PIO w/multiple DRQ: "
2348 "this may fail due to h/w errata\n");
2349 }
Mark Lordf48765c2009-01-30 18:48:41 -05002350 /* drop through */
Mark Lord42ed8932009-02-25 15:15:39 -05002351 case ATA_PROT_NODATA:
Mark Lordf48765c2009-01-30 18:48:41 -05002352 case ATAPI_PROT_PIO:
Mark Lord42ed8932009-02-25 15:15:39 -05002353 case ATAPI_PROT_NODATA:
2354 if (ap->flags & ATA_FLAG_PIO_POLLING)
2355 qc->tf.flags |= ATA_TFLAG_POLLING;
2356 break;
Brett Russ31961942005-09-30 01:36:00 -04002357 }
Mark Lord42ed8932009-02-25 15:15:39 -05002358
2359 if (qc->tf.flags & ATA_TFLAG_POLLING)
2360 port_irqs = ERR_IRQ; /* mask device interrupt when polling */
2361 else
2362 port_irqs = ERR_IRQ | DONE_IRQ; /* unmask all interrupts */
2363
2364 /*
2365 * We're about to send a non-EDMA capable command to the
2366 * port. Turn off EDMA so there won't be problems accessing
2367 * shadow block, etc registers.
2368 */
2369 mv_stop_edma(ap);
2370 mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
2371 mv_pmp_select(ap, qc->dev->link->pmp);
Mark Lord70f8b792009-02-25 15:19:20 -05002372
2373 if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
2374 struct mv_host_priv *hpriv = ap->host->private_data;
2375 /*
2376 * Workaround for 88SX60x1 FEr SATA#25 (part 2).
Mark Lord40f21b12009-03-10 18:51:04 -04002377 *
Mark Lord70f8b792009-02-25 15:19:20 -05002378 * After any NCQ error, the READ_LOG_EXT command
2379 * from libata-eh *must* use mv_qc_issue_fis().
2380 * Otherwise it might fail, due to chip errata.
2381 *
2382 * Rather than special-case it, we'll just *always*
2383 * use this method here for READ_LOG_EXT, making for
2384 * easier testing.
2385 */
2386 if (IS_GEN_II(hpriv))
2387 return mv_qc_issue_fis(qc);
2388 }
Tejun Heo360ff782010-05-10 21:41:42 +02002389 return ata_bmdma_qc_issue(qc);
Brett Russ31961942005-09-30 01:36:00 -04002390}
2391
Mark Lord8f767f82008-04-19 14:53:07 -04002392static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
2393{
2394 struct mv_port_priv *pp = ap->private_data;
2395 struct ata_queued_cmd *qc;
2396
2397 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
2398 return NULL;
2399 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Tejun Heo3e4ec342010-05-10 21:41:30 +02002400 if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
2401 return qc;
2402 return NULL;
Mark Lord8f767f82008-04-19 14:53:07 -04002403}
2404
Mark Lord29d187b2008-05-02 02:15:37 -04002405static void mv_pmp_error_handler(struct ata_port *ap)
2406{
2407 unsigned int pmp, pmp_map;
2408 struct mv_port_priv *pp = ap->private_data;
2409
2410 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
2411 /*
2412 * Perform NCQ error analysis on failed PMPs
2413 * before we freeze the port entirely.
2414 *
2415 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
2416 */
2417 pmp_map = pp->delayed_eh_pmp_map;
2418 pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
2419 for (pmp = 0; pmp_map != 0; pmp++) {
2420 unsigned int this_pmp = (1 << pmp);
2421 if (pmp_map & this_pmp) {
2422 struct ata_link *link = &ap->pmp_link[pmp];
2423 pmp_map &= ~this_pmp;
2424 ata_eh_analyze_ncq_error(link);
2425 }
2426 }
2427 ata_port_freeze(ap);
2428 }
2429 sata_pmp_error_handler(ap);
2430}
2431
Mark Lord4c299ca2008-05-02 02:16:20 -04002432static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
2433{
2434 void __iomem *port_mmio = mv_ap_base(ap);
2435
Mark Lordcae5a292009-04-06 16:43:45 -04002436 return readl(port_mmio + SATA_TESTCTL) >> 16;
Mark Lord4c299ca2008-05-02 02:16:20 -04002437}
2438
Mark Lord4c299ca2008-05-02 02:16:20 -04002439static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
2440{
2441 struct ata_eh_info *ehi;
2442 unsigned int pmp;
2443
2444 /*
2445 * Initialize EH info for PMPs which saw device errors
2446 */
2447 ehi = &ap->link.eh_info;
2448 for (pmp = 0; pmp_map != 0; pmp++) {
2449 unsigned int this_pmp = (1 << pmp);
2450 if (pmp_map & this_pmp) {
2451 struct ata_link *link = &ap->pmp_link[pmp];
2452
2453 pmp_map &= ~this_pmp;
2454 ehi = &link->eh_info;
2455 ata_ehi_clear_desc(ehi);
2456 ata_ehi_push_desc(ehi, "dev err");
2457 ehi->err_mask |= AC_ERR_DEV;
2458 ehi->action |= ATA_EH_RESET;
2459 ata_link_abort(link);
2460 }
2461 }
2462}
2463
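/*
 * mv_req_q_empty: returns nonzero when the EDMA request queue is empty,
 * i.e. the request-queue in and out pointers are equal.
 */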
Mark Lord06aaca32008-05-19 09:01:24 -04002464static int mv_req_q_empty(struct ata_port *ap)
2465{
2466 void __iomem *port_mmio = mv_ap_base(ap);
2467 u32 in_ptr, out_ptr;
2468
Mark Lordcae5a292009-04-06 16:43:45 -04002469 in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR)
Mark Lord06aaca32008-05-19 09:01:24 -04002470 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
Mark Lordcae5a292009-04-06 16:43:45 -04002471 out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR)
Mark Lord06aaca32008-05-19 09:01:24 -04002472 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2473 return (in_ptr == out_ptr); /* 1 == queue_is_empty */
2474}
2475
Mark Lord4c299ca2008-05-02 02:16:20 -04002476static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
2477{
2478 struct mv_port_priv *pp = ap->private_data;
2479 int failed_links;
2480 unsigned int old_map, new_map;
2481
2482 /*
2483 * Device error during FBS+NCQ operation:
2484 *
2485 * Set a port flag to prevent further I/O being enqueued.
2486 * Leave the EDMA running to drain outstanding commands from this port.
2487 * Perform the post-mortem/EH only when all responses are complete.
2488 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
2489 */
2490 if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
2491 pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
2492 pp->delayed_eh_pmp_map = 0;
2493 }
2494 old_map = pp->delayed_eh_pmp_map;
2495 new_map = old_map | mv_get_err_pmp_map(ap);
2496
2497 if (old_map != new_map) {
2498 pp->delayed_eh_pmp_map = new_map;
2499 mv_pmp_eh_prep(ap, new_map & ~old_map);
2500 }
Mark Lordc46938c2008-05-02 14:02:28 -04002501 failed_links = hweight16(new_map);
Mark Lord4c299ca2008-05-02 02:16:20 -04002502
2503 ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x "
2504 "failed_links=%d nr_active_links=%d\n",
2505 __func__, pp->delayed_eh_pmp_map,
2506 ap->qc_active, failed_links,
2507 ap->nr_active_links);
2508
Mark Lord06aaca32008-05-19 09:01:24 -04002509 if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
Mark Lord4c299ca2008-05-02 02:16:20 -04002510 mv_process_crpb_entries(ap, pp);
2511 mv_stop_edma(ap);
2512 mv_eh_freeze(ap);
2513 ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__);
2514 return 1; /* handled */
2515 }
2516 ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__);
2517 return 1; /* handled */
2518}
2519
2520static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
2521{
2522 /*
2523 * Possible future enhancement:
2524 *
2525 * FBS+non-NCQ operation is not yet implemented.
2526 * See related notes in mv_edma_cfg().
2527 *
2528 * Device error during FBS+non-NCQ operation:
2529 *
2530 * We need to snapshot the shadow registers for each failed command.
2531 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
2532 */
2533 return 0; /* not handled */
2534}
2535
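/*
 * mv_handle_dev_err: decide whether a device error can be handled by the
 * FBS-specific paths above.  Returns 1 if handled, 0 to fall back to the
 * normal mv_err_intr() processing.
 */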
2536static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
2537{
2538 struct mv_port_priv *pp = ap->private_data;
2539
2540 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
2541 return 0; /* EDMA was not active: not handled */
2542 if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
2543 return 0; /* FBS was not active: not handled */
2544
2545 if (!(edma_err_cause & EDMA_ERR_DEV))
2546 return 0; /* non DEV error: not handled */
2547 edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
2548 if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
2549 return 0; /* other problems: not handled */
2550
2551 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
2552 /*
2553 * EDMA should NOT have self-disabled for this case.
2554 * If it did, then something is wrong elsewhere,
2555 * and we cannot handle it here.
2556 */
2557 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2558 ata_port_printk(ap, KERN_WARNING,
2559 "%s: err_cause=0x%x pp_flags=0x%x\n",
2560 __func__, edma_err_cause, pp->pp_flags);
2561 return 0; /* not handled */
2562 }
2563 return mv_handle_fbs_ncq_dev_err(ap);
2564 } else {
2565 /*
2566 * EDMA should have self-disabled for this case.
2567 * If it did not, then something is wrong elsewhere,
2568 * and we cannot handle it here.
2569 */
2570 if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
2571 ata_port_printk(ap, KERN_WARNING,
2572 "%s: err_cause=0x%x pp_flags=0x%x\n",
2573 __func__, edma_err_cause, pp->pp_flags);
2574 return 0; /* not handled */
2575 }
2576 return mv_handle_fbs_non_ncq_dev_err(ap);
2577 }
2578 return 0; /* not handled */
2579}
2580
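/*
 * mv_unexpected_intr: a device interrupt arrived when none was expected.
 * Note the port state in the EH description and freeze the port for EH.
 */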
Mark Lorda9010322008-05-02 02:14:02 -04002581static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
Mark Lord8f767f82008-04-19 14:53:07 -04002582{
Mark Lord8f767f82008-04-19 14:53:07 -04002583 struct ata_eh_info *ehi = &ap->link.eh_info;
Mark Lorda9010322008-05-02 02:14:02 -04002584 char *when = "idle";
Mark Lord8f767f82008-04-19 14:53:07 -04002585
Mark Lord8f767f82008-04-19 14:53:07 -04002586 ata_ehi_clear_desc(ehi);
Tejun Heo3e4ec342010-05-10 21:41:30 +02002587 if (edma_was_enabled) {
Mark Lorda9010322008-05-02 02:14:02 -04002588 when = "EDMA enabled";
Mark Lord8f767f82008-04-19 14:53:07 -04002589 } else {
2590 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
2591 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
Mark Lorda9010322008-05-02 02:14:02 -04002592 when = "polling";
Mark Lord8f767f82008-04-19 14:53:07 -04002593 }
Mark Lorda9010322008-05-02 02:14:02 -04002594 ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
Mark Lord8f767f82008-04-19 14:53:07 -04002595 ehi->err_mask |= AC_ERR_OTHER;
2596 ehi->action |= ATA_EH_RESET;
2597 ata_port_freeze(ap);
2598}
2599
Brett Russ05b308e2005-10-05 17:08:53 -04002600/**
Brett Russ05b308e2005-10-05 17:08:53 -04002601 * mv_err_intr - Handle error interrupts on the port
2602 * @ap: ATA channel to manipulate
2603 *
Mark Lord8d073792008-04-19 15:07:49 -04002604 * Most cases require a full reset of the chip's state machine,
2605 * which also performs a COMRESET.
2606 * Also, if the port disabled DMA, update our cached copy to match.
Brett Russ05b308e2005-10-05 17:08:53 -04002607 *
2608 * LOCKING:
2609 * Inherited from caller.
2610 */
Mark Lord37b90462008-05-02 02:12:34 -04002611static void mv_err_intr(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04002612{
Brett Russ31961942005-09-30 01:36:00 -04002613 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002614 u32 edma_err_cause, eh_freeze_mask, serr = 0;
Mark Lorde4006072008-05-14 09:19:30 -04002615 u32 fis_cause = 0;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002616 struct mv_port_priv *pp = ap->private_data;
2617 struct mv_host_priv *hpriv = ap->host->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002618 unsigned int action = 0, err_mask = 0;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09002619 struct ata_eh_info *ehi = &ap->link.eh_info;
Mark Lord37b90462008-05-02 02:12:34 -04002620 struct ata_queued_cmd *qc;
2621 int abort = 0;
Brett Russ20f733e2005-09-01 18:26:17 -04002622
Mark Lord8d073792008-04-19 15:07:49 -04002623 /*
Mark Lord37b90462008-05-02 02:12:34 -04002624 * Read and clear the SError and err_cause bits.
Mark Lorde4006072008-05-14 09:19:30 -04002625 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
2626 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
Mark Lord8d073792008-04-19 15:07:49 -04002627 */
Mark Lord37b90462008-05-02 02:12:34 -04002628 sata_scr_read(&ap->link, SCR_ERROR, &serr);
2629 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
2630
Mark Lordcae5a292009-04-06 16:43:45 -04002631 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE);
Mark Lorde4006072008-05-14 09:19:30 -04002632 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
Mark Lordcae5a292009-04-06 16:43:45 -04002633 fis_cause = readl(port_mmio + FIS_IRQ_CAUSE);
2634 writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE);
Mark Lorde4006072008-05-14 09:19:30 -04002635 }
Mark Lordcae5a292009-04-06 16:43:45 -04002636 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002637
Mark Lord4c299ca2008-05-02 02:16:20 -04002638 if (edma_err_cause & EDMA_ERR_DEV) {
2639 /*
2640 * Device errors during FIS-based switching operation
2641 * require special handling.
2642 */
2643 if (mv_handle_dev_err(ap, edma_err_cause))
2644 return;
2645 }
2646
Mark Lord37b90462008-05-02 02:12:34 -04002647 qc = mv_get_active_qc(ap);
2648 ata_ehi_clear_desc(ehi);
2649 ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
2650 edma_err_cause, pp->pp_flags);
Mark Lorde4006072008-05-14 09:19:30 -04002651
Mark Lordc443c502008-05-14 09:24:39 -04002652 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
Mark Lorde4006072008-05-14 09:19:30 -04002653 ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
Mark Lordcae5a292009-04-06 16:43:45 -04002654 if (fis_cause & FIS_IRQ_CAUSE_AN) {
Mark Lordc443c502008-05-14 09:24:39 -04002655 u32 ec = edma_err_cause &
2656 ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
2657 sata_async_notification(ap);
2658 if (!ec)
2659 return; /* Just an AN; no need for the nukes */
2660 ata_ehi_push_desc(ehi, "SDB notify");
2661 }
2662 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002663 /*
Mark Lord352fab72008-04-19 14:43:42 -04002664 * All generations share these EDMA error cause bits:
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002665 */
Mark Lord37b90462008-05-02 02:12:34 -04002666 if (edma_err_cause & EDMA_ERR_DEV) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002667 err_mask |= AC_ERR_DEV;
Mark Lord37b90462008-05-02 02:12:34 -04002668 action |= ATA_EH_RESET;
2669 ata_ehi_push_desc(ehi, "dev error");
2670 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002671 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
Jeff Garzik6c1153e2007-07-13 15:20:15 -04002672 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002673 EDMA_ERR_INTRL_PAR)) {
2674 err_mask |= AC_ERR_ATA_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09002675 action |= ATA_EH_RESET;
Tejun Heob64bbc32007-07-16 14:29:39 +09002676 ata_ehi_push_desc(ehi, "parity error");
Brett Russafb0edd2005-10-05 17:08:42 -04002677 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002678 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
2679 ata_ehi_hotplugged(ehi);
2680 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
Tejun Heob64bbc32007-07-16 14:29:39 +09002681 "dev disconnect" : "dev connect");
Tejun Heocf480622008-01-24 00:05:14 +09002682 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002683 }
2684
Mark Lord352fab72008-04-19 14:43:42 -04002685 /*
2686 * Gen-I has a different SELF_DIS bit,
2687 * different FREEZE bits, and no SERR bit:
2688 */
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002689 if (IS_GEN_I(hpriv)) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002690 eh_freeze_mask = EDMA_EH_FREEZE_5;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002691 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002692 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09002693 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002694 }
2695 } else {
2696 eh_freeze_mask = EDMA_EH_FREEZE;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002697 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002698 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09002699 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002700 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002701 if (edma_err_cause & EDMA_ERR_SERR) {
Mark Lord8d073792008-04-19 15:07:49 -04002702 ata_ehi_push_desc(ehi, "SError=%08x", serr);
2703 err_mask |= AC_ERR_ATA_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09002704 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002705 }
2706 }
Brett Russ20f733e2005-09-01 18:26:17 -04002707
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002708 if (!err_mask) {
2709 err_mask = AC_ERR_OTHER;
Tejun Heocf480622008-01-24 00:05:14 +09002710 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002711 }
2712
2713 ehi->serror |= serr;
2714 ehi->action |= action;
2715
2716 if (qc)
2717 qc->err_mask |= err_mask;
2718 else
2719 ehi->err_mask |= err_mask;
2720
Mark Lord37b90462008-05-02 02:12:34 -04002721 if (err_mask == AC_ERR_DEV) {
2722 /*
2723 * Cannot do ata_port_freeze() here,
2724 * because it would kill PIO access,
2725 * which is needed for further diagnosis.
2726 */
2727 mv_eh_freeze(ap);
2728 abort = 1;
2729 } else if (edma_err_cause & eh_freeze_mask) {
2730 /*
2731 * Note to self: ata_port_freeze() calls ata_port_abort()
2732 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002733 ata_port_freeze(ap);
Mark Lord37b90462008-05-02 02:12:34 -04002734 } else {
2735 abort = 1;
2736 }
2737
2738 if (abort) {
2739 if (qc)
2740 ata_link_abort(qc->dev->link);
2741 else
2742 ata_port_abort(ap);
2743 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002744}
2745
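/*
 * mv_process_crpb_response: complete the qc for one response-queue entry,
 * unless a non-NCQ error is flagged, in which case mv_err_intr() handles it.
 */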
Mark Lordfcfb1f72008-04-19 15:06:40 -04002746static void mv_process_crpb_response(struct ata_port *ap,
2747 struct mv_crpb *response, unsigned int tag, int ncq_enabled)
2748{
Tejun Heo752e3862010-06-25 15:02:59 +02002749 u8 ata_status;
2750 u16 edma_status = le16_to_cpu(response->flags);
Mark Lordfcfb1f72008-04-19 15:06:40 -04002751 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
2752
Tejun Heo752e3862010-06-25 15:02:59 +02002753 if (unlikely(!qc)) {
Mark Lordfcfb1f72008-04-19 15:06:40 -04002754 ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
2755 __func__, tag);
Tejun Heo752e3862010-06-25 15:02:59 +02002756 return;
Mark Lordfcfb1f72008-04-19 15:06:40 -04002757 }
Tejun Heo752e3862010-06-25 15:02:59 +02002758
2759 /*
2760 * edma_status from a response queue entry:
2761 * LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only).
2762 * MSB is saved ATA status from command completion.
2763 */
2764 if (!ncq_enabled) {
2765 u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
2766 if (err_cause) {
2767 /*
2768 * Error will be seen/handled by
2769 * mv_err_intr(). So do nothing at all here.
2770 */
2771 return;
2772 }
2773 }
2774 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
2775 if (!ac_err_mask(ata_status))
2776 ata_qc_complete(qc);
2777 /* else: leave it for mv_err_intr() */
Mark Lordfcfb1f72008-04-19 15:06:40 -04002778}
2779
2780static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002781{
2782 void __iomem *port_mmio = mv_ap_base(ap);
2783 struct mv_host_priv *hpriv = ap->host->private_data;
Mark Lordfcfb1f72008-04-19 15:06:40 -04002784 u32 in_index;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002785 bool work_done = false;
Mark Lordfcfb1f72008-04-19 15:06:40 -04002786 int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002787
Mark Lordfcfb1f72008-04-19 15:06:40 -04002788 /* Get the hardware queue position index */
Mark Lordcae5a292009-04-06 16:43:45 -04002789 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002790 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2791
Mark Lordfcfb1f72008-04-19 15:06:40 -04002792	/* Process new responses since the last time we looked */
2793 while (in_index != pp->resp_idx) {
Jeff Garzik6c1153e2007-07-13 15:20:15 -04002794 unsigned int tag;
Mark Lordfcfb1f72008-04-19 15:06:40 -04002795 struct mv_crpb *response = &pp->crpb[pp->resp_idx];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002796
Mark Lordfcfb1f72008-04-19 15:06:40 -04002797 pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002798
Mark Lordfcfb1f72008-04-19 15:06:40 -04002799 if (IS_GEN_I(hpriv)) {
2800 /* 50xx: no NCQ, only one command active at a time */
Tejun Heo9af5c9c2007-08-06 18:36:22 +09002801 tag = ap->link.active_tag;
Mark Lordfcfb1f72008-04-19 15:06:40 -04002802 } else {
2803 /* Gen II/IIE: get command tag from CRPB entry */
2804 tag = le16_to_cpu(response->id) & 0x1f;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002805 }
Mark Lordfcfb1f72008-04-19 15:06:40 -04002806 mv_process_crpb_response(ap, response, tag, ncq_enabled);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002807 work_done = true;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002808 }
2809
Mark Lord352fab72008-04-19 14:43:42 -04002810 /* Update the software queue position index in hardware */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002811 if (work_done)
2812 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
Mark Lordfcfb1f72008-04-19 15:06:40 -04002813 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
Mark Lordcae5a292009-04-06 16:43:45 -04002814 port_mmio + EDMA_RSP_Q_OUT_PTR);
Brett Russ20f733e2005-09-01 18:26:17 -04002815}
2816
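/*
 * mv_port_intr: per-port interrupt dispatch.  Process completed CRPB
 * responses first, then chip-reported errors, then non-EDMA (PIO) events.
 */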
Mark Lorda9010322008-05-02 02:14:02 -04002817static void mv_port_intr(struct ata_port *ap, u32 port_cause)
2818{
2819 struct mv_port_priv *pp;
2820 int edma_was_enabled;
2821
Mark Lorda9010322008-05-02 02:14:02 -04002822 /*
2823 * Grab a snapshot of the EDMA_EN flag setting,
2824 * so that we have a consistent view for this port,
2825	 * even if one of the routines we call changes it.
2826 */
2827 pp = ap->private_data;
2828 edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2829 /*
2830 * Process completed CRPB response(s) before other events.
2831 */
2832 if (edma_was_enabled && (port_cause & DONE_IRQ)) {
2833 mv_process_crpb_entries(ap, pp);
Mark Lord4c299ca2008-05-02 02:16:20 -04002834 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
2835 mv_handle_fbs_ncq_dev_err(ap);
Mark Lorda9010322008-05-02 02:14:02 -04002836 }
2837 /*
2838 * Handle chip-reported errors, or continue on to handle PIO.
2839 */
2840 if (unlikely(port_cause & ERR_IRQ)) {
2841 mv_err_intr(ap);
2842 } else if (!edma_was_enabled) {
2843 struct ata_queued_cmd *qc = mv_get_active_qc(ap);
2844 if (qc)
Tejun Heoc3b28892010-05-19 22:10:21 +02002845 ata_bmdma_port_intr(ap, qc);
Mark Lorda9010322008-05-02 02:14:02 -04002846 else
2847 mv_unexpected_intr(ap, edma_was_enabled);
2848 }
2849}
2850
Brett Russ05b308e2005-10-05 17:08:53 -04002851/**
2852 * mv_host_intr - Handle all interrupts on the given host controller
Jeff Garzikcca39742006-08-24 03:19:22 -04002853 * @host: host specific structure
Mark Lord7368f912008-04-25 11:24:24 -04002854 * @main_irq_cause: Main interrupt cause register for the chip.
Brett Russ05b308e2005-10-05 17:08:53 -04002855 *
2856 * LOCKING:
2857 * Inherited from caller.
2858 */
Mark Lord7368f912008-04-25 11:24:24 -04002859static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
Brett Russ20f733e2005-09-01 18:26:17 -04002860{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002861 struct mv_host_priv *hpriv = host->private_data;
Mark Lordeabd5eb2008-05-02 02:13:27 -04002862 void __iomem *mmio = hpriv->base, *hc_mmio;
Mark Lorda3718c12008-04-19 15:07:18 -04002863 unsigned int handled = 0, port;
Brett Russ20f733e2005-09-01 18:26:17 -04002864
Mark Lord2b748a02009-03-10 22:01:17 -04002865 /* If asserted, clear the "all ports" IRQ coalescing bit */
2866 if (main_irq_cause & ALL_PORTS_COAL_DONE)
Mark Lordcae5a292009-04-06 16:43:45 -04002867 writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
Mark Lord2b748a02009-03-10 22:01:17 -04002868
Mark Lorda3718c12008-04-19 15:07:18 -04002869 for (port = 0; port < hpriv->n_ports; port++) {
Jeff Garzikcca39742006-08-24 03:19:22 -04002870 struct ata_port *ap = host->ports[port];
Mark Lordeabd5eb2008-05-02 02:13:27 -04002871 unsigned int p, shift, hardport, port_cause;
2872
Mark Lorda3718c12008-04-19 15:07:18 -04002873 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
Mark Lorda3718c12008-04-19 15:07:18 -04002874 /*
Mark Lordeabd5eb2008-05-02 02:13:27 -04002875 * Each hc within the host has its own hc_irq_cause register,
2876	 * where the interrupting ports' bits get ack'd.
Mark Lorda3718c12008-04-19 15:07:18 -04002877 */
Mark Lordeabd5eb2008-05-02 02:13:27 -04002878 if (hardport == 0) { /* first port on this hc ? */
2879 u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
2880 u32 port_mask, ack_irqs;
2881 /*
2882 * Skip this entire hc if nothing pending for any ports
2883 */
2884 if (!hc_cause) {
2885 port += MV_PORTS_PER_HC - 1;
2886 continue;
2887 }
2888 /*
2889 * We don't need/want to read the hc_irq_cause register,
2890 * because doing so hurts performance, and
2891 * main_irq_cause already gives us everything we need.
2892 *
2893 * But we do have to *write* to the hc_irq_cause to ack
2894 * the ports that we are handling this time through.
2895 *
2896 * This requires that we create a bitmap for those
2897 * ports which interrupted us, and use that bitmap
2898 * to ack (only) those ports via hc_irq_cause.
2899 */
2900 ack_irqs = 0;
Mark Lord2b748a02009-03-10 22:01:17 -04002901 if (hc_cause & PORTS_0_3_COAL_DONE)
2902 ack_irqs = HC_COAL_IRQ;
Mark Lordeabd5eb2008-05-02 02:13:27 -04002903 for (p = 0; p < MV_PORTS_PER_HC; ++p) {
2904 if ((port + p) >= hpriv->n_ports)
2905 break;
2906 port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
2907 if (hc_cause & port_mask)
2908 ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
2909 }
Mark Lorda3718c12008-04-19 15:07:18 -04002910 hc_mmio = mv_hc_base_from_port(mmio, port);
Mark Lordcae5a292009-04-06 16:43:45 -04002911 writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE);
Mark Lorda3718c12008-04-19 15:07:18 -04002912 handled = 1;
2913 }
Mark Lorda9010322008-05-02 02:14:02 -04002914 /*
2915 * Handle interrupts signalled for this port:
2916 */
Mark Lordeabd5eb2008-05-02 02:13:27 -04002917 port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
Mark Lorda9010322008-05-02 02:14:02 -04002918 if (port_cause)
2919 mv_port_intr(ap, port_cause);
Brett Russ20f733e2005-09-01 18:26:17 -04002920 }
Mark Lorda3718c12008-04-19 15:07:18 -04002921 return handled;
Brett Russ20f733e2005-09-01 18:26:17 -04002922}
2923
Mark Lorda3718c12008-04-19 15:07:18 -04002924static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002925{
Mark Lord02a121d2007-12-01 13:07:22 -05002926 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002927 struct ata_port *ap;
2928 struct ata_queued_cmd *qc;
2929 struct ata_eh_info *ehi;
2930 unsigned int i, err_mask, printed = 0;
2931 u32 err_cause;
2932
Mark Lordcae5a292009-04-06 16:43:45 -04002933 err_cause = readl(mmio + hpriv->irq_cause_offset);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002934
2935 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
2936 err_cause);
2937
2938 DPRINTK("All regs @ PCI error\n");
2939 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
2940
Mark Lordcae5a292009-04-06 16:43:45 -04002941 writelfl(0, mmio + hpriv->irq_cause_offset);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002942
2943 for (i = 0; i < host->n_ports; i++) {
2944 ap = host->ports[i];
Tejun Heo936fd732007-08-06 18:36:23 +09002945 if (!ata_link_offline(&ap->link)) {
Tejun Heo9af5c9c2007-08-06 18:36:22 +09002946 ehi = &ap->link.eh_info;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002947 ata_ehi_clear_desc(ehi);
2948 if (!printed++)
2949 ata_ehi_push_desc(ehi,
2950 "PCI err cause 0x%08x", err_cause);
2951 err_mask = AC_ERR_HOST_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09002952 ehi->action = ATA_EH_RESET;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09002953 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002954 if (qc)
2955 qc->err_mask |= err_mask;
2956 else
2957 ehi->err_mask |= err_mask;
2958
2959 ata_port_freeze(ap);
2960 }
2961 }
Mark Lorda3718c12008-04-19 15:07:18 -04002962 return 1; /* handled */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002963}
2964
Brett Russ05b308e2005-10-05 17:08:53 -04002965/**
Jeff Garzikc5d3e452007-07-11 18:30:50 -04002966 * mv_interrupt - Main interrupt event handler
Brett Russ05b308e2005-10-05 17:08:53 -04002967 * @irq: unused
2968 * @dev_instance: private data; in this case the host structure
Brett Russ05b308e2005-10-05 17:08:53 -04002969 *
2970 * Read the read only register to determine if any host
2971 * controllers have pending interrupts. If so, call lower level
2972 * routine to handle. Also check for PCI errors which are only
2973 * reported here.
2974 *
Jeff Garzik8b260242005-11-12 12:32:50 -05002975 * LOCKING:
Jeff Garzikcca39742006-08-24 03:19:22 -04002976 * This routine holds the host lock while processing pending
Brett Russ05b308e2005-10-05 17:08:53 -04002977 * interrupts.
2978 */
David Howells7d12e782006-10-05 14:55:46 +01002979static irqreturn_t mv_interrupt(int irq, void *dev_instance)
Brett Russ20f733e2005-09-01 18:26:17 -04002980{
Jeff Garzikcca39742006-08-24 03:19:22 -04002981 struct ata_host *host = dev_instance;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002982 struct mv_host_priv *hpriv = host->private_data;
Mark Lorda3718c12008-04-19 15:07:18 -04002983 unsigned int handled = 0;
Mark Lord6d3c30e2009-01-21 10:31:29 -05002984 int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
Mark Lord96e2c4872008-05-17 13:38:00 -04002985 u32 main_irq_cause, pending_irqs;
Brett Russ20f733e2005-09-01 18:26:17 -04002986
Mark Lord646a4da2008-01-26 18:30:37 -05002987 spin_lock(&host->lock);
Mark Lord6d3c30e2009-01-21 10:31:29 -05002988
2989 /* for MSI: block new interrupts while in here */
2990 if (using_msi)
Mark Lord2b748a02009-03-10 22:01:17 -04002991 mv_write_main_irq_mask(0, hpriv);
Mark Lord6d3c30e2009-01-21 10:31:29 -05002992
Mark Lord7368f912008-04-25 11:24:24 -04002993 main_irq_cause = readl(hpriv->main_irq_cause_addr);
Mark Lord96e2c4872008-05-17 13:38:00 -04002994 pending_irqs = main_irq_cause & hpriv->main_irq_mask;
Mark Lord352fab72008-04-19 14:43:42 -04002995 /*
2996 * Deal with cases where we either have nothing pending, or have read
2997 * a bogus register value which can indicate HW removal or PCI fault.
Brett Russ20f733e2005-09-01 18:26:17 -04002998 */
Mark Lorda44253d2008-05-17 13:37:07 -04002999 if (pending_irqs && main_irq_cause != 0xffffffffU) {
Mark Lord1f398472008-05-27 17:54:48 -04003000 if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
Mark Lorda3718c12008-04-19 15:07:18 -04003001 handled = mv_pci_error(host, hpriv->base);
3002 else
Mark Lorda44253d2008-05-17 13:37:07 -04003003 handled = mv_host_intr(host, pending_irqs);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003004 }
Mark Lord6d3c30e2009-01-21 10:31:29 -05003005
3006 /* for MSI: unmask; interrupt cause bits will retrigger now */
3007 if (using_msi)
Mark Lord2b748a02009-03-10 22:01:17 -04003008 mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);
Mark Lord6d3c30e2009-01-21 10:31:29 -05003009
Mark Lord9d51af72009-03-10 16:28:51 -04003010 spin_unlock(&host->lock);
3011
Brett Russ20f733e2005-09-01 18:26:17 -04003012 return IRQ_RETVAL(handled);
3013}
3014
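/*
 * mv5_scr_offset: map an SCR register index to its offset within the
 * 50xx per-port PHY register block; returns 0xffffffffU if unsupported.
 */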
Jeff Garzikc9d39132005-11-13 17:47:51 -05003015static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
3016{
3017 unsigned int ofs;
3018
3019 switch (sc_reg_in) {
3020 case SCR_STATUS:
3021 case SCR_ERROR:
3022 case SCR_CONTROL:
3023 ofs = sc_reg_in * sizeof(u32);
3024 break;
3025 default:
3026 ofs = 0xffffffffU;
3027 break;
3028 }
3029 return ofs;
3030}
3031
Tejun Heo82ef04f2008-07-31 17:02:40 +09003032static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05003033{
Tejun Heo82ef04f2008-07-31 17:02:40 +09003034 struct mv_host_priv *hpriv = link->ap->host->private_data;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003035 void __iomem *mmio = hpriv->base;
Tejun Heo82ef04f2008-07-31 17:02:40 +09003036 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05003037 unsigned int ofs = mv5_scr_offset(sc_reg_in);
3038
Tejun Heoda3dbb12007-07-16 14:29:40 +09003039 if (ofs != 0xffffffffU) {
3040 *val = readl(addr + ofs);
3041 return 0;
3042 } else
3043 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05003044}
3045
Tejun Heo82ef04f2008-07-31 17:02:40 +09003046static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05003047{
Tejun Heo82ef04f2008-07-31 17:02:40 +09003048 struct mv_host_priv *hpriv = link->ap->host->private_data;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003049 void __iomem *mmio = hpriv->base;
Tejun Heo82ef04f2008-07-31 17:02:40 +09003050 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05003051 unsigned int ofs = mv5_scr_offset(sc_reg_in);
3052
Tejun Heoda3dbb12007-07-16 14:29:40 +09003053 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09003054 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09003055 return 0;
3056 } else
3057 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05003058}
3059
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003060static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
Jeff Garzik522479f2005-11-12 22:14:02 -05003061{
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003062 struct pci_dev *pdev = to_pci_dev(host->dev);
Jeff Garzik522479f2005-11-12 22:14:02 -05003063 int early_5080;
3064
Auke Kok44c10132007-06-08 15:46:36 -07003065 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
Jeff Garzik522479f2005-11-12 22:14:02 -05003066
3067 if (!early_5080) {
3068 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3069 tmp |= (1 << 0);
3070 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3071 }
3072
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003073 mv_reset_pci_bus(host, mmio);
Jeff Garzik522479f2005-11-12 22:14:02 -05003074}
3075
3076static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3077{
Mark Lordcae5a292009-04-06 16:43:45 -04003078 writel(0x0fcfffff, mmio + FLASH_CTL);
Jeff Garzik522479f2005-11-12 22:14:02 -05003079}
3080
Jeff Garzik47c2b672005-11-12 21:13:17 -05003081static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05003082 void __iomem *mmio)
3083{
Jeff Garzikc9d39132005-11-13 17:47:51 -05003084 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
3085 u32 tmp;
3086
3087 tmp = readl(phy_mmio + MV5_PHY_MODE);
3088
3089 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
3090 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05003091}
3092
Jeff Garzik47c2b672005-11-12 21:13:17 -05003093static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05003094{
Jeff Garzik522479f2005-11-12 22:14:02 -05003095 u32 tmp;
3096
Mark Lordcae5a292009-04-06 16:43:45 -04003097 writel(0, mmio + GPIO_PORT_CTL);
Jeff Garzik522479f2005-11-12 22:14:02 -05003098
3099 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
3100
3101 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3102 tmp |= ~(1 << 0);
3103 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05003104}
3105
Jeff Garzik2a47ce02005-11-12 23:05:14 -05003106static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3107 unsigned int port)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003108{
Jeff Garzikc9d39132005-11-13 17:47:51 -05003109 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
3110 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
3111 u32 tmp;
3112 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
3113
3114 if (fix_apm_sq) {
Mark Lordcae5a292009-04-06 16:43:45 -04003115 tmp = readl(phy_mmio + MV5_LTMODE);
Jeff Garzikc9d39132005-11-13 17:47:51 -05003116 tmp |= (1 << 19);
Mark Lordcae5a292009-04-06 16:43:45 -04003117 writel(tmp, phy_mmio + MV5_LTMODE);
Jeff Garzikc9d39132005-11-13 17:47:51 -05003118
Mark Lordcae5a292009-04-06 16:43:45 -04003119 tmp = readl(phy_mmio + MV5_PHY_CTL);
Jeff Garzikc9d39132005-11-13 17:47:51 -05003120 tmp &= ~0x3;
3121 tmp |= 0x1;
Mark Lordcae5a292009-04-06 16:43:45 -04003122 writel(tmp, phy_mmio + MV5_PHY_CTL);
Jeff Garzikc9d39132005-11-13 17:47:51 -05003123 }
3124
3125 tmp = readl(phy_mmio + MV5_PHY_MODE);
3126 tmp &= ~mask;
3127 tmp |= hpriv->signal[port].pre;
3128 tmp |= hpriv->signal[port].amps;
3129 writel(tmp, phy_mmio + MV5_PHY_MODE);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003130}
3131
Jeff Garzikc9d39132005-11-13 17:47:51 -05003132
3133#undef ZERO
3134#define ZERO(reg) writel(0, port_mmio + (reg))
3135static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
3136 unsigned int port)
Jeff Garzik47c2b672005-11-12 21:13:17 -05003137{
Jeff Garzikc9d39132005-11-13 17:47:51 -05003138 void __iomem *port_mmio = mv_port_base(mmio, port);
3139
Mark Lorde12bef52008-03-31 19:33:56 -04003140 mv_reset_channel(hpriv, mmio, port);
Jeff Garzikc9d39132005-11-13 17:47:51 -05003141
3142 ZERO(0x028); /* command */
Mark Lordcae5a292009-04-06 16:43:45 -04003143 writel(0x11f, port_mmio + EDMA_CFG);
Jeff Garzikc9d39132005-11-13 17:47:51 -05003144 ZERO(0x004); /* timer */
3145 ZERO(0x008); /* irq err cause */
3146 ZERO(0x00c); /* irq err mask */
3147 ZERO(0x010); /* rq bah */
3148 ZERO(0x014); /* rq inp */
3149 ZERO(0x018); /* rq outp */
3150 ZERO(0x01c); /* respq bah */
3151 ZERO(0x024); /* respq outp */
3152 ZERO(0x020); /* respq inp */
3153 ZERO(0x02c); /* test control */
Mark Lordcae5a292009-04-06 16:43:45 -04003154 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
Jeff Garzikc9d39132005-11-13 17:47:51 -05003155}
3156#undef ZERO
3157
3158#define ZERO(reg) writel(0, hc_mmio + (reg))
3159static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3160 unsigned int hc)
3161{
3162 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3163 u32 tmp;
3164
3165 ZERO(0x00c);
3166 ZERO(0x010);
3167 ZERO(0x014);
3168 ZERO(0x018);
3169
3170 tmp = readl(hc_mmio + 0x20);
3171 tmp &= 0x1c1c1c1c;
3172 tmp |= 0x03030303;
3173 writel(tmp, hc_mmio + 0x20);
3174}
3175#undef ZERO
3176
3177static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3178 unsigned int n_hc)
3179{
3180 unsigned int hc, port;
3181
3182 for (hc = 0; hc < n_hc; hc++) {
3183 for (port = 0; port < MV_PORTS_PER_HC; port++)
3184 mv5_reset_hc_port(hpriv, mmio,
3185 (hc * MV_PORTS_PER_HC) + port);
3186
3187 mv5_reset_one_hc(hpriv, mmio, hc);
3188 }
3189
3190 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05003191}
3192
Jeff Garzik101ffae2005-11-12 22:17:49 -05003193#undef ZERO
3194#define ZERO(reg) writel(0, mmio + (reg))
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003195static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
Jeff Garzik101ffae2005-11-12 22:17:49 -05003196{
Mark Lord02a121d2007-12-01 13:07:22 -05003197 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzik101ffae2005-11-12 22:17:49 -05003198 u32 tmp;
3199
Mark Lordcae5a292009-04-06 16:43:45 -04003200 tmp = readl(mmio + MV_PCI_MODE);
Jeff Garzik101ffae2005-11-12 22:17:49 -05003201 tmp &= 0xff00ffff;
Mark Lordcae5a292009-04-06 16:43:45 -04003202 writel(tmp, mmio + MV_PCI_MODE);
Jeff Garzik101ffae2005-11-12 22:17:49 -05003203
3204 ZERO(MV_PCI_DISC_TIMER);
3205 ZERO(MV_PCI_MSI_TRIGGER);
Mark Lordcae5a292009-04-06 16:43:45 -04003206 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
Jeff Garzik101ffae2005-11-12 22:17:49 -05003207 ZERO(MV_PCI_SERR_MASK);
Mark Lordcae5a292009-04-06 16:43:45 -04003208 ZERO(hpriv->irq_cause_offset);
3209 ZERO(hpriv->irq_mask_offset);
Jeff Garzik101ffae2005-11-12 22:17:49 -05003210 ZERO(MV_PCI_ERR_LOW_ADDRESS);
3211 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
3212 ZERO(MV_PCI_ERR_ATTRIBUTE);
3213 ZERO(MV_PCI_ERR_COMMAND);
3214}
3215#undef ZERO
3216
3217static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3218{
3219 u32 tmp;
3220
3221 mv5_reset_flash(hpriv, mmio);
3222
Mark Lordcae5a292009-04-06 16:43:45 -04003223 tmp = readl(mmio + GPIO_PORT_CTL);
Jeff Garzik101ffae2005-11-12 22:17:49 -05003224 tmp &= 0x3;
3225 tmp |= (1 << 5) | (1 << 6);
Mark Lordcae5a292009-04-06 16:43:45 -04003226 writel(tmp, mmio + GPIO_PORT_CTL);
Jeff Garzik101ffae2005-11-12 22:17:49 -05003227}
3228
3229/**
3230 * mv6_reset_hc - Perform the 6xxx global soft reset
3231 * @mmio: base address of the HBA
3232 *
3233 * This routine only applies to 6xxx parts.
3234 *
3235 * LOCKING:
3236 * Inherited from caller.
3237 */
Jeff Garzikc9d39132005-11-13 17:47:51 -05003238static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3239 unsigned int n_hc)
Jeff Garzik101ffae2005-11-12 22:17:49 -05003240{
Mark Lordcae5a292009-04-06 16:43:45 -04003241 void __iomem *reg = mmio + PCI_MAIN_CMD_STS;
Jeff Garzik101ffae2005-11-12 22:17:49 -05003242 int i, rc = 0;
3243 u32 t;
3244
3245 /* Following procedure defined in PCI "main command and status
3246 * register" table.
3247 */
3248 t = readl(reg);
3249 writel(t | STOP_PCI_MASTER, reg);
3250
3251 for (i = 0; i < 1000; i++) {
3252 udelay(1);
3253 t = readl(reg);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04003254 if (PCI_MASTER_EMPTY & t)
Jeff Garzik101ffae2005-11-12 22:17:49 -05003255 break;
Jeff Garzik101ffae2005-11-12 22:17:49 -05003256 }
3257 if (!(PCI_MASTER_EMPTY & t)) {
3258 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
3259 rc = 1;
3260 goto done;
3261 }
3262
3263 /* set reset */
3264 i = 5;
3265 do {
3266 writel(t | GLOB_SFT_RST, reg);
3267 t = readl(reg);
3268 udelay(1);
3269 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
3270
3271 if (!(GLOB_SFT_RST & t)) {
3272 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
3273 rc = 1;
3274 goto done;
3275 }
3276
3277 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
3278 i = 5;
3279 do {
3280 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
3281 t = readl(reg);
3282 udelay(1);
3283 } while ((GLOB_SFT_RST & t) && (i-- > 0));
3284
3285 if (GLOB_SFT_RST & t) {
3286 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
3287 rc = 1;
3288 }
3289done:
3290 return rc;
3291}
3292
Jeff Garzik47c2b672005-11-12 21:13:17 -05003293static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05003294 void __iomem *mmio)
3295{
3296 void __iomem *port_mmio;
3297 u32 tmp;
3298
Mark Lordcae5a292009-04-06 16:43:45 -04003299 tmp = readl(mmio + RESET_CFG);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05003300 if ((tmp & (1 << 0)) == 0) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05003301 hpriv->signal[idx].amps = 0x7 << 8;
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05003302 hpriv->signal[idx].pre = 0x1 << 5;
3303 return;
3304 }
3305
3306 port_mmio = mv_port_base(mmio, idx);
3307 tmp = readl(port_mmio + PHY_MODE2);
3308
3309 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
3310 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
3311}
3312
Jeff Garzik47c2b672005-11-12 21:13:17 -05003313static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05003314{
Mark Lordcae5a292009-04-06 16:43:45 -04003315 writel(0x00000060, mmio + GPIO_PORT_CTL);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05003316}
3317
Jeff Garzikc9d39132005-11-13 17:47:51 -05003318static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
Jeff Garzik2a47ce02005-11-12 23:05:14 -05003319 unsigned int port)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003320{
Jeff Garzikc9d39132005-11-13 17:47:51 -05003321 void __iomem *port_mmio = mv_port_base(mmio, port);
3322
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003323 u32 hp_flags = hpriv->hp_flags;
Jeff Garzik47c2b672005-11-12 21:13:17 -05003324 int fix_phy_mode2 =
3325 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003326 int fix_phy_mode4 =
Jeff Garzik47c2b672005-11-12 21:13:17 -05003327 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
Mark Lord8c30a8b2008-05-27 17:56:31 -04003328 u32 m2, m3;
Jeff Garzik47c2b672005-11-12 21:13:17 -05003329
3330 if (fix_phy_mode2) {
3331 m2 = readl(port_mmio + PHY_MODE2);
3332 m2 &= ~(1 << 16);
3333 m2 |= (1 << 31);
3334 writel(m2, port_mmio + PHY_MODE2);
3335
3336 udelay(200);
3337
3338 m2 = readl(port_mmio + PHY_MODE2);
3339 m2 &= ~((1 << 16) | (1 << 31));
3340 writel(m2, port_mmio + PHY_MODE2);
3341
3342 udelay(200);
3343 }
3344
Mark Lord8c30a8b2008-05-27 17:56:31 -04003345 /*
3346 * Gen-II/IIe PHY_MODE3 errata RM#2:
3347 * Achieves better receiver noise performance than the h/w default:
3348 */
3349 m3 = readl(port_mmio + PHY_MODE3);
3350 m3 = (m3 & 0x1f) | (0x5555601 << 5);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003351
Mark Lord0388a8c2008-05-28 13:41:52 -04003352 /* Guideline 88F5182 (GL# SATA-S11) */
3353 if (IS_SOC(hpriv))
3354 m3 &= ~0x1c;
3355
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003356 if (fix_phy_mode4) {
Mark Lordba069e32008-05-31 16:46:34 -04003357 u32 m4 = readl(port_mmio + PHY_MODE4);
3358 /*
3359 * Enforce reserved-bit restrictions on GenIIe devices only.
3360 * For earlier chipsets, force only the internal config field
3361 * (workaround for errata FEr SATA#10 part 1).
3362 */
Mark Lord8c30a8b2008-05-27 17:56:31 -04003363 if (IS_GEN_IIE(hpriv))
Mark Lordba069e32008-05-31 16:46:34 -04003364 m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
3365 else
3366 m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
Mark Lord8c30a8b2008-05-27 17:56:31 -04003367 writel(m4, port_mmio + PHY_MODE4);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003368 }
Mark Lordb406c7a2008-05-28 12:01:12 -04003369 /*
3370 * Workaround for 60x1-B2 errata SATA#13:
3371 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
3372 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
Mark Lordba684602009-04-06 15:25:39 -04003373 * Or ensure we use writelfl() when writing PHY_MODE4.
Mark Lordb406c7a2008-05-28 12:01:12 -04003374 */
3375 writel(m3, port_mmio + PHY_MODE3);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003376
3377 /* Revert values of pre-emphasis and signal amps to the saved ones */
3378 m2 = readl(port_mmio + PHY_MODE2);
3379
3380 m2 &= ~MV_M2_PREAMP_MASK;
Jeff Garzik2a47ce02005-11-12 23:05:14 -05003381 m2 |= hpriv->signal[port].amps;
3382 m2 |= hpriv->signal[port].pre;
Jeff Garzik47c2b672005-11-12 21:13:17 -05003383 m2 &= ~(1 << 16);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003384
Jeff Garzike4e7b892006-01-31 12:18:41 -05003385 /* according to mvSata 3.6.1, some IIE values are fixed */
3386 if (IS_GEN_IIE(hpriv)) {
3387 m2 &= ~0xC30FF01F;
3388 m2 |= 0x0000900F;
3389 }
3390
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003391 writel(m2, port_mmio + PHY_MODE2);
3392}
3393
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003394/* TODO: use the generic LED interface to configure the SATA Presence */
3395/* & Activity LEDs on the board */
3396static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
3397 void __iomem *mmio)
3398{
3399 return;
3400}
3401
3402static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
3403 void __iomem *mmio)
3404{
3405 void __iomem *port_mmio;
3406 u32 tmp;
3407
3408 port_mmio = mv_port_base(mmio, idx);
3409 tmp = readl(port_mmio + PHY_MODE2);
3410
3411 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
3412 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
3413}
3414
3415#undef ZERO
3416#define ZERO(reg) writel(0, port_mmio + (reg))
3417static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
3418 void __iomem *mmio, unsigned int port)
3419{
3420 void __iomem *port_mmio = mv_port_base(mmio, port);
3421
Mark Lorde12bef52008-03-31 19:33:56 -04003422 mv_reset_channel(hpriv, mmio, port);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003423
3424 ZERO(0x028); /* command */
Mark Lordcae5a292009-04-06 16:43:45 -04003425 writel(0x101f, port_mmio + EDMA_CFG);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003426 ZERO(0x004); /* timer */
3427 ZERO(0x008); /* irq err cause */
3428 ZERO(0x00c); /* irq err mask */
3429 ZERO(0x010); /* rq bah */
3430 ZERO(0x014); /* rq inp */
3431 ZERO(0x018); /* rq outp */
3432 ZERO(0x01c); /* respq bah */
3433 ZERO(0x024); /* respq outp */
3434 ZERO(0x020); /* respq inp */
3435 ZERO(0x02c); /* test control */
Saeed Bisharad7b0c142009-12-06 18:26:17 +02003436 writel(0x800, port_mmio + EDMA_IORDY_TMOUT);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003437}
3438
3439#undef ZERO
3440
3441#define ZERO(reg) writel(0, hc_mmio + (reg))
3442static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
3443 void __iomem *mmio)
3444{
3445 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
3446
3447 ZERO(0x00c);
3448 ZERO(0x010);
3449 ZERO(0x014);
3450
3451}
3452
3453#undef ZERO
3454
3455static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
3456 void __iomem *mmio, unsigned int n_hc)
3457{
3458 unsigned int port;
3459
3460 for (port = 0; port < hpriv->n_ports; port++)
3461 mv_soc_reset_hc_port(hpriv, mmio, port);
3462
3463 mv_soc_reset_one_hc(hpriv, mmio);
3464
3465 return 0;
3466}
3467
3468static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
3469 void __iomem *mmio)
3470{
3471 return;
3472}
3473
3474static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
3475{
3476 return;
3477}
3478
Martin Michlmayr29b7e432009-05-04 20:58:50 +02003479static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
3480 void __iomem *mmio, unsigned int port)
3481{
3482 void __iomem *port_mmio = mv_port_base(mmio, port);
3483 u32 reg;
3484
3485 reg = readl(port_mmio + PHY_MODE3);
3486 reg &= ~(0x3 << 27); /* SELMUPF (bits 28:27) to 1 */
3487 reg |= (0x1 << 27);
3488 reg &= ~(0x3 << 29); /* SELMUPI (bits 30:29) to 1 */
3489 reg |= (0x1 << 29);
3490 writel(reg, port_mmio + PHY_MODE3);
3491
3492 reg = readl(port_mmio + PHY_MODE4);
3493 reg &= ~0x1; /* SATU_OD8 (bit 0) to 0, reserved bit 16 must be set */
3494 reg |= (0x1 << 16);
3495 writel(reg, port_mmio + PHY_MODE4);
3496
3497 reg = readl(port_mmio + PHY_MODE9_GEN2);
3498 reg &= ~0xf; /* TXAMP[3:0] (bits 3:0) to 8 */
3499 reg |= 0x8;
3500 reg &= ~(0x1 << 14); /* TXAMP[4] (bit 14) to 0 */
3501 writel(reg, port_mmio + PHY_MODE9_GEN2);
3502
3503 reg = readl(port_mmio + PHY_MODE9_GEN1);
3504 reg &= ~0xf; /* TXAMP[3:0] (bits 3:0) to 8 */
3505 reg |= 0x8;
3506 reg &= ~(0x1 << 14); /* TXAMP[4] (bit 14) to 0 */
3507 writel(reg, port_mmio + PHY_MODE9_GEN1);
3508}
3509
3510/**
3511 * soc_is_65n - check if the SoC is a 65 nanometer device
3512 *
3513 * Detect the type of the SoC by reading the PHYCFG_OFS register.
3514 * That register exists only in the 65 nm devices and should contain a
3515 * non-zero value; reading it on older devices returns 0.
3516 */
3517static bool soc_is_65n(struct mv_host_priv *hpriv)
3518{
3519 void __iomem *port0_mmio = mv_port_base(hpriv->base, 0);
3520
3521 if (readl(port0_mmio + PHYCFG_OFS))
3522 return true;
3523 return false;
3524}
3525
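/*
 * mv_setup_ifcfg: program the SATA interface configuration register,
 * optionally setting the bit that enables gen2i (3.0 Gb/s) link speed.
 */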
Mark Lord8e7decd2008-05-02 02:07:51 -04003526static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
Mark Lordb67a1062008-03-31 19:35:13 -04003527{
Mark Lordcae5a292009-04-06 16:43:45 -04003528 u32 ifcfg = readl(port_mmio + SATA_IFCFG);
Mark Lordb67a1062008-03-31 19:35:13 -04003529
Mark Lord8e7decd2008-05-02 02:07:51 -04003530 ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */
Mark Lordb67a1062008-03-31 19:35:13 -04003531 if (want_gen2i)
Mark Lord8e7decd2008-05-02 02:07:51 -04003532 ifcfg |= (1 << 7); /* enable gen2i speed */
Mark Lordcae5a292009-04-06 16:43:45 -04003533 writelfl(ifcfg, port_mmio + SATA_IFCFG);
Mark Lordb67a1062008-03-31 19:35:13 -04003534}
3535
Mark Lorde12bef52008-03-31 19:33:56 -04003536static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
Jeff Garzikc9d39132005-11-13 17:47:51 -05003537 unsigned int port_no)
Brett Russ20f733e2005-09-01 18:26:17 -04003538{
Jeff Garzikc9d39132005-11-13 17:47:51 -05003539 void __iomem *port_mmio = mv_port_base(mmio, port_no);
Brett Russ20f733e2005-09-01 18:26:17 -04003540
Mark Lord8e7decd2008-05-02 02:07:51 -04003541 /*
3542 * The datasheet warns against setting EDMA_RESET when EDMA is active
3543 * (but doesn't say what the problem might be). So we first try
3544 * to disable the EDMA engine before doing the EDMA_RESET operation.
3545 */
Mark Lord0d8be5c2008-04-16 14:56:12 -04003546 mv_stop_edma_engine(port_mmio);
Mark Lordcae5a292009-04-06 16:43:45 -04003547 writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003548
Mark Lordb67a1062008-03-31 19:35:13 -04003549 if (!IS_GEN_I(hpriv)) {
Mark Lord8e7decd2008-05-02 02:07:51 -04003550 /* Enable 3.0gb/s link speed: this survives EDMA_RESET */
3551 mv_setup_ifcfg(port_mmio, 1);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003552 }
Mark Lordb67a1062008-03-31 19:35:13 -04003553 /*
Mark Lord8e7decd2008-05-02 02:07:51 -04003554 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
Mark Lordb67a1062008-03-31 19:35:13 -04003555 * link, and physical layers. It resets all SATA interface registers
Mark Lordcae5a292009-04-06 16:43:45 -04003556 * (except for SATA_IFCFG), and issues a COMRESET to the dev.
Brett Russ20f733e2005-09-01 18:26:17 -04003557 */
Mark Lordcae5a292009-04-06 16:43:45 -04003558 writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
Mark Lordb67a1062008-03-31 19:35:13 -04003559 udelay(25); /* allow reset propagation */
Mark Lordcae5a292009-04-06 16:43:45 -04003560 writelfl(0, port_mmio + EDMA_CMD);
Brett Russ20f733e2005-09-01 18:26:17 -04003561
Jeff Garzikc9d39132005-11-13 17:47:51 -05003562 hpriv->ops->phy_errata(hpriv, mmio, port_no);
3563
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04003564 if (IS_GEN_I(hpriv))
Jeff Garzikc9d39132005-11-13 17:47:51 -05003565 mdelay(1);
3566}
3567
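/*
 * mv_pmp_select: point the SATA interface control register at the given
 * PMP port number, so that subsequent commands/resets target that device.
 */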
Mark Lorde49856d2008-04-16 14:59:07 -04003568static void mv_pmp_select(struct ata_port *ap, int pmp)
Jeff Garzikc9d39132005-11-13 17:47:51 -05003569{
Mark Lorde49856d2008-04-16 14:59:07 -04003570 if (sata_pmp_supported(ap)) {
3571 void __iomem *port_mmio = mv_ap_base(ap);
Mark Lordcae5a292009-04-06 16:43:45 -04003572 u32 reg = readl(port_mmio + SATA_IFCTL);
Mark Lorde49856d2008-04-16 14:59:07 -04003573 int old = reg & 0xf;
Jeff Garzikc9d39132005-11-13 17:47:51 -05003574
Mark Lorde49856d2008-04-16 14:59:07 -04003575 if (old != pmp) {
3576 reg = (reg & ~0xf) | pmp;
Mark Lordcae5a292009-04-06 16:43:45 -04003577 writelfl(reg, port_mmio + SATA_IFCTL);
Mark Lorde49856d2008-04-16 14:59:07 -04003578 }
Tejun Heoda3dbb12007-07-16 14:29:40 +09003579 }
Brett Russ20f733e2005-09-01 18:26:17 -04003580}
3581
Mark Lorde49856d2008-04-16 14:59:07 -04003582static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
3583 unsigned long deadline)
Jeff Garzik22374672005-11-17 10:59:48 -05003584{
Mark Lorde49856d2008-04-16 14:59:07 -04003585 mv_pmp_select(link->ap, sata_srst_pmp(link));
3586 return sata_std_hardreset(link, class, deadline);
3587}
Jeff Garzik0ea9e172007-07-13 17:06:45 -04003588
Mark Lorde49856d2008-04-16 14:59:07 -04003589static int mv_softreset(struct ata_link *link, unsigned int *class,
3590 unsigned long deadline)
3591{
3592 mv_pmp_select(link->ap, sata_srst_pmp(link));
3593 return ata_sff_softreset(link, class, deadline);
Jeff Garzik22374672005-11-17 10:59:48 -05003594}
3595
Tejun Heocc0680a2007-08-06 18:36:23 +09003596static int mv_hardreset(struct ata_link *link, unsigned int *class,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003597 unsigned long deadline)
3598{
Tejun Heocc0680a2007-08-06 18:36:23 +09003599 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003600 struct mv_host_priv *hpriv = ap->host->private_data;
Mark Lordb5624682008-03-31 19:34:40 -04003601 struct mv_port_priv *pp = ap->private_data;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003602 void __iomem *mmio = hpriv->base;
Mark Lord0d8be5c2008-04-16 14:56:12 -04003603 int rc, attempts = 0, extra = 0;
3604 u32 sstatus;
3605 bool online;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003606
Mark Lorde12bef52008-03-31 19:33:56 -04003607 mv_reset_channel(hpriv, mmio, ap->port_no);
Mark Lordb5624682008-03-31 19:34:40 -04003608 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Mark Lordd16ab3f2009-02-25 15:17:43 -05003609 pp->pp_flags &=
3610 ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003611
Mark Lord0d8be5c2008-04-16 14:56:12 -04003612 /* Workaround for errata FEr SATA#10 (part 2) */
3613 do {
Mark Lord17c5aab2008-04-16 14:56:51 -04003614 const unsigned long *timing =
3615 sata_ehc_deb_timing(&link->eh_context);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003616
Mark Lord17c5aab2008-04-16 14:56:51 -04003617 rc = sata_link_hardreset(link, timing, deadline + extra,
3618 &online, NULL);
Mark Lord9dcffd92008-05-14 09:18:12 -04003619 rc = online ? -EAGAIN : rc;
Mark Lord17c5aab2008-04-16 14:56:51 -04003620 if (rc)
Mark Lord0d8be5c2008-04-16 14:56:12 -04003621 return rc;
Mark Lord0d8be5c2008-04-16 14:56:12 -04003622 sata_scr_read(link, SCR_STATUS, &sstatus);
3623 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
3624 /* Force 1.5gb/s link speed and try again */
Mark Lord8e7decd2008-05-02 02:07:51 -04003625 mv_setup_ifcfg(mv_ap_base(ap), 0);
Mark Lord0d8be5c2008-04-16 14:56:12 -04003626 if (time_after(jiffies + HZ, deadline))
3627 extra = HZ; /* only extend it once, max */
3628 }
3629 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
Mark Lord08da1752009-02-25 15:13:03 -05003630 mv_save_cached_regs(ap);
Mark Lord66e57a22009-01-30 18:52:58 -05003631 mv_edma_cfg(ap, 0, 0);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003632
Mark Lord17c5aab2008-04-16 14:56:51 -04003633 return rc;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003634}
3635
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003636static void mv_eh_freeze(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04003637{
Mark Lord1cfd19a2008-04-19 15:05:50 -04003638 mv_stop_edma(ap);
Mark Lordc4de5732008-05-17 13:35:21 -04003639 mv_enable_port_irqs(ap, 0);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003640}
3641
3642static void mv_eh_thaw(struct ata_port *ap)
3643{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003644 struct mv_host_priv *hpriv = ap->host->private_data;
Mark Lordc4de5732008-05-17 13:35:21 -04003645 unsigned int port = ap->port_no;
3646 unsigned int hardport = mv_hardport_from_port(port);
Mark Lord1cfd19a2008-04-19 15:05:50 -04003647 void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003648 void __iomem *port_mmio = mv_ap_base(ap);
Mark Lordc4de5732008-05-17 13:35:21 -04003649 u32 hc_irq_cause;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003650
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003651 /* clear EDMA errors on this port */
Mark Lordcae5a292009-04-06 16:43:45 -04003652 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003653
3654 /* clear pending irq events */
Mark Lordcae6edc2009-01-19 18:05:42 -05003655 hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
Mark Lordcae5a292009-04-06 16:43:45 -04003656 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003657
Mark Lord88e675e2008-05-17 13:36:30 -04003658 mv_enable_port_irqs(ap, ERR_IRQ);
Brett Russ31961942005-09-30 01:36:00 -04003659}
3660
Brett Russ05b308e2005-10-05 17:08:53 -04003661/**
3662 * mv_port_init - Perform some early initialization on a single port.
3663 * @port: libata data structure storing shadow register addresses
3664 * @port_mmio: base address of the port
3665 *
3666 * Initialize shadow register mmio addresses, clear outstanding
3667 * interrupts on the port, and unmask interrupts for the future
3668 * start of the port.
3669 *
3670 * LOCKING:
3671 * Inherited from caller.
3672 */
Brett Russ31961942005-09-30 01:36:00 -04003673static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
3674{
Mark Lordcae5a292009-04-06 16:43:45 -04003675 void __iomem *serr, *shd_base = port_mmio + SHD_BLK;
Brett Russ31961942005-09-30 01:36:00 -04003676
Jeff Garzik8b260242005-11-12 12:32:50 -05003677 /* PIO related setup
Brett Russ31961942005-09-30 01:36:00 -04003678 */
3679 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
Jeff Garzik8b260242005-11-12 12:32:50 -05003680 port->error_addr =
Brett Russ31961942005-09-30 01:36:00 -04003681 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
3682 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
3683 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
3684 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
3685 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
3686 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
Jeff Garzik8b260242005-11-12 12:32:50 -05003687 port->status_addr =
Brett Russ31961942005-09-30 01:36:00 -04003688 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
3689 /* special case: control/altstatus doesn't have ATA_REG_ address */
Mark Lordcae5a292009-04-06 16:43:45 -04003690 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;
Brett Russ31961942005-09-30 01:36:00 -04003691
Brett Russ31961942005-09-30 01:36:00 -04003692 /* Clear any currently outstanding port interrupt conditions */
Mark Lordcae5a292009-04-06 16:43:45 -04003693 serr = port_mmio + mv_scr_offset(SCR_ERROR);
3694 writelfl(readl(serr), serr);
3695 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
Brett Russ31961942005-09-30 01:36:00 -04003696
Mark Lord646a4da2008-01-26 18:30:37 -05003697 /* unmask all non-transient EDMA error interrupts */
Mark Lordcae5a292009-04-06 16:43:45 -04003698 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK);
Brett Russ20f733e2005-09-01 18:26:17 -04003699
Jeff Garzik8b260242005-11-12 12:32:50 -05003700 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
Mark Lordcae5a292009-04-06 16:43:45 -04003701 readl(port_mmio + EDMA_CFG),
3702 readl(port_mmio + EDMA_ERR_IRQ_CAUSE),
3703 readl(port_mmio + EDMA_ERR_IRQ_MASK));
Brett Russ20f733e2005-09-01 18:26:17 -04003704}
3705
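/*
 * Returns 1 if the chip is operating on a PCI-X bus (per MV_PCI_MODE),
 * 0 for SoC or PCIe variants and for conventional PCI mode.
 */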
Mark Lord616d4a92008-05-02 02:08:32 -04003706static unsigned int mv_in_pcix_mode(struct ata_host *host)
3707{
3708 struct mv_host_priv *hpriv = host->private_data;
3709 void __iomem *mmio = hpriv->base;
3710 u32 reg;
3711
Mark Lord1f398472008-05-27 17:54:48 -04003712 if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
Mark Lord616d4a92008-05-02 02:08:32 -04003713 return 0; /* not PCI-X capable */
Mark Lordcae5a292009-04-06 16:43:45 -04003714 reg = readl(mmio + MV_PCI_MODE);
Mark Lord616d4a92008-05-02 02:08:32 -04003715 if ((reg & MV_PCI_MODE_MASK) == 0)
3716 return 0; /* conventional PCI mode */
3717 return 1; /* chip is in PCI-X mode */
3718}
3719
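/*
 * PCI "cut-through" is reported as unsafe only when the chip is not in
 * PCI-X mode and the MRDTRIG bit is set in MV_PCI_COMMAND.
 */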
3720static int mv_pci_cut_through_okay(struct ata_host *host)
3721{
3722 struct mv_host_priv *hpriv = host->private_data;
3723 void __iomem *mmio = hpriv->base;
3724 u32 reg;
3725
3726 if (!mv_in_pcix_mode(host)) {
Mark Lordcae5a292009-04-06 16:43:45 -04003727 reg = readl(mmio + MV_PCI_COMMAND);
3728 if (reg & MV_PCI_COMMAND_MRDTRIG)
Mark Lord616d4a92008-05-02 02:08:32 -04003729 return 0; /* not okay */
3730 }
3731 return 1; /* okay */
3732}
3733
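/*
 * 60x1-B2 errata PCI#7: when the chip is operating in PCI-X mode,
 * clear the MWRCOM bit in MV_PCI_COMMAND.
 */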
Mark Lord65ad7fef2009-04-06 15:24:14 -04003734static void mv_60x1b2_errata_pci7(struct ata_host *host)
3735{
3736 struct mv_host_priv *hpriv = host->private_data;
3737 void __iomem *mmio = hpriv->base;
3738
3739 /* workaround for 60x1-B2 errata PCI#7 */
3740 if (mv_in_pcix_mode(host)) {
Mark Lordcae5a292009-04-06 16:43:45 -04003741 u32 reg = readl(mmio + MV_PCI_COMMAND);
3742 writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND);
Mark Lord65ad7fef2009-04-06 15:24:14 -04003743 }
3744}
3745
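/*
 * mv_chip_id() selects the chip-family ops vector (Gen-I, Gen-II/IIE,
 * or SoC) and records errata workaround flags, keyed off the board
 * index and the PCI revision ID.  Returns 0 on success, or 1 if the
 * board index is unrecognized.
 */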
Tejun Heo4447d352007-04-17 23:44:08 +09003746static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003747{
Tejun Heo4447d352007-04-17 23:44:08 +09003748 struct pci_dev *pdev = to_pci_dev(host->dev);
3749 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003750 u32 hp_flags = hpriv->hp_flags;
3751
Jeff Garzik5796d1c2007-10-26 00:03:37 -04003752 switch (board_idx) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05003753 case chip_5080:
3754 hpriv->ops = &mv5xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04003755 hp_flags |= MV_HP_GEN_I;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003756
Auke Kok44c10132007-06-08 15:46:36 -07003757 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05003758 case 0x1:
3759 hp_flags |= MV_HP_ERRATA_50XXB0;
3760 break;
3761 case 0x3:
3762 hp_flags |= MV_HP_ERRATA_50XXB2;
3763 break;
3764 default:
3765 dev_printk(KERN_WARNING, &pdev->dev,
3766 "Applying 50XXB2 workarounds to unknown rev\n");
3767 hp_flags |= MV_HP_ERRATA_50XXB2;
3768 break;
3769 }
3770 break;
3771
3772 case chip_504x:
3773 case chip_508x:
3774 hpriv->ops = &mv5xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04003775 hp_flags |= MV_HP_GEN_I;
Jeff Garzik47c2b672005-11-12 21:13:17 -05003776
Auke Kok44c10132007-06-08 15:46:36 -07003777 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05003778 case 0x0:
3779 hp_flags |= MV_HP_ERRATA_50XXB0;
3780 break;
3781 case 0x3:
3782 hp_flags |= MV_HP_ERRATA_50XXB2;
3783 break;
3784 default:
3785 dev_printk(KERN_WARNING, &pdev->dev,
3786 "Applying B2 workarounds to unknown rev\n");
3787 hp_flags |= MV_HP_ERRATA_50XXB2;
3788 break;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003789 }
3790 break;
3791
3792 case chip_604x:
3793 case chip_608x:
Jeff Garzik47c2b672005-11-12 21:13:17 -05003794 hpriv->ops = &mv6xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04003795 hp_flags |= MV_HP_GEN_II;
Jeff Garzik47c2b672005-11-12 21:13:17 -05003796
Auke Kok44c10132007-06-08 15:46:36 -07003797 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05003798 case 0x7:
Mark Lord65ad7fef2009-04-06 15:24:14 -04003799 mv_60x1b2_errata_pci7(host);
Jeff Garzik47c2b672005-11-12 21:13:17 -05003800 hp_flags |= MV_HP_ERRATA_60X1B2;
3801 break;
3802 case 0x9:
3803 hp_flags |= MV_HP_ERRATA_60X1C0;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003804 break;
3805 default:
3806 dev_printk(KERN_WARNING, &pdev->dev,
Jeff Garzik47c2b672005-11-12 21:13:17 -05003807			       "Applying 60X1B2 workarounds to unknown rev\n");
3808 hp_flags |= MV_HP_ERRATA_60X1B2;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003809 break;
3810 }
3811 break;
3812
Jeff Garzike4e7b892006-01-31 12:18:41 -05003813 case chip_7042:
Mark Lord616d4a92008-05-02 02:08:32 -04003814 hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
Mark Lord306b30f2007-12-04 14:07:52 -05003815 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
3816 (pdev->device == 0x2300 || pdev->device == 0x2310))
3817 {
Mark Lord4e520032007-12-11 12:58:05 -05003818 /*
3819 * Highpoint RocketRAID PCIe 23xx series cards:
3820 *
3821 * Unconfigured drives are treated as "Legacy"
3822 * by the BIOS, and it overwrites sector 8 with
3823 * a "Lgcy" metadata block prior to Linux boot.
3824 *
3825 * Configured drives (RAID or JBOD) leave sector 8
3826 * alone, but instead overwrite a high numbered
3827 * sector for the RAID metadata. This sector can
3828 * be determined exactly, by truncating the physical
3829 * drive capacity to a nice even GB value.
3830 *
3831 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
3832 *
3833 * Warn the user, lest they think we're just buggy.
3834 */
3835 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
3836 " BIOS CORRUPTS DATA on all attached drives,"
3837 " regardless of if/how they are configured."
3838 " BEWARE!\n");
3839 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
3840 " use sectors 8-9 on \"Legacy\" drives,"
3841 " and avoid the final two gigabytes on"
3842 " all RocketRAID BIOS initialized drives.\n");
Mark Lord306b30f2007-12-04 14:07:52 -05003843 }
Mark Lord8e7decd2008-05-02 02:07:51 -04003844		/* fall through */
Jeff Garzike4e7b892006-01-31 12:18:41 -05003845 case chip_6042:
3846 hpriv->ops = &mv6xxx_ops;
Jeff Garzike4e7b892006-01-31 12:18:41 -05003847 hp_flags |= MV_HP_GEN_IIE;
Mark Lord616d4a92008-05-02 02:08:32 -04003848 if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
3849 hp_flags |= MV_HP_CUT_THROUGH;
Jeff Garzike4e7b892006-01-31 12:18:41 -05003850
Auke Kok44c10132007-06-08 15:46:36 -07003851 switch (pdev->revision) {
Mark Lord5cf73bf2008-05-27 17:58:56 -04003852 case 0x2: /* Rev.B0: the first/only public release */
Jeff Garzike4e7b892006-01-31 12:18:41 -05003853 hp_flags |= MV_HP_ERRATA_60X1C0;
3854 break;
3855 default:
3856 dev_printk(KERN_WARNING, &pdev->dev,
3857 "Applying 60X1C0 workarounds to unknown rev\n");
3858 hp_flags |= MV_HP_ERRATA_60X1C0;
3859 break;
3860 }
3861 break;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003862 case chip_soc:
Martin Michlmayr29b7e432009-05-04 20:58:50 +02003863 if (soc_is_65n(hpriv))
3864 hpriv->ops = &mv_soc_65n_ops;
3865 else
3866 hpriv->ops = &mv_soc_ops;
Saeed Bisharaeb3a55a2008-08-04 00:52:55 -11003867 hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
3868 MV_HP_ERRATA_60X1C0;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003869 break;
Jeff Garzike4e7b892006-01-31 12:18:41 -05003870
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003871 default:
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003872 dev_printk(KERN_ERR, host->dev,
Jeff Garzik5796d1c2007-10-26 00:03:37 -04003873 "BUG: invalid board index %u\n", board_idx);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003874 return 1;
3875 }
3876
3877 hpriv->hp_flags = hp_flags;
Mark Lord02a121d2007-12-01 13:07:22 -05003878 if (hp_flags & MV_HP_PCIE) {
Mark Lordcae5a292009-04-06 16:43:45 -04003879 hpriv->irq_cause_offset = PCIE_IRQ_CAUSE;
3880 hpriv->irq_mask_offset = PCIE_IRQ_MASK;
Mark Lord02a121d2007-12-01 13:07:22 -05003881 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
3882 } else {
Mark Lordcae5a292009-04-06 16:43:45 -04003883 hpriv->irq_cause_offset = PCI_IRQ_CAUSE;
3884 hpriv->irq_mask_offset = PCI_IRQ_MASK;
Mark Lord02a121d2007-12-01 13:07:22 -05003885 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
3886 }
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003887
3888 return 0;
3889}
3890
Brett Russ05b308e2005-10-05 17:08:53 -04003891/**
Jeff Garzik47c2b672005-11-12 21:13:17 -05003892 * mv_init_host - Perform some early initialization of the host.
Tejun Heo4447d352007-04-17 23:44:08 +09003893 * @host: ATA host to initialize
Brett Russ05b308e2005-10-05 17:08:53 -04003894 *
3895 * If possible, do an early global reset of the host. Then do
3896 * our port init and clear/unmask all/relevant host interrupts.
3897 *
3898 * LOCKING:
3899 * Inherited from caller.
3900 */
Saeed Bishara1bfeff02009-12-17 01:05:00 -05003901static int mv_init_host(struct ata_host *host)
Brett Russ20f733e2005-09-01 18:26:17 -04003902{
3903 int rc = 0, n_hc, port, hc;
Tejun Heo4447d352007-04-17 23:44:08 +09003904 struct mv_host_priv *hpriv = host->private_data;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003905 void __iomem *mmio = hpriv->base;
Jeff Garzik47c2b672005-11-12 21:13:17 -05003906
Saeed Bishara1bfeff02009-12-17 01:05:00 -05003907 rc = mv_chip_id(host, hpriv->board_idx);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003908 if (rc)
Mark Lord352fab72008-04-19 14:43:42 -04003909 goto done;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003910
Mark Lord1f398472008-05-27 17:54:48 -04003911 if (IS_SOC(hpriv)) {
Mark Lordcae5a292009-04-06 16:43:45 -04003912 hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE;
3913 hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK;
Mark Lord1f398472008-05-27 17:54:48 -04003914 } else {
Mark Lordcae5a292009-04-06 16:43:45 -04003915 hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE;
3916 hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003917 }
Mark Lord352fab72008-04-19 14:43:42 -04003918
Thomas Reitmayr5d0fb2e2009-01-24 20:24:58 +01003919 /* initialize shadow irq mask with register's value */
3920 hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);
3921
Mark Lord352fab72008-04-19 14:43:42 -04003922 /* global interrupt mask: 0 == mask everything */
Mark Lordc4de5732008-05-17 13:35:21 -04003923 mv_set_main_irq_mask(host, ~0, 0);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003924
Tejun Heo4447d352007-04-17 23:44:08 +09003925 n_hc = mv_get_hc_count(host->ports[0]->flags);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003926
Tejun Heo4447d352007-04-17 23:44:08 +09003927 for (port = 0; port < host->n_ports; port++)
Martin Michlmayr29b7e432009-05-04 20:58:50 +02003928 if (hpriv->ops->read_preamp)
3929 hpriv->ops->read_preamp(hpriv, port, mmio);
Brett Russ20f733e2005-09-01 18:26:17 -04003930
Jeff Garzikc9d39132005-11-13 17:47:51 -05003931 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
Jeff Garzik47c2b672005-11-12 21:13:17 -05003932 if (rc)
Brett Russ20f733e2005-09-01 18:26:17 -04003933 goto done;
Brett Russ20f733e2005-09-01 18:26:17 -04003934
Jeff Garzik522479f2005-11-12 22:14:02 -05003935 hpriv->ops->reset_flash(hpriv, mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003936 hpriv->ops->reset_bus(host, mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -05003937 hpriv->ops->enable_leds(hpriv, mmio);
Brett Russ20f733e2005-09-01 18:26:17 -04003938
Tejun Heo4447d352007-04-17 23:44:08 +09003939 for (port = 0; port < host->n_ports; port++) {
Tejun Heocbcdd872007-08-18 13:14:55 +09003940 struct ata_port *ap = host->ports[port];
Jeff Garzik2a47ce02005-11-12 23:05:14 -05003941 void __iomem *port_mmio = mv_port_base(mmio, port);
Tejun Heocbcdd872007-08-18 13:14:55 +09003942
3943 mv_port_init(&ap->ioaddr, port_mmio);
Brett Russ20f733e2005-09-01 18:26:17 -04003944 }
3945
3946 for (hc = 0; hc < n_hc; hc++) {
Brett Russ31961942005-09-30 01:36:00 -04003947 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3948
3949 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
3950 "(before clear)=0x%08x\n", hc,
Mark Lordcae5a292009-04-06 16:43:45 -04003951 readl(hc_mmio + HC_CFG),
3952 readl(hc_mmio + HC_IRQ_CAUSE));
Brett Russ31961942005-09-30 01:36:00 -04003953
3954 /* Clear any currently outstanding hc interrupt conditions */
Mark Lordcae5a292009-04-06 16:43:45 -04003955 writelfl(0, hc_mmio + HC_IRQ_CAUSE);
Brett Russ20f733e2005-09-01 18:26:17 -04003956 }
3957
Mark Lord44c65d12009-04-06 12:29:49 -04003958 if (!IS_SOC(hpriv)) {
3959 /* Clear any currently outstanding host interrupt conditions */
Mark Lordcae5a292009-04-06 16:43:45 -04003960 writelfl(0, mmio + hpriv->irq_cause_offset);
Brett Russ31961942005-09-30 01:36:00 -04003961
Mark Lord44c65d12009-04-06 12:29:49 -04003962 /* and unmask interrupt generation for host regs */
Mark Lordcae5a292009-04-06 16:43:45 -04003963 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset);
Mark Lord44c65d12009-04-06 12:29:49 -04003964 }
Jeff Garzikfb621e22007-02-25 04:19:45 -05003965
Mark Lord6be96ac2009-02-19 10:38:04 -05003966 /*
3967 * enable only global host interrupts for now.
3968 * The per-port interrupts get done later as ports are set up.
3969 */
3970 mv_set_main_irq_mask(host, 0, PCI_ERR);
Mark Lord2b748a02009-03-10 22:01:17 -04003971 mv_set_irq_coalescing(host, irq_coalescing_io_count,
3972 irq_coalescing_usecs);
Brett Russ31961942005-09-30 01:36:00 -04003973done:
Brett Russ20f733e2005-09-01 18:26:17 -04003974 return rc;
3975}
3976
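/*
 * Allocate managed DMA pools for the command request queue (CRQB),
 * command response queue (CRPB), and scatter/gather tables; returns
 * -ENOMEM if any of the three allocations fails.
 */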
Byron Bradleyfbf14e22008-02-10 21:17:30 +00003977static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
3978{
3979 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
3980 MV_CRQB_Q_SZ, 0);
3981 if (!hpriv->crqb_pool)
3982 return -ENOMEM;
3983
3984 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
3985 MV_CRPB_Q_SZ, 0);
3986 if (!hpriv->crpb_pool)
3987 return -ENOMEM;
3988
3989 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
3990 MV_SG_TBL_SZ, 0);
3991 if (!hpriv->sg_tbl_pool)
3992 return -ENOMEM;
3993
3994 return 0;
3995}
3996
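/*
 * Program the MBUS address-decoding windows: disable all four windows
 * first, then map one window per DRAM chip-select described by the
 * platform's mbus_dram_target_info.
 */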
Lennert Buytenhek15a32632008-03-27 14:51:39 -04003997static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
3998 struct mbus_dram_target_info *dram)
3999{
4000 int i;
4001
4002 for (i = 0; i < 4; i++) {
4003 writel(0, hpriv->base + WINDOW_CTRL(i));
4004 writel(0, hpriv->base + WINDOW_BASE(i));
4005 }
4006
4007 for (i = 0; i < dram->num_cs; i++) {
4008 struct mbus_dram_window *cs = dram->cs + i;
4009
4010 writel(((cs->size - 1) & 0xffff0000) |
4011 (cs->mbus_attr << 8) |
4012 (dram->mbus_dram_target_id << 4) | 1,
4013 hpriv->base + WINDOW_CTRL(i));
4014 writel(cs->base, hpriv->base + WINDOW_BASE(i));
4015 }
4016}
4017
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05004018/**
 4019 * mv_platform_probe - handle a positive probe of an SoC Marvell host
4021 * @pdev: platform device found
4022 *
4023 * LOCKING:
4024 * Inherited from caller.
4025 */
4026static int mv_platform_probe(struct platform_device *pdev)
4027{
4028 static int printed_version;
4029 const struct mv_sata_platform_data *mv_platform_data;
4030 const struct ata_port_info *ppi[] =
4031 { &mv_port_info[chip_soc], NULL };
4032 struct ata_host *host;
4033 struct mv_host_priv *hpriv;
4034 struct resource *res;
4035 int n_ports, rc;
4036
4037 if (!printed_version++)
4038 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
4039
4040 /*
4041 * Simple resource validation ..
4042 */
4043 if (unlikely(pdev->num_resources != 2)) {
4044 dev_err(&pdev->dev, "invalid number of resources\n");
4045 return -EINVAL;
4046 }
4047
4048 /*
4049 * Get the register base first
4050 */
4051 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4052 if (res == NULL)
4053 return -EINVAL;
4054
4055 /* allocate host */
4056 mv_platform_data = pdev->dev.platform_data;
4057 n_ports = mv_platform_data->n_ports;
4058
4059 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
4060 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
4061
4062 if (!host || !hpriv)
4063 return -ENOMEM;
4064 host->private_data = hpriv;
4065 hpriv->n_ports = n_ports;
Saeed Bishara1bfeff02009-12-17 01:05:00 -05004066 hpriv->board_idx = chip_soc;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05004067
4068 host->iomap = NULL;
Saeed Bisharaf1cb0ea2008-02-18 07:42:28 -11004069	hpriv->base = devm_ioremap(&pdev->dev, res->start,
Julia Lawall041b5ea2009-08-06 16:05:08 -07004070				   resource_size(res));
	/* devm_ioremap() can fail; bail out before adjusting the base */
	if (!hpriv->base)
		return -ENOMEM;
Mark Lordcae5a292009-04-06 16:43:45 -04004071	hpriv->base -= SATAHC0_REG_BASE;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05004072
Saeed Bisharac77a2f42009-12-06 18:26:18 +02004073#if defined(CONFIG_HAVE_CLK)
4074 hpriv->clk = clk_get(&pdev->dev, NULL);
4075 if (IS_ERR(hpriv->clk))
4076 dev_notice(&pdev->dev, "cannot get clkdev\n");
4077 else
4078 clk_enable(hpriv->clk);
4079#endif
4080
Lennert Buytenhek15a32632008-03-27 14:51:39 -04004081 /*
4082 * (Re-)program MBUS remapping windows if we are asked to.
4083 */
4084 if (mv_platform_data->dram != NULL)
4085 mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
4086
Byron Bradleyfbf14e22008-02-10 21:17:30 +00004087 rc = mv_create_dma_pools(hpriv, &pdev->dev);
4088 if (rc)
Saeed Bisharac77a2f42009-12-06 18:26:18 +02004089 goto err;
Byron Bradleyfbf14e22008-02-10 21:17:30 +00004090
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05004091 /* initialize adapter */
Saeed Bishara1bfeff02009-12-17 01:05:00 -05004092 rc = mv_init_host(host);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05004093 if (rc)
Saeed Bisharac77a2f42009-12-06 18:26:18 +02004094 goto err;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05004095
4096 dev_printk(KERN_INFO, &pdev->dev,
4097 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
4098 host->n_ports);
4099
4100 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
4101 IRQF_SHARED, &mv6_sht);
Saeed Bisharac77a2f42009-12-06 18:26:18 +02004102err:
4103#if defined(CONFIG_HAVE_CLK)
4104 if (!IS_ERR(hpriv->clk)) {
4105 clk_disable(hpriv->clk);
4106 clk_put(hpriv->clk);
4107 }
4108#endif
4109
4110 return rc;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05004111}
4112
 4113/**
4115 * mv_platform_remove - unplug a platform interface
4116 * @pdev: platform device
4117 *
4118 * A platform bus SATA device has been unplugged. Perform the needed
4119 * cleanup. Also called on module unload for any active devices.
4120 */
4121static int __devexit mv_platform_remove(struct platform_device *pdev)
4122{
4123 struct device *dev = &pdev->dev;
4124 struct ata_host *host = dev_get_drvdata(dev);
Saeed Bisharac77a2f42009-12-06 18:26:18 +02004125#if defined(CONFIG_HAVE_CLK)
4126 struct mv_host_priv *hpriv = host->private_data;
4127#endif
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05004128 ata_host_detach(host);
Saeed Bisharac77a2f42009-12-06 18:26:18 +02004129
4130#if defined(CONFIG_HAVE_CLK)
4131 if (!IS_ERR(hpriv->clk)) {
4132 clk_disable(hpriv->clk);
4133 clk_put(hpriv->clk);
4134 }
4135#endif
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05004136 return 0;
4137}
4138
Saeed Bishara6481f2b2009-12-06 18:26:19 +02004139#ifdef CONFIG_PM
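/*
 * Platform power management: suspend only suspends the libata host;
 * resume reprograms the MBUS windows (if provided) and re-runs
 * mv_init_host() before resuming the host.
 */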
4140static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
4141{
4142 struct ata_host *host = dev_get_drvdata(&pdev->dev);
4143 if (host)
4144 return ata_host_suspend(host, state);
4145 else
4146 return 0;
4147}
4148
4149static int mv_platform_resume(struct platform_device *pdev)
4150{
4151 struct ata_host *host = dev_get_drvdata(&pdev->dev);
4152 int ret;
4153
4154 if (host) {
4155 struct mv_host_priv *hpriv = host->private_data;
4156 const struct mv_sata_platform_data *mv_platform_data = \
4157 pdev->dev.platform_data;
4158 /*
4159 * (Re-)program MBUS remapping windows if we are asked to.
4160 */
4161 if (mv_platform_data->dram != NULL)
4162 mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
4163
4164 /* initialize adapter */
Saeed Bishara1bfeff02009-12-17 01:05:00 -05004165 ret = mv_init_host(host);
Saeed Bishara6481f2b2009-12-06 18:26:19 +02004166 if (ret) {
4167 printk(KERN_ERR DRV_NAME ": Error during HW init\n");
4168 return ret;
4169 }
4170 ata_host_resume(host);
4171 }
4172
4173 return 0;
4174}
4175#else
4176#define mv_platform_suspend NULL
4177#define mv_platform_resume NULL
4178#endif
4179
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05004180static struct platform_driver mv_platform_driver = {
4181 .probe = mv_platform_probe,
4182 .remove = __devexit_p(mv_platform_remove),
Saeed Bishara6481f2b2009-12-06 18:26:19 +02004183 .suspend = mv_platform_suspend,
4184 .resume = mv_platform_resume,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05004185 .driver = {
4186 .name = DRV_NAME,
4187 .owner = THIS_MODULE,
4188 },
4189};
4190
4191
Saeed Bishara7bb3c522008-01-30 11:50:45 -11004192#ifdef CONFIG_PCI
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05004193static int mv_pci_init_one(struct pci_dev *pdev,
4194 const struct pci_device_id *ent);
Saeed Bisharab2dec482009-12-06 18:26:22 +02004195#ifdef CONFIG_PM
4196static int mv_pci_device_resume(struct pci_dev *pdev);
4197#endif
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05004198
Saeed Bishara7bb3c522008-01-30 11:50:45 -11004199
4200static struct pci_driver mv_pci_driver = {
4201 .name = DRV_NAME,
4202 .id_table = mv_pci_tbl,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05004203 .probe = mv_pci_init_one,
Saeed Bishara7bb3c522008-01-30 11:50:45 -11004204 .remove = ata_pci_remove_one,
Saeed Bisharab2dec482009-12-06 18:26:22 +02004205#ifdef CONFIG_PM
4206 .suspend = ata_pci_device_suspend,
4207 .resume = mv_pci_device_resume,
4208#endif
4209
Saeed Bishara7bb3c522008-01-30 11:50:45 -11004210};
4211
Saeed Bishara7bb3c522008-01-30 11:50:45 -11004212/* move to PCI layer or libata core? */
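/*
 * Try to enable 64-bit DMA (streaming and consistent masks); fall back
 * to 32-bit masks if either attempt fails.
 */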
4213static int pci_go_64(struct pci_dev *pdev)
4214{
4215 int rc;
4216
Yang Hongyang6a355282009-04-06 19:01:13 -07004217 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4218 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
Saeed Bishara7bb3c522008-01-30 11:50:45 -11004219 if (rc) {
Yang Hongyang284901a2009-04-06 19:01:15 -07004220 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Saeed Bishara7bb3c522008-01-30 11:50:45 -11004221 if (rc) {
4222 dev_printk(KERN_ERR, &pdev->dev,
4223 "64-bit DMA enable failed\n");
4224 return rc;
4225 }
4226 }
4227 } else {
Yang Hongyang284901a2009-04-06 19:01:15 -07004228 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Saeed Bishara7bb3c522008-01-30 11:50:45 -11004229 if (rc) {
4230 dev_printk(KERN_ERR, &pdev->dev,
4231 "32-bit DMA enable failed\n");
4232 return rc;
4233 }
Yang Hongyang284901a2009-04-06 19:01:15 -07004234 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Saeed Bishara7bb3c522008-01-30 11:50:45 -11004235 if (rc) {
4236 dev_printk(KERN_ERR, &pdev->dev,
4237 "32-bit consistent DMA enable failed\n");
4238 return rc;
4239 }
4240 }
4241
4242 return rc;
4243}
4244
Brett Russ05b308e2005-10-05 17:08:53 -04004245/**
4246 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09004247 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04004248 *
4249 * FIXME: complete this.
4250 *
4251 * LOCKING:
4252 * Inherited from caller.
4253 */
Tejun Heo4447d352007-04-17 23:44:08 +09004254static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04004255{
Tejun Heo4447d352007-04-17 23:44:08 +09004256 struct pci_dev *pdev = to_pci_dev(host->dev);
4257 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07004258 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04004259 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04004260
 4261	/* Read the PCI device class byte (the "scc" value), used below to
 4262	 * report whether the chip presents itself as a SCSI or RAID device.
 4263	 */
Brett Russ31961942005-09-30 01:36:00 -04004264 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
4265 if (scc == 0)
4266 scc_s = "SCSI";
4267 else if (scc == 0x01)
4268 scc_s = "RAID";
4269 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04004270 scc_s = "?";
4271
4272 if (IS_GEN_I(hpriv))
4273 gen = "I";
4274 else if (IS_GEN_II(hpriv))
4275 gen = "II";
4276 else if (IS_GEN_IIE(hpriv))
4277 gen = "IIE";
4278 else
4279 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04004280
Jeff Garzika9524a72005-10-30 14:39:11 -05004281 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04004282 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
4283 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04004284 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
4285}
4286
Brett Russ05b308e2005-10-05 17:08:53 -04004287/**
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05004288 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
Brett Russ05b308e2005-10-05 17:08:53 -04004289 * @pdev: PCI device found
4290 * @ent: PCI device ID entry for the matched host
4291 *
4292 * LOCKING:
4293 * Inherited from caller.
4294 */
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05004295static int mv_pci_init_one(struct pci_dev *pdev,
4296 const struct pci_device_id *ent)
Brett Russ20f733e2005-09-01 18:26:17 -04004297{
Jeff Garzik2dcb4072007-10-19 06:42:56 -04004298 static int printed_version;
Brett Russ20f733e2005-09-01 18:26:17 -04004299 unsigned int board_idx = (unsigned int)ent->driver_data;
Tejun Heo4447d352007-04-17 23:44:08 +09004300 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
4301 struct ata_host *host;
4302 struct mv_host_priv *hpriv;
Saeed Bisharac4bc7d72009-12-06 18:26:20 +02004303 int n_ports, port, rc;
Brett Russ20f733e2005-09-01 18:26:17 -04004304
Jeff Garzika9524a72005-10-30 14:39:11 -05004305 if (!printed_version++)
4306 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
Brett Russ20f733e2005-09-01 18:26:17 -04004307
Tejun Heo4447d352007-04-17 23:44:08 +09004308 /* allocate host */
4309 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
4310
4311 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
4312 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
4313 if (!host || !hpriv)
4314 return -ENOMEM;
4315 host->private_data = hpriv;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05004316 hpriv->n_ports = n_ports;
Saeed Bishara1bfeff02009-12-17 01:05:00 -05004317 hpriv->board_idx = board_idx;
Tejun Heo4447d352007-04-17 23:44:08 +09004318
4319 /* acquire resources */
Tejun Heo24dc5f32007-01-20 16:00:28 +09004320 rc = pcim_enable_device(pdev);
4321 if (rc)
Brett Russ20f733e2005-09-01 18:26:17 -04004322 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04004323
Tejun Heo0d5ff562007-02-01 15:06:36 +09004324 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
4325 if (rc == -EBUSY)
Tejun Heo24dc5f32007-01-20 16:00:28 +09004326 pcim_pin_device(pdev);
Tejun Heo0d5ff562007-02-01 15:06:36 +09004327 if (rc)
Tejun Heo24dc5f32007-01-20 16:00:28 +09004328 return rc;
Tejun Heo4447d352007-04-17 23:44:08 +09004329 host->iomap = pcim_iomap_table(pdev);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05004330 hpriv->base = host->iomap[MV_PRIMARY_BAR];
Brett Russ20f733e2005-09-01 18:26:17 -04004331
Jeff Garzikd88184f2007-02-26 01:26:06 -05004332 rc = pci_go_64(pdev);
4333 if (rc)
4334 return rc;
4335
Mark Lordda2fa9b2008-01-26 18:32:45 -05004336 rc = mv_create_dma_pools(hpriv, &pdev->dev);
4337 if (rc)
4338 return rc;
4339
Saeed Bisharac4bc7d72009-12-06 18:26:20 +02004340 for (port = 0; port < host->n_ports; port++) {
4341 struct ata_port *ap = host->ports[port];
4342 void __iomem *port_mmio = mv_port_base(hpriv->base, port);
4343 unsigned int offset = port_mmio - hpriv->base;
4344
4345 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
4346 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
4347 }
4348
Brett Russ20f733e2005-09-01 18:26:17 -04004349 /* initialize adapter */
Saeed Bishara1bfeff02009-12-17 01:05:00 -05004350 rc = mv_init_host(host);
Tejun Heo24dc5f32007-01-20 16:00:28 +09004351 if (rc)
4352 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04004353
Mark Lord6d3c30e2009-01-21 10:31:29 -05004354 /* Enable message-switched interrupts, if requested */
4355 if (msi && pci_enable_msi(pdev) == 0)
4356 hpriv->hp_flags |= MV_HP_FLAG_MSI;
Brett Russ20f733e2005-09-01 18:26:17 -04004357
Brett Russ31961942005-09-30 01:36:00 -04004358 mv_dump_pci_cfg(pdev, 0x68);
Tejun Heo4447d352007-04-17 23:44:08 +09004359 mv_print_info(host);
Brett Russ20f733e2005-09-01 18:26:17 -04004360
Tejun Heo4447d352007-04-17 23:44:08 +09004361 pci_set_master(pdev);
Jeff Garzikea8b4db2007-07-17 02:21:50 -04004362 pci_try_set_mwi(pdev);
Tejun Heo4447d352007-04-17 23:44:08 +09004363 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
Jeff Garzikc5d3e452007-07-11 18:30:50 -04004364 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
Brett Russ20f733e2005-09-01 18:26:17 -04004365}
Saeed Bisharab2dec482009-12-06 18:26:22 +02004366
4367#ifdef CONFIG_PM
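/*
 * PCI resume: after the generic libata PCI resume, fully re-initialize
 * the controller with mv_init_host() before resuming the ATA host.
 */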
4368static int mv_pci_device_resume(struct pci_dev *pdev)
4369{
4370 struct ata_host *host = dev_get_drvdata(&pdev->dev);
4371 int rc;
4372
4373 rc = ata_pci_device_do_resume(pdev);
4374 if (rc)
4375 return rc;
4376
4377 /* initialize adapter */
4378 rc = mv_init_host(host);
4379 if (rc)
4380 return rc;
4381
4382 ata_host_resume(host);
4383
4384 return 0;
4385}
4386#endif
Saeed Bishara7bb3c522008-01-30 11:50:45 -11004387#endif
Brett Russ20f733e2005-09-01 18:26:17 -04004388
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05004389static int mv_platform_probe(struct platform_device *pdev);
4390static int __devexit mv_platform_remove(struct platform_device *pdev);
4391
Brett Russ20f733e2005-09-01 18:26:17 -04004392static int __init mv_init(void)
4393{
Saeed Bishara7bb3c522008-01-30 11:50:45 -11004394 int rc = -ENODEV;
4395#ifdef CONFIG_PCI
4396 rc = pci_register_driver(&mv_pci_driver);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05004397 if (rc < 0)
4398 return rc;
4399#endif
4400 rc = platform_driver_register(&mv_platform_driver);
4401
4402#ifdef CONFIG_PCI
4403 if (rc < 0)
4404 pci_unregister_driver(&mv_pci_driver);
Saeed Bishara7bb3c522008-01-30 11:50:45 -11004405#endif
4406 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04004407}
4408
4409static void __exit mv_exit(void)
4410{
Saeed Bishara7bb3c522008-01-30 11:50:45 -11004411#ifdef CONFIG_PCI
Brett Russ20f733e2005-09-01 18:26:17 -04004412 pci_unregister_driver(&mv_pci_driver);
Saeed Bishara7bb3c522008-01-30 11:50:45 -11004413#endif
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05004414 platform_driver_unregister(&mv_platform_driver);
Brett Russ20f733e2005-09-01 18:26:17 -04004415}
4416
4417MODULE_AUTHOR("Brett Russ");
4418MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
4419MODULE_LICENSE("GPL");
4420MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
4421MODULE_VERSION(DRV_VERSION);
Mark Lord17c5aab2008-04-16 14:56:51 -04004422MODULE_ALIAS("platform:" DRV_NAME);
Brett Russ20f733e2005-09-01 18:26:17 -04004423
Brett Russ20f733e2005-09-01 18:26:17 -04004424module_init(mv_init);
4425module_exit(mv_exit);