blob: 37ae5dc1070cafbc887e0a94821d12c6d1099e87 [file] [log] [blame]
Brett Russ20f733e2005-09-01 18:26:17 -04001/*
2 * sata_mv.c - Marvell SATA support
3 *
Mark Lord40f21b12009-03-10 18:51:04 -04004 * Copyright 2008-2009: Marvell Corporation, all rights reserved.
Jeff Garzik8b260242005-11-12 12:32:50 -05005 * Copyright 2005: EMC Corporation, all rights reserved.
Jeff Garzike2b1be52005-11-18 14:04:23 -05006 * Copyright 2005 Red Hat, Inc. All rights reserved.
Brett Russ20f733e2005-09-01 18:26:17 -04007 *
Mark Lord40f21b12009-03-10 18:51:04 -04008 * Originally written by Brett Russ.
9 * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>.
10 *
Brett Russ20f733e2005-09-01 18:26:17 -040011 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; version 2 of the License.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 *
26 */
27
Jeff Garzik4a05e202007-05-24 23:40:15 -040028/*
Mark Lord85afb932008-04-19 14:54:41 -040029 * sata_mv TODO list:
30 *
Mark Lord85afb932008-04-19 14:54:41 -040031 * --> Develop a low-power-consumption strategy, and implement it.
32 *
Mark Lord2b748a02009-03-10 22:01:17 -040033 * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
Mark Lord85afb932008-04-19 14:54:41 -040034 *
35 * --> [Experiment, Marvell value added] Is it possible to use target
36 * mode to cross-connect two Linux boxes with Marvell cards? If so,
37 * creating LibATA target mode support would be very interesting.
38 *
39 * Target mode, for those without docs, is the ability to directly
40 * connect two SATA ports.
41 */
Jeff Garzik4a05e202007-05-24 23:40:15 -040042
Mark Lord65ad7fef2009-04-06 15:24:14 -040043/*
44 * 80x1-B2 errata PCI#11:
45 *
46 * Users of the 6041/6081 Rev.B2 chips (current is C0)
47 * should be careful to insert those cards only onto PCI-X bus #0,
48 * and only in device slots 0..7, not higher. The chips may not
49 * work correctly otherwise (note: this is a pretty rare condition).
50 */
51
Brett Russ20f733e2005-09-01 18:26:17 -040052#include <linux/kernel.h>
53#include <linux/module.h>
54#include <linux/pci.h>
55#include <linux/init.h>
56#include <linux/blkdev.h>
57#include <linux/delay.h>
58#include <linux/interrupt.h>
Andrew Morton8d8b6002008-02-04 23:43:44 -080059#include <linux/dmapool.h>
Brett Russ20f733e2005-09-01 18:26:17 -040060#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050061#include <linux/device.h>
Saeed Bisharaf351b2d2008-02-01 18:08:03 -050062#include <linux/platform_device.h>
63#include <linux/ata_platform.h>
Lennert Buytenhek15a32632008-03-27 14:51:39 -040064#include <linux/mbus.h>
Mark Lordc46938c2008-05-02 14:02:28 -040065#include <linux/bitops.h>
Brett Russ20f733e2005-09-01 18:26:17 -040066#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050067#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040068#include <scsi/scsi_device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040069#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040070
71#define DRV_NAME "sata_mv"
Mark Lordcae5a292009-04-06 16:43:45 -040072#define DRV_VERSION "1.28"
Brett Russ20f733e2005-09-01 18:26:17 -040073
Mark Lord40f21b12009-03-10 18:51:04 -040074/*
75 * module options
76 */
77
78static int msi;
79#ifdef CONFIG_PCI
80module_param(msi, int, S_IRUGO);
81MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
82#endif
83
Mark Lord2b748a02009-03-10 22:01:17 -040084static int irq_coalescing_io_count;
85module_param(irq_coalescing_io_count, int, S_IRUGO);
86MODULE_PARM_DESC(irq_coalescing_io_count,
87 "IRQ coalescing I/O count threshold (0..255)");
88
89static int irq_coalescing_usecs;
90module_param(irq_coalescing_usecs, int, S_IRUGO);
91MODULE_PARM_DESC(irq_coalescing_usecs,
92 "IRQ coalescing time threshold in usecs");
93
Brett Russ20f733e2005-09-01 18:26:17 -040094enum {
95 /* BAR's are enumerated in terms of pci_resource_start() terms */
96 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
97 MV_IO_BAR = 2, /* offset 0x18: IO space */
98 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
99
100 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
101 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
102
Mark Lord2b748a02009-03-10 22:01:17 -0400103 /* For use with both IRQ coalescing methods ("all ports" or "per-HC" */
104 COAL_CLOCKS_PER_USEC = 150, /* for calculating COAL_TIMEs */
105 MAX_COAL_TIME_THRESHOLD = ((1 << 24) - 1), /* internal clocks count */
106 MAX_COAL_IO_COUNT = 255, /* completed I/O count */
107
Brett Russ20f733e2005-09-01 18:26:17 -0400108 MV_PCI_REG_BASE = 0,
Mark Lord615ab952006-05-19 16:24:56 -0400109
Mark Lord2b748a02009-03-10 22:01:17 -0400110 /*
111 * Per-chip ("all ports") interrupt coalescing feature.
112 * This is only for GEN_II / GEN_IIE hardware.
113 *
114 * Coalescing defers the interrupt until either the IO_THRESHOLD
115 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
116 */
Mark Lordcae5a292009-04-06 16:43:45 -0400117 COAL_REG_BASE = 0x18000,
118 IRQ_COAL_CAUSE = (COAL_REG_BASE + 0x08),
Mark Lord2b748a02009-03-10 22:01:17 -0400119 ALL_PORTS_COAL_IRQ = (1 << 4), /* all ports irq event */
120
Mark Lordcae5a292009-04-06 16:43:45 -0400121 IRQ_COAL_IO_THRESHOLD = (COAL_REG_BASE + 0xcc),
122 IRQ_COAL_TIME_THRESHOLD = (COAL_REG_BASE + 0xd0),
Mark Lord2b748a02009-03-10 22:01:17 -0400123
124 /*
125 * Registers for the (unused here) transaction coalescing feature:
126 */
Mark Lordcae5a292009-04-06 16:43:45 -0400127 TRAN_COAL_CAUSE_LO = (COAL_REG_BASE + 0x88),
128 TRAN_COAL_CAUSE_HI = (COAL_REG_BASE + 0x8c),
Mark Lord2b748a02009-03-10 22:01:17 -0400129
Mark Lordcae5a292009-04-06 16:43:45 -0400130 SATAHC0_REG_BASE = 0x20000,
131 FLASH_CTL = 0x1046c,
132 GPIO_PORT_CTL = 0x104f0,
133 RESET_CFG = 0x180d8,
Brett Russ20f733e2005-09-01 18:26:17 -0400134
135 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
136 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
137 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
138 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
139
Brett Russ31961942005-09-30 01:36:00 -0400140 MV_MAX_Q_DEPTH = 32,
141 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
142
143 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
144 * CRPB needs alignment on a 256B boundary. Size == 256B
Brett Russ31961942005-09-30 01:36:00 -0400145 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
146 */
147 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
148 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
Mark Lordda2fa9b2008-01-26 18:32:45 -0500149 MV_MAX_SG_CT = 256,
Brett Russ31961942005-09-30 01:36:00 -0400150 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
Brett Russ31961942005-09-30 01:36:00 -0400151
Mark Lord352fab72008-04-19 14:43:42 -0400152 /* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
Brett Russ20f733e2005-09-01 18:26:17 -0400153 MV_PORT_HC_SHIFT = 2,
Mark Lord352fab72008-04-19 14:43:42 -0400154 MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT), /* 4 */
155 /* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
156 MV_PORT_MASK = (MV_PORTS_PER_HC - 1), /* 3 */
Brett Russ20f733e2005-09-01 18:26:17 -0400157
158 /* Host Flags */
159 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100160
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400161 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
Mark Lord91b1a842009-01-30 18:46:39 -0500162 ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
Mark Lordad3aef52008-05-14 09:21:43 -0400163
Mark Lord91b1a842009-01-30 18:46:39 -0500164 MV_GEN_I_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,
Brett Russ20f733e2005-09-01 18:26:17 -0400165
Mark Lord40f21b12009-03-10 18:51:04 -0400166 MV_GEN_II_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NCQ |
167 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,
Mark Lord91b1a842009-01-30 18:46:39 -0500168
169 MV_GEN_IIE_FLAGS = MV_GEN_II_FLAGS | ATA_FLAG_AN,
Mark Lordad3aef52008-05-14 09:21:43 -0400170
Brett Russ31961942005-09-30 01:36:00 -0400171 CRQB_FLAG_READ = (1 << 0),
172 CRQB_TAG_SHIFT = 1,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400173 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
Mark Lorde12bef52008-03-31 19:33:56 -0400174 CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400175 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
Brett Russ31961942005-09-30 01:36:00 -0400176 CRQB_CMD_ADDR_SHIFT = 8,
177 CRQB_CMD_CS = (0x2 << 11),
178 CRQB_CMD_LAST = (1 << 15),
179
180 CRPB_FLAG_STATUS_SHIFT = 8,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400181 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
182 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
Brett Russ31961942005-09-30 01:36:00 -0400183
184 EPRD_FLAG_END_OF_TBL = (1 << 31),
185
Brett Russ20f733e2005-09-01 18:26:17 -0400186 /* PCI interface registers */
187
Mark Lordcae5a292009-04-06 16:43:45 -0400188 MV_PCI_COMMAND = 0xc00,
189 MV_PCI_COMMAND_MWRCOM = (1 << 4), /* PCI Master Write Combining */
190 MV_PCI_COMMAND_MRDTRIG = (1 << 7), /* PCI Master Read Trigger */
Brett Russ31961942005-09-30 01:36:00 -0400191
Mark Lordcae5a292009-04-06 16:43:45 -0400192 PCI_MAIN_CMD_STS = 0xd30,
Brett Russ20f733e2005-09-01 18:26:17 -0400193 STOP_PCI_MASTER = (1 << 2),
194 PCI_MASTER_EMPTY = (1 << 3),
195 GLOB_SFT_RST = (1 << 4),
196
Mark Lordcae5a292009-04-06 16:43:45 -0400197 MV_PCI_MODE = 0xd00,
Mark Lord8e7decd2008-05-02 02:07:51 -0400198 MV_PCI_MODE_MASK = 0x30,
199
Jeff Garzik522479f2005-11-12 22:14:02 -0500200 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
201 MV_PCI_DISC_TIMER = 0xd04,
202 MV_PCI_MSI_TRIGGER = 0xc38,
203 MV_PCI_SERR_MASK = 0xc28,
Mark Lordcae5a292009-04-06 16:43:45 -0400204 MV_PCI_XBAR_TMOUT = 0x1d04,
Jeff Garzik522479f2005-11-12 22:14:02 -0500205 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
206 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
207 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
208 MV_PCI_ERR_COMMAND = 0x1d50,
209
Mark Lordcae5a292009-04-06 16:43:45 -0400210 PCI_IRQ_CAUSE = 0x1d58,
211 PCI_IRQ_MASK = 0x1d5c,
Brett Russ20f733e2005-09-01 18:26:17 -0400212 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
213
Mark Lordcae5a292009-04-06 16:43:45 -0400214 PCIE_IRQ_CAUSE = 0x1900,
215 PCIE_IRQ_MASK = 0x1910,
Mark Lord646a4da2008-01-26 18:30:37 -0500216 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
Mark Lord02a121d2007-12-01 13:07:22 -0500217
Mark Lord7368f912008-04-25 11:24:24 -0400218 /* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
Mark Lordcae5a292009-04-06 16:43:45 -0400219 PCI_HC_MAIN_IRQ_CAUSE = 0x1d60,
220 PCI_HC_MAIN_IRQ_MASK = 0x1d64,
221 SOC_HC_MAIN_IRQ_CAUSE = 0x20020,
222 SOC_HC_MAIN_IRQ_MASK = 0x20024,
Mark Lord40f21b12009-03-10 18:51:04 -0400223 ERR_IRQ = (1 << 0), /* shift by (2 * port #) */
224 DONE_IRQ = (1 << 1), /* shift by (2 * port #) */
Brett Russ20f733e2005-09-01 18:26:17 -0400225 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
226 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
Mark Lord2b748a02009-03-10 22:01:17 -0400227 DONE_IRQ_0_3 = 0x000000aa, /* DONE_IRQ ports 0,1,2,3 */
228 DONE_IRQ_4_7 = (DONE_IRQ_0_3 << HC_SHIFT), /* 4,5,6,7 */
Brett Russ20f733e2005-09-01 18:26:17 -0400229 PCI_ERR = (1 << 18),
Mark Lord40f21b12009-03-10 18:51:04 -0400230 TRAN_COAL_LO_DONE = (1 << 19), /* transaction coalescing */
231 TRAN_COAL_HI_DONE = (1 << 20), /* transaction coalescing */
232 PORTS_0_3_COAL_DONE = (1 << 8), /* HC0 IRQ coalescing */
233 PORTS_4_7_COAL_DONE = (1 << 17), /* HC1 IRQ coalescing */
234 ALL_PORTS_COAL_DONE = (1 << 21), /* GEN_II(E) IRQ coalescing */
Brett Russ20f733e2005-09-01 18:26:17 -0400235 GPIO_INT = (1 << 22),
236 SELF_INT = (1 << 23),
237 TWSI_INT = (1 << 24),
238 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500239 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
Mark Lorde12bef52008-03-31 19:33:56 -0400240 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
Brett Russ20f733e2005-09-01 18:26:17 -0400241
242 /* SATAHC registers */
Mark Lordcae5a292009-04-06 16:43:45 -0400243 HC_CFG = 0x00,
Brett Russ20f733e2005-09-01 18:26:17 -0400244
Mark Lordcae5a292009-04-06 16:43:45 -0400245 HC_IRQ_CAUSE = 0x14,
Mark Lord352fab72008-04-19 14:43:42 -0400246 DMA_IRQ = (1 << 0), /* shift by port # */
247 HC_COAL_IRQ = (1 << 4), /* IRQ coalescing */
Brett Russ20f733e2005-09-01 18:26:17 -0400248 DEV_IRQ = (1 << 8), /* shift by port # */
249
Mark Lord2b748a02009-03-10 22:01:17 -0400250 /*
251 * Per-HC (Host-Controller) interrupt coalescing feature.
252 * This is present on all chip generations.
253 *
254 * Coalescing defers the interrupt until either the IO_THRESHOLD
255 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
256 */
Mark Lordcae5a292009-04-06 16:43:45 -0400257 HC_IRQ_COAL_IO_THRESHOLD = 0x000c,
258 HC_IRQ_COAL_TIME_THRESHOLD = 0x0010,
Mark Lord2b748a02009-03-10 22:01:17 -0400259
Mark Lordcae5a292009-04-06 16:43:45 -0400260 SOC_LED_CTRL = 0x2c,
Mark Lord000b3442009-03-15 11:33:19 -0400261 SOC_LED_CTRL_BLINK = (1 << 0), /* Active LED blink */
262 SOC_LED_CTRL_ACT_PRESENCE = (1 << 2), /* Multiplex dev presence */
263 /* with dev activity LED */
264
Brett Russ20f733e2005-09-01 18:26:17 -0400265 /* Shadow block registers */
Mark Lordcae5a292009-04-06 16:43:45 -0400266 SHD_BLK = 0x100,
267 SHD_CTL_AST = 0x20, /* ofs from SHD_BLK */
Brett Russ20f733e2005-09-01 18:26:17 -0400268
269 /* SATA registers */
Mark Lordcae5a292009-04-06 16:43:45 -0400270 SATA_STATUS = 0x300, /* ctrl, err regs follow status */
271 SATA_ACTIVE = 0x350,
272 FIS_IRQ_CAUSE = 0x364,
273 FIS_IRQ_CAUSE_AN = (1 << 9), /* async notification */
Mark Lord17c5aab2008-04-16 14:56:51 -0400274
Mark Lordcae5a292009-04-06 16:43:45 -0400275 LTMODE = 0x30c, /* requires read-after-write */
Mark Lord17c5aab2008-04-16 14:56:51 -0400276 LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */
277
Mark Lordcae5a292009-04-06 16:43:45 -0400278 PHY_MODE2 = 0x330,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500279 PHY_MODE3 = 0x310,
Mark Lordcae5a292009-04-06 16:43:45 -0400280
281 PHY_MODE4 = 0x314, /* requires read-after-write */
Mark Lordba069e32008-05-31 16:46:34 -0400282 PHY_MODE4_CFG_MASK = 0x00000003, /* phy internal config field */
283 PHY_MODE4_CFG_VALUE = 0x00000001, /* phy internal config field */
284 PHY_MODE4_RSVD_ZEROS = 0x5de3fffa, /* Gen2e always write zeros */
285 PHY_MODE4_RSVD_ONES = 0x00000005, /* Gen2e always write ones */
286
Mark Lordcae5a292009-04-06 16:43:45 -0400287 SATA_IFCTL = 0x344,
288 SATA_TESTCTL = 0x348,
289 SATA_IFSTAT = 0x34c,
290 VENDOR_UNIQUE_FIS = 0x35c,
Mark Lord17c5aab2008-04-16 14:56:51 -0400291
Mark Lordcae5a292009-04-06 16:43:45 -0400292 FISCFG = 0x360,
Mark Lord8e7decd2008-05-02 02:07:51 -0400293 FISCFG_WAIT_DEV_ERR = (1 << 8), /* wait for host on DevErr */
294 FISCFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */
Mark Lord17c5aab2008-04-16 14:56:51 -0400295
Jeff Garzikc9d39132005-11-13 17:47:51 -0500296 MV5_PHY_MODE = 0x74,
Mark Lordcae5a292009-04-06 16:43:45 -0400297 MV5_LTMODE = 0x30,
298 MV5_PHY_CTL = 0x0C,
299 SATA_IFCFG = 0x050,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500300
301 MV_M2_PREAMP_MASK = 0x7e0,
Brett Russ20f733e2005-09-01 18:26:17 -0400302
303 /* Port registers */
Mark Lordcae5a292009-04-06 16:43:45 -0400304 EDMA_CFG = 0,
Mark Lord0c589122008-01-26 18:31:16 -0500305 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
306 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
307 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
308 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
309 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
Mark Lorde12bef52008-03-31 19:33:56 -0400310 EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */
311 EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */
Brett Russ20f733e2005-09-01 18:26:17 -0400312
Mark Lordcae5a292009-04-06 16:43:45 -0400313 EDMA_ERR_IRQ_CAUSE = 0x8,
314 EDMA_ERR_IRQ_MASK = 0xc,
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400315 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
316 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
317 EDMA_ERR_DEV = (1 << 2), /* device error */
318 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
319 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
320 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400321 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
322 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400323 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400324 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400325 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
326 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
327 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
328 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
Mark Lord646a4da2008-01-26 18:30:37 -0500329
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400330 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500331 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
332 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
333 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
334 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
335
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400336 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500337
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400338 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500339 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
340 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
341 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
342 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
343 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
344
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400345 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500346
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400347 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400348 EDMA_ERR_OVERRUN_5 = (1 << 5),
349 EDMA_ERR_UNDERRUN_5 = (1 << 6),
Mark Lord646a4da2008-01-26 18:30:37 -0500350
351 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
352 EDMA_ERR_LNK_CTRL_RX_1 |
353 EDMA_ERR_LNK_CTRL_RX_3 |
Mark Lord85afb932008-04-19 14:54:41 -0400354 EDMA_ERR_LNK_CTRL_TX,
Mark Lord646a4da2008-01-26 18:30:37 -0500355
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400356 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
357 EDMA_ERR_PRD_PAR |
358 EDMA_ERR_DEV_DCON |
359 EDMA_ERR_DEV_CON |
360 EDMA_ERR_SERR |
361 EDMA_ERR_SELF_DIS |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400362 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400363 EDMA_ERR_CRPB_PAR |
364 EDMA_ERR_INTRL_PAR |
365 EDMA_ERR_IORDY |
366 EDMA_ERR_LNK_CTRL_RX_2 |
367 EDMA_ERR_LNK_DATA_RX |
368 EDMA_ERR_LNK_DATA_TX |
369 EDMA_ERR_TRANS_PROTO,
Mark Lorde12bef52008-03-31 19:33:56 -0400370
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400371 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
372 EDMA_ERR_PRD_PAR |
373 EDMA_ERR_DEV_DCON |
374 EDMA_ERR_DEV_CON |
375 EDMA_ERR_OVERRUN_5 |
376 EDMA_ERR_UNDERRUN_5 |
377 EDMA_ERR_SELF_DIS_5 |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400378 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400379 EDMA_ERR_CRPB_PAR |
380 EDMA_ERR_INTRL_PAR |
381 EDMA_ERR_IORDY,
Brett Russ20f733e2005-09-01 18:26:17 -0400382
Mark Lordcae5a292009-04-06 16:43:45 -0400383 EDMA_REQ_Q_BASE_HI = 0x10,
384 EDMA_REQ_Q_IN_PTR = 0x14, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400385
Mark Lordcae5a292009-04-06 16:43:45 -0400386 EDMA_REQ_Q_OUT_PTR = 0x18,
Brett Russ31961942005-09-30 01:36:00 -0400387 EDMA_REQ_Q_PTR_SHIFT = 5,
388
Mark Lordcae5a292009-04-06 16:43:45 -0400389 EDMA_RSP_Q_BASE_HI = 0x1c,
390 EDMA_RSP_Q_IN_PTR = 0x20,
391 EDMA_RSP_Q_OUT_PTR = 0x24, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400392 EDMA_RSP_Q_PTR_SHIFT = 3,
393
Mark Lordcae5a292009-04-06 16:43:45 -0400394 EDMA_CMD = 0x28, /* EDMA command register */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400395 EDMA_EN = (1 << 0), /* enable EDMA */
396 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
Mark Lord8e7decd2008-05-02 02:07:51 -0400397 EDMA_RESET = (1 << 2), /* reset eng/trans/link/phy */
Brett Russ20f733e2005-09-01 18:26:17 -0400398
Mark Lordcae5a292009-04-06 16:43:45 -0400399 EDMA_STATUS = 0x30, /* EDMA engine status */
Mark Lord8e7decd2008-05-02 02:07:51 -0400400 EDMA_STATUS_CACHE_EMPTY = (1 << 6), /* GenIIe command cache empty */
401 EDMA_STATUS_IDLE = (1 << 7), /* GenIIe EDMA enabled/idle */
402
Mark Lordcae5a292009-04-06 16:43:45 -0400403 EDMA_IORDY_TMOUT = 0x34,
404 EDMA_ARB_CFG = 0x38,
Mark Lord8e7decd2008-05-02 02:07:51 -0400405
Mark Lordcae5a292009-04-06 16:43:45 -0400406 EDMA_HALTCOND = 0x60, /* GenIIe halt conditions */
407 EDMA_UNKNOWN_RSVD = 0x6C, /* GenIIe unknown/reserved */
Mark Lordda142652009-01-30 18:51:54 -0500408
Mark Lordcae5a292009-04-06 16:43:45 -0400409 BMDMA_CMD = 0x224, /* bmdma command register */
410 BMDMA_STATUS = 0x228, /* bmdma status register */
411 BMDMA_PRD_LOW = 0x22c, /* bmdma PRD addr 31:0 */
412 BMDMA_PRD_HIGH = 0x230, /* bmdma PRD addr 63:32 */
Mark Lordda142652009-01-30 18:51:54 -0500413
Brett Russ31961942005-09-30 01:36:00 -0400414 /* Host private flags (hp_flags) */
415 MV_HP_FLAG_MSI = (1 << 0),
Jeff Garzik47c2b672005-11-12 21:13:17 -0500416 MV_HP_ERRATA_50XXB0 = (1 << 1),
417 MV_HP_ERRATA_50XXB2 = (1 << 2),
418 MV_HP_ERRATA_60X1B2 = (1 << 3),
419 MV_HP_ERRATA_60X1C0 = (1 << 4),
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400420 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
421 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
422 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
Mark Lord02a121d2007-12-01 13:07:22 -0500423 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
Mark Lord616d4a92008-05-02 02:08:32 -0400424 MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */
Mark Lord1f398472008-05-27 17:54:48 -0400425 MV_HP_FLAG_SOC = (1 << 11), /* SystemOnChip, no PCI */
Mark Lord000b3442009-03-15 11:33:19 -0400426 MV_HP_QUIRK_LED_BLINK_EN = (1 << 12), /* is led blinking enabled? */
Brett Russ20f733e2005-09-01 18:26:17 -0400427
Brett Russ31961942005-09-30 01:36:00 -0400428 /* Port private flags (pp_flags) */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400429 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
Mark Lord72109162008-01-26 18:31:33 -0500430 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
Mark Lord00f42ea2008-05-02 02:11:45 -0400431 MV_PP_FLAG_FBS_EN = (1 << 2), /* is EDMA set up for FBS? */
Mark Lord29d187b2008-05-02 02:15:37 -0400432 MV_PP_FLAG_DELAYED_EH = (1 << 3), /* delayed dev err handling */
Mark Lordd16ab3f2009-02-25 15:17:43 -0500433 MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4), /* ignore initial ATA_DRDY */
Brett Russ31961942005-09-30 01:36:00 -0400434};
435
Jeff Garzikee9ccdf2007-07-12 15:51:22 -0400436#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
437#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
Jeff Garzike4e7b892006-01-31 12:18:41 -0500438#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
Mark Lord8e7decd2008-05-02 02:07:51 -0400439#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
Mark Lord1f398472008-05-27 17:54:48 -0400440#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500441
Lennert Buytenhek15a32632008-03-27 14:51:39 -0400442#define WINDOW_CTRL(i) (0x20030 + ((i) << 4))
443#define WINDOW_BASE(i) (0x20034 + ((i) << 4))
444
Jeff Garzik095fec82005-11-12 09:50:49 -0500445enum {
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400446 /* DMA boundary 0xffff is required by the s/g splitting
447 * we need on /length/ in mv_fill-sg().
448 */
449 MV_DMA_BOUNDARY = 0xffffU,
Jeff Garzik095fec82005-11-12 09:50:49 -0500450
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400451 /* mask of register bits containing lower 32 bits
452 * of EDMA request queue DMA address
453 */
Jeff Garzik095fec82005-11-12 09:50:49 -0500454 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
455
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400456 /* ditto, for response queue */
Jeff Garzik095fec82005-11-12 09:50:49 -0500457 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
458};
459
Jeff Garzik522479f2005-11-12 22:14:02 -0500460enum chip_type {
461 chip_504x,
462 chip_508x,
463 chip_5080,
464 chip_604x,
465 chip_608x,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500466 chip_6042,
467 chip_7042,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500468 chip_soc,
Jeff Garzik522479f2005-11-12 22:14:02 -0500469};
470
Brett Russ31961942005-09-30 01:36:00 -0400471/* Command ReQuest Block: 32B */
472struct mv_crqb {
Mark Lorde1469872006-05-22 19:02:03 -0400473 __le32 sg_addr;
474 __le32 sg_addr_hi;
475 __le16 ctrl_flags;
476 __le16 ata_cmd[11];
Brett Russ31961942005-09-30 01:36:00 -0400477};
478
Jeff Garzike4e7b892006-01-31 12:18:41 -0500479struct mv_crqb_iie {
Mark Lorde1469872006-05-22 19:02:03 -0400480 __le32 addr;
481 __le32 addr_hi;
482 __le32 flags;
483 __le32 len;
484 __le32 ata_cmd[4];
Jeff Garzike4e7b892006-01-31 12:18:41 -0500485};
486
Brett Russ31961942005-09-30 01:36:00 -0400487/* Command ResPonse Block: 8B */
488struct mv_crpb {
Mark Lorde1469872006-05-22 19:02:03 -0400489 __le16 id;
490 __le16 flags;
491 __le32 tmstmp;
Brett Russ31961942005-09-30 01:36:00 -0400492};
493
494/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
495struct mv_sg {
Mark Lorde1469872006-05-22 19:02:03 -0400496 __le32 addr;
497 __le32 flags_size;
498 __le32 addr_hi;
499 __le32 reserved;
Brett Russ20f733e2005-09-01 18:26:17 -0400500};
501
Mark Lord08da1752009-02-25 15:13:03 -0500502/*
503 * We keep a local cache of a few frequently accessed port
504 * registers here, to avoid having to read them (very slow)
505 * when switching between EDMA and non-EDMA modes.
506 */
507struct mv_cached_regs {
508 u32 fiscfg;
509 u32 ltmode;
510 u32 haltcond;
Mark Lordc01e8a22009-02-25 15:14:48 -0500511 u32 unknown_rsvd;
Mark Lord08da1752009-02-25 15:13:03 -0500512};
513
Brett Russ20f733e2005-09-01 18:26:17 -0400514struct mv_port_priv {
Brett Russ31961942005-09-30 01:36:00 -0400515 struct mv_crqb *crqb;
516 dma_addr_t crqb_dma;
517 struct mv_crpb *crpb;
518 dma_addr_t crpb_dma;
Mark Lordeb73d552008-01-29 13:24:00 -0500519 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
520 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400521
522 unsigned int req_idx;
523 unsigned int resp_idx;
524
Brett Russ31961942005-09-30 01:36:00 -0400525 u32 pp_flags;
Mark Lord08da1752009-02-25 15:13:03 -0500526 struct mv_cached_regs cached;
Mark Lord29d187b2008-05-02 02:15:37 -0400527 unsigned int delayed_eh_pmp_map;
Brett Russ20f733e2005-09-01 18:26:17 -0400528};
529
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500530struct mv_port_signal {
531 u32 amps;
532 u32 pre;
533};
534
Mark Lord02a121d2007-12-01 13:07:22 -0500535struct mv_host_priv {
536 u32 hp_flags;
Mark Lord96e2c4872008-05-17 13:38:00 -0400537 u32 main_irq_mask;
Mark Lord02a121d2007-12-01 13:07:22 -0500538 struct mv_port_signal signal[8];
539 const struct mv_hw_ops *ops;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500540 int n_ports;
541 void __iomem *base;
Mark Lord7368f912008-04-25 11:24:24 -0400542 void __iomem *main_irq_cause_addr;
543 void __iomem *main_irq_mask_addr;
Mark Lordcae5a292009-04-06 16:43:45 -0400544 u32 irq_cause_offset;
545 u32 irq_mask_offset;
Mark Lord02a121d2007-12-01 13:07:22 -0500546 u32 unmask_all_irqs;
Mark Lordda2fa9b2008-01-26 18:32:45 -0500547 /*
548 * These consistent DMA memory pools give us guaranteed
549 * alignment for hardware-accessed data structures,
550 * and less memory waste in accomplishing the alignment.
551 */
552 struct dma_pool *crqb_pool;
553 struct dma_pool *crpb_pool;
554 struct dma_pool *sg_tbl_pool;
Mark Lord02a121d2007-12-01 13:07:22 -0500555};
556
Jeff Garzik47c2b672005-11-12 21:13:17 -0500557struct mv_hw_ops {
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500558 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
559 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500560 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
561 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
562 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500563 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
564 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500565 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100566 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500567};
568
Tejun Heo82ef04f2008-07-31 17:02:40 +0900569static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
570static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
571static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
572static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
Brett Russ31961942005-09-30 01:36:00 -0400573static int mv_port_start(struct ata_port *ap);
574static void mv_port_stop(struct ata_port *ap);
Mark Lord3e4a1392008-05-02 02:10:02 -0400575static int mv_qc_defer(struct ata_queued_cmd *qc);
Brett Russ31961942005-09-30 01:36:00 -0400576static void mv_qc_prep(struct ata_queued_cmd *qc);
Jeff Garzike4e7b892006-01-31 12:18:41 -0500577static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
Tejun Heo9a3d9eb2006-01-23 13:09:36 +0900578static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
Tejun Heoa1efdab2008-03-25 12:22:50 +0900579static int mv_hardreset(struct ata_link *link, unsigned int *class,
580 unsigned long deadline);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400581static void mv_eh_freeze(struct ata_port *ap);
582static void mv_eh_thaw(struct ata_port *ap);
Mark Lordf2738272008-01-26 18:32:29 -0500583static void mv6_dev_config(struct ata_device *dev);
Brett Russ20f733e2005-09-01 18:26:17 -0400584
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500585static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
586 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500587static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
588static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
589 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500590static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
591 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500592static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100593static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500594
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500595static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
596 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500597static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
598static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
599 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500600static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
601 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500602static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500603static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
604 void __iomem *mmio);
605static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
606 void __iomem *mmio);
607static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
608 void __iomem *mmio, unsigned int n_hc);
609static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
610 void __iomem *mmio);
611static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100612static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
Mark Lorde12bef52008-03-31 19:33:56 -0400613static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500614 unsigned int port_no);
Mark Lorde12bef52008-03-31 19:33:56 -0400615static int mv_stop_edma(struct ata_port *ap);
Mark Lordb5624682008-03-31 19:34:40 -0400616static int mv_stop_edma_engine(void __iomem *port_mmio);
Mark Lord00b81232009-01-30 18:47:51 -0500617static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500618
Mark Lorde49856d2008-04-16 14:59:07 -0400619static void mv_pmp_select(struct ata_port *ap, int pmp);
620static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
621 unsigned long deadline);
622static int mv_softreset(struct ata_link *link, unsigned int *class,
623 unsigned long deadline);
Mark Lord29d187b2008-05-02 02:15:37 -0400624static void mv_pmp_error_handler(struct ata_port *ap);
Mark Lord4c299ca2008-05-02 02:16:20 -0400625static void mv_process_crpb_entries(struct ata_port *ap,
626 struct mv_port_priv *pp);
Brett Russ20f733e2005-09-01 18:26:17 -0400627
Mark Lordda142652009-01-30 18:51:54 -0500628static void mv_sff_irq_clear(struct ata_port *ap);
629static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
630static void mv_bmdma_setup(struct ata_queued_cmd *qc);
631static void mv_bmdma_start(struct ata_queued_cmd *qc);
632static void mv_bmdma_stop(struct ata_queued_cmd *qc);
633static u8 mv_bmdma_status(struct ata_port *ap);
Mark Lordd16ab3f2009-02-25 15:17:43 -0500634static u8 mv_sff_check_status(struct ata_port *ap);
Mark Lordda142652009-01-30 18:51:54 -0500635
Mark Lordeb73d552008-01-29 13:24:00 -0500636/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
637 * because we have to allow room for worst case splitting of
638 * PRDs for 64K boundaries in mv_fill_sg().
639 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400640static struct scsi_host_template mv5_sht = {
Tejun Heo68d1d072008-03-25 12:22:49 +0900641 ATA_BASE_SHT(DRV_NAME),
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400642 .sg_tablesize = MV_MAX_SG_CT / 2,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400643 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400644};
645
646static struct scsi_host_template mv6_sht = {
Tejun Heo68d1d072008-03-25 12:22:49 +0900647 ATA_NCQ_SHT(DRV_NAME),
Mark Lord138bfdd2008-01-26 18:33:18 -0500648 .can_queue = MV_MAX_Q_DEPTH - 1,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400649 .sg_tablesize = MV_MAX_SG_CT / 2,
Brett Russ20f733e2005-09-01 18:26:17 -0400650 .dma_boundary = MV_DMA_BOUNDARY,
Brett Russ20f733e2005-09-01 18:26:17 -0400651};
652
Tejun Heo029cfd62008-03-25 12:22:49 +0900653static struct ata_port_operations mv5_ops = {
654 .inherits = &ata_sff_port_ops,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500655
Alan Coxc96f1732009-03-24 10:23:46 +0000656 .lost_interrupt = ATA_OP_NULL,
657
Mark Lord3e4a1392008-05-02 02:10:02 -0400658 .qc_defer = mv_qc_defer,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500659 .qc_prep = mv_qc_prep,
660 .qc_issue = mv_qc_issue,
661
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400662 .freeze = mv_eh_freeze,
663 .thaw = mv_eh_thaw,
Tejun Heoa1efdab2008-03-25 12:22:50 +0900664 .hardreset = mv_hardreset,
Tejun Heoa1efdab2008-03-25 12:22:50 +0900665 .error_handler = ata_std_error_handler, /* avoid SFF EH */
Tejun Heo029cfd62008-03-25 12:22:49 +0900666 .post_internal_cmd = ATA_OP_NULL,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400667
Jeff Garzikc9d39132005-11-13 17:47:51 -0500668 .scr_read = mv5_scr_read,
669 .scr_write = mv5_scr_write,
670
671 .port_start = mv_port_start,
672 .port_stop = mv_port_stop,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500673};
674
Tejun Heo029cfd62008-03-25 12:22:49 +0900675static struct ata_port_operations mv6_ops = {
676 .inherits = &mv5_ops,
Mark Lordf2738272008-01-26 18:32:29 -0500677 .dev_config = mv6_dev_config,
Brett Russ20f733e2005-09-01 18:26:17 -0400678 .scr_read = mv_scr_read,
679 .scr_write = mv_scr_write,
680
Mark Lorde49856d2008-04-16 14:59:07 -0400681 .pmp_hardreset = mv_pmp_hardreset,
682 .pmp_softreset = mv_softreset,
683 .softreset = mv_softreset,
Mark Lord29d187b2008-05-02 02:15:37 -0400684 .error_handler = mv_pmp_error_handler,
Mark Lordda142652009-01-30 18:51:54 -0500685
Mark Lord40f21b12009-03-10 18:51:04 -0400686 .sff_check_status = mv_sff_check_status,
Mark Lordda142652009-01-30 18:51:54 -0500687 .sff_irq_clear = mv_sff_irq_clear,
688 .check_atapi_dma = mv_check_atapi_dma,
689 .bmdma_setup = mv_bmdma_setup,
690 .bmdma_start = mv_bmdma_start,
691 .bmdma_stop = mv_bmdma_stop,
692 .bmdma_status = mv_bmdma_status,
Brett Russ20f733e2005-09-01 18:26:17 -0400693};
694
Tejun Heo029cfd62008-03-25 12:22:49 +0900695static struct ata_port_operations mv_iie_ops = {
696 .inherits = &mv6_ops,
697 .dev_config = ATA_OP_NULL,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500698 .qc_prep = mv_qc_prep_iie,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500699};
700
Arjan van de Ven98ac62d2005-11-28 10:06:23 +0100701static const struct ata_port_info mv_port_info[] = {
Brett Russ20f733e2005-09-01 18:26:17 -0400702 { /* chip_504x */
Mark Lord91b1a842009-01-30 18:46:39 -0500703 .flags = MV_GEN_I_FLAGS,
Mark Lordc361acb2009-04-06 15:22:21 -0400704 .pio_mask = ATA_PIO4,
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400705 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500706 .port_ops = &mv5_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400707 },
708 { /* chip_508x */
Mark Lord91b1a842009-01-30 18:46:39 -0500709 .flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
Mark Lordc361acb2009-04-06 15:22:21 -0400710 .pio_mask = ATA_PIO4,
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400711 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500712 .port_ops = &mv5_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400713 },
Jeff Garzik47c2b672005-11-12 21:13:17 -0500714 { /* chip_5080 */
Mark Lord91b1a842009-01-30 18:46:39 -0500715 .flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
Mark Lordc361acb2009-04-06 15:22:21 -0400716 .pio_mask = ATA_PIO4,
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400717 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500718 .port_ops = &mv5_ops,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500719 },
Brett Russ20f733e2005-09-01 18:26:17 -0400720 { /* chip_604x */
Mark Lord91b1a842009-01-30 18:46:39 -0500721 .flags = MV_GEN_II_FLAGS,
Mark Lordc361acb2009-04-06 15:22:21 -0400722 .pio_mask = ATA_PIO4,
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400723 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500724 .port_ops = &mv6_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400725 },
726 { /* chip_608x */
Mark Lord91b1a842009-01-30 18:46:39 -0500727 .flags = MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
Mark Lordc361acb2009-04-06 15:22:21 -0400728 .pio_mask = ATA_PIO4,
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400729 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500730 .port_ops = &mv6_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400731 },
Jeff Garzike4e7b892006-01-31 12:18:41 -0500732 { /* chip_6042 */
Mark Lord91b1a842009-01-30 18:46:39 -0500733 .flags = MV_GEN_IIE_FLAGS,
Mark Lordc361acb2009-04-06 15:22:21 -0400734 .pio_mask = ATA_PIO4,
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400735 .udma_mask = ATA_UDMA6,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500736 .port_ops = &mv_iie_ops,
737 },
738 { /* chip_7042 */
Mark Lord91b1a842009-01-30 18:46:39 -0500739 .flags = MV_GEN_IIE_FLAGS,
Mark Lordc361acb2009-04-06 15:22:21 -0400740 .pio_mask = ATA_PIO4,
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400741 .udma_mask = ATA_UDMA6,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500742 .port_ops = &mv_iie_ops,
743 },
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500744 { /* chip_soc */
Mark Lord91b1a842009-01-30 18:46:39 -0500745 .flags = MV_GEN_IIE_FLAGS,
Mark Lordc361acb2009-04-06 15:22:21 -0400746 .pio_mask = ATA_PIO4,
Mark Lord17c5aab2008-04-16 14:56:51 -0400747 .udma_mask = ATA_UDMA6,
748 .port_ops = &mv_iie_ops,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500749 },
Brett Russ20f733e2005-09-01 18:26:17 -0400750};
751
Jeff Garzik3b7d6972005-11-10 11:04:11 -0500752static const struct pci_device_id mv_pci_tbl[] = {
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400753 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
754 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
755 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
756 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
Mark Lord46c57842008-09-04 18:21:07 -0400757 /* RocketRAID 1720/174x have different identifiers */
758 { PCI_VDEVICE(TTI, 0x1720), chip_6042 },
Mark Lord44622542009-01-27 16:33:13 -0500759 { PCI_VDEVICE(TTI, 0x1740), chip_6042 },
760 { PCI_VDEVICE(TTI, 0x1742), chip_6042 },
Brett Russ20f733e2005-09-01 18:26:17 -0400761
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400762 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
763 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
764 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
765 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
766 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
Jeff Garzik29179532005-11-11 08:08:03 -0500767
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400768 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
769
Florian Attenbergerd9f9c6b2007-07-02 17:09:29 +0200770 /* Adaptec 1430SA */
771 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
772
Mark Lord02a121d2007-12-01 13:07:22 -0500773 /* Marvell 7042 support */
Morrison, Tom6a3d5862007-03-06 02:38:10 -0800774 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
775
Mark Lord02a121d2007-12-01 13:07:22 -0500776 /* Highpoint RocketRAID PCIe series */
777 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
778 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
779
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400780 { } /* terminate list */
Brett Russ20f733e2005-09-01 18:26:17 -0400781};
782
Jeff Garzik47c2b672005-11-12 21:13:17 -0500783static const struct mv_hw_ops mv5xxx_ops = {
784 .phy_errata = mv5_phy_errata,
785 .enable_leds = mv5_enable_leds,
786 .read_preamp = mv5_read_preamp,
787 .reset_hc = mv5_reset_hc,
Jeff Garzik522479f2005-11-12 22:14:02 -0500788 .reset_flash = mv5_reset_flash,
789 .reset_bus = mv5_reset_bus,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500790};
791
792static const struct mv_hw_ops mv6xxx_ops = {
793 .phy_errata = mv6_phy_errata,
794 .enable_leds = mv6_enable_leds,
795 .read_preamp = mv6_read_preamp,
796 .reset_hc = mv6_reset_hc,
Jeff Garzik522479f2005-11-12 22:14:02 -0500797 .reset_flash = mv6_reset_flash,
798 .reset_bus = mv_reset_pci_bus,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500799};
800
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500801static const struct mv_hw_ops mv_soc_ops = {
802 .phy_errata = mv6_phy_errata,
803 .enable_leds = mv_soc_enable_leds,
804 .read_preamp = mv_soc_read_preamp,
805 .reset_hc = mv_soc_reset_hc,
806 .reset_flash = mv_soc_reset_flash,
807 .reset_bus = mv_soc_reset_bus,
808};
809
Brett Russ20f733e2005-09-01 18:26:17 -0400810/*
811 * Functions
812 */
813
814static inline void writelfl(unsigned long data, void __iomem *addr)
815{
816 writel(data, addr);
817 (void) readl(addr); /* flush to avoid PCI posted write */
818}
819
Jeff Garzikc9d39132005-11-13 17:47:51 -0500820static inline unsigned int mv_hc_from_port(unsigned int port)
821{
822 return port >> MV_PORT_HC_SHIFT;
823}
824
825static inline unsigned int mv_hardport_from_port(unsigned int port)
826{
827 return port & MV_PORT_MASK;
828}
829
Mark Lord1cfd19a2008-04-19 15:05:50 -0400830/*
831 * Consolidate some rather tricky bit shift calculations.
832 * This is hot-path stuff, so not a function.
833 * Simple code, with two return values, so macro rather than inline.
834 *
835 * port is the sole input, in range 0..7.
Mark Lord7368f912008-04-25 11:24:24 -0400836 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
837 * hardport is the other output, in range 0..3.
Mark Lord1cfd19a2008-04-19 15:05:50 -0400838 *
839 * Note that port and hardport may be the same variable in some cases.
840 */
841#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport) \
842{ \
843 shift = mv_hc_from_port(port) * HC_SHIFT; \
844 hardport = mv_hardport_from_port(port); \
845 shift += hardport * 2; \
846}
847
Mark Lord352fab72008-04-19 14:43:42 -0400848static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
849{
Mark Lordcae5a292009-04-06 16:43:45 -0400850 return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
Mark Lord352fab72008-04-19 14:43:42 -0400851}
852
Jeff Garzikc9d39132005-11-13 17:47:51 -0500853static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
854 unsigned int port)
855{
856 return mv_hc_base(base, mv_hc_from_port(port));
857}
858
Brett Russ20f733e2005-09-01 18:26:17 -0400859static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
860{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500861 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500862 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500863 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400864}
865
Mark Lorde12bef52008-03-31 19:33:56 -0400866static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
867{
868 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
869 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
870
871 return hc_mmio + ofs;
872}
873
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500874static inline void __iomem *mv_host_base(struct ata_host *host)
875{
876 struct mv_host_priv *hpriv = host->private_data;
877 return hpriv->base;
878}
879
Brett Russ20f733e2005-09-01 18:26:17 -0400880static inline void __iomem *mv_ap_base(struct ata_port *ap)
881{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500882 return mv_port_base(mv_host_base(ap->host), ap->port_no);
Brett Russ20f733e2005-09-01 18:26:17 -0400883}
884
Jeff Garzikcca39742006-08-24 03:19:22 -0400885static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400886{
Jeff Garzikcca39742006-08-24 03:19:22 -0400887 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400888}
889
Mark Lord08da1752009-02-25 15:13:03 -0500890/**
891 * mv_save_cached_regs - (re-)initialize cached port registers
892 * @ap: the port whose registers we are caching
893 *
894 * Initialize the local cache of port registers,
895 * so that reading them over and over again can
896 * be avoided on the hotter paths of this driver.
897 * This saves a few microseconds each time we switch
898 * to/from EDMA mode to perform (eg.) a drive cache flush.
899 */
900static void mv_save_cached_regs(struct ata_port *ap)
901{
902 void __iomem *port_mmio = mv_ap_base(ap);
903 struct mv_port_priv *pp = ap->private_data;
904
Mark Lordcae5a292009-04-06 16:43:45 -0400905 pp->cached.fiscfg = readl(port_mmio + FISCFG);
906 pp->cached.ltmode = readl(port_mmio + LTMODE);
907 pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
908 pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
Mark Lord08da1752009-02-25 15:13:03 -0500909}
910
911/**
912 * mv_write_cached_reg - write to a cached port register
913 * @addr: hardware address of the register
914 * @old: pointer to cached value of the register
915 * @new: new value for the register
916 *
917 * Write a new value to a cached register,
918 * but only if the value is different from before.
919 */
920static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
921{
922 if (new != *old) {
Mark Lord12f3b6d2009-04-06 15:26:24 -0400923 unsigned long laddr;
Mark Lord08da1752009-02-25 15:13:03 -0500924 *old = new;
Mark Lord12f3b6d2009-04-06 15:26:24 -0400925 /*
926 * Workaround for 88SX60x1-B2 FEr SATA#13:
927 * Read-after-write is needed to prevent generating 64-bit
928 * write cycles on the PCI bus for SATA interface registers
929 * at offsets ending in 0x4 or 0xc.
930 *
931 * Looks like a lot of fuss, but it avoids an unnecessary
932 * +1 usec read-after-write delay for unaffected registers.
933 */
934 laddr = (long)addr & 0xffff;
935 if (laddr >= 0x300 && laddr <= 0x33c) {
936 laddr &= 0x000f;
937 if (laddr == 0x4 || laddr == 0xc) {
938 writelfl(new, addr); /* read after write */
939 return;
940 }
941 }
942 writel(new, addr); /* unaffected by the errata */
Mark Lord08da1752009-02-25 15:13:03 -0500943 }
944}
945
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400946static void mv_set_edma_ptrs(void __iomem *port_mmio,
947 struct mv_host_priv *hpriv,
948 struct mv_port_priv *pp)
949{
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400950 u32 index;
951
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400952 /*
953 * initialize request queue
954 */
Mark Lordfcfb1f72008-04-19 15:06:40 -0400955 pp->req_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */
956 index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400957
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400958 WARN_ON(pp->crqb_dma & 0x3ff);
Mark Lordcae5a292009-04-06 16:43:45 -0400959 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400960 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
Mark Lordcae5a292009-04-06 16:43:45 -0400961 port_mmio + EDMA_REQ_Q_IN_PTR);
962 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400963
964 /*
965 * initialize response queue
966 */
Mark Lordfcfb1f72008-04-19 15:06:40 -0400967 pp->resp_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */
968 index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400969
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400970 WARN_ON(pp->crpb_dma & 0xff);
Mark Lordcae5a292009-04-06 16:43:45 -0400971 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
972 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400973 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
Mark Lordcae5a292009-04-06 16:43:45 -0400974 port_mmio + EDMA_RSP_Q_OUT_PTR);
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400975}
976
Mark Lord2b748a02009-03-10 22:01:17 -0400977static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
978{
979 /*
980 * When writing to the main_irq_mask in hardware,
981 * we must ensure exclusivity between the interrupt coalescing bits
982 * and the corresponding individual port DONE_IRQ bits.
983 *
984 * Note that this register is really an "IRQ enable" register,
985 * not an "IRQ mask" register as Marvell's naming might suggest.
986 */
987 if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
988 mask &= ~DONE_IRQ_0_3;
989 if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
990 mask &= ~DONE_IRQ_4_7;
991 writelfl(mask, hpriv->main_irq_mask_addr);
992}
993
Mark Lordc4de5732008-05-17 13:35:21 -0400994static void mv_set_main_irq_mask(struct ata_host *host,
995 u32 disable_bits, u32 enable_bits)
996{
997 struct mv_host_priv *hpriv = host->private_data;
998 u32 old_mask, new_mask;
999
Mark Lord96e2c4872008-05-17 13:38:00 -04001000 old_mask = hpriv->main_irq_mask;
Mark Lordc4de5732008-05-17 13:35:21 -04001001 new_mask = (old_mask & ~disable_bits) | enable_bits;
Mark Lord96e2c4872008-05-17 13:38:00 -04001002 if (new_mask != old_mask) {
1003 hpriv->main_irq_mask = new_mask;
Mark Lord2b748a02009-03-10 22:01:17 -04001004 mv_write_main_irq_mask(new_mask, hpriv);
Mark Lord96e2c4872008-05-17 13:38:00 -04001005 }
Mark Lordc4de5732008-05-17 13:35:21 -04001006}
1007
1008static void mv_enable_port_irqs(struct ata_port *ap,
1009 unsigned int port_bits)
1010{
1011 unsigned int shift, hardport, port = ap->port_no;
1012 u32 disable_bits, enable_bits;
1013
1014 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
1015
1016 disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
1017 enable_bits = port_bits << shift;
1018 mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
1019}
1020
Mark Lord00b81232009-01-30 18:47:51 -05001021static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
1022 void __iomem *port_mmio,
1023 unsigned int port_irqs)
1024{
1025 struct mv_host_priv *hpriv = ap->host->private_data;
1026 int hardport = mv_hardport_from_port(ap->port_no);
1027 void __iomem *hc_mmio = mv_hc_base_from_port(
1028 mv_host_base(ap->host), ap->port_no);
1029 u32 hc_irq_cause;
1030
1031 /* clear EDMA event indicators, if any */
Mark Lordcae5a292009-04-06 16:43:45 -04001032 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
Mark Lord00b81232009-01-30 18:47:51 -05001033
1034 /* clear pending irq events */
1035 hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
Mark Lordcae5a292009-04-06 16:43:45 -04001036 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
Mark Lord00b81232009-01-30 18:47:51 -05001037
1038 /* clear FIS IRQ Cause */
1039 if (IS_GEN_IIE(hpriv))
Mark Lordcae5a292009-04-06 16:43:45 -04001040 writelfl(0, port_mmio + FIS_IRQ_CAUSE);
Mark Lord00b81232009-01-30 18:47:51 -05001041
1042 mv_enable_port_irqs(ap, port_irqs);
1043}
1044
Mark Lord2b748a02009-03-10 22:01:17 -04001045static void mv_set_irq_coalescing(struct ata_host *host,
1046 unsigned int count, unsigned int usecs)
1047{
1048 struct mv_host_priv *hpriv = host->private_data;
1049 void __iomem *mmio = hpriv->base, *hc_mmio;
1050 u32 coal_enable = 0;
1051 unsigned long flags;
Mark Lord6abf4672009-03-11 00:56:00 -04001052 unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
Mark Lord2b748a02009-03-10 22:01:17 -04001053 const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
1054 ALL_PORTS_COAL_DONE;
1055
1056 /* Disable IRQ coalescing if either threshold is zero */
1057 if (!usecs || !count) {
1058 clks = count = 0;
1059 } else {
1060 /* Respect maximum limits of the hardware */
1061 clks = usecs * COAL_CLOCKS_PER_USEC;
1062 if (clks > MAX_COAL_TIME_THRESHOLD)
1063 clks = MAX_COAL_TIME_THRESHOLD;
1064 if (count > MAX_COAL_IO_COUNT)
1065 count = MAX_COAL_IO_COUNT;
1066 }
1067
1068 spin_lock_irqsave(&host->lock, flags);
Mark Lord6abf4672009-03-11 00:56:00 -04001069 mv_set_main_irq_mask(host, coal_disable, 0);
Mark Lord2b748a02009-03-10 22:01:17 -04001070
Mark Lord6abf4672009-03-11 00:56:00 -04001071 if (is_dual_hc && !IS_GEN_I(hpriv)) {
Mark Lord2b748a02009-03-10 22:01:17 -04001072 /*
Mark Lord6abf4672009-03-11 00:56:00 -04001073 * GEN_II/GEN_IIE with dual host controllers:
1074 * one set of global thresholds for the entire chip.
Mark Lord2b748a02009-03-10 22:01:17 -04001075 */
Mark Lordcae5a292009-04-06 16:43:45 -04001076 writel(clks, mmio + IRQ_COAL_TIME_THRESHOLD);
1077 writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
Mark Lord2b748a02009-03-10 22:01:17 -04001078 /* clear leftover coal IRQ bit */
Mark Lordcae5a292009-04-06 16:43:45 -04001079 writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
Mark Lord6abf4672009-03-11 00:56:00 -04001080 if (count)
1081 coal_enable = ALL_PORTS_COAL_DONE;
1082 clks = count = 0; /* force clearing of regular regs below */
Mark Lord2b748a02009-03-10 22:01:17 -04001083 }
Mark Lord6abf4672009-03-11 00:56:00 -04001084
Mark Lord2b748a02009-03-10 22:01:17 -04001085 /*
1086 * All chips: independent thresholds for each HC on the chip.
1087 */
1088 hc_mmio = mv_hc_base_from_port(mmio, 0);
Mark Lordcae5a292009-04-06 16:43:45 -04001089 writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
1090 writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
1091 writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
Mark Lord6abf4672009-03-11 00:56:00 -04001092 if (count)
1093 coal_enable |= PORTS_0_3_COAL_DONE;
1094 if (is_dual_hc) {
Mark Lord2b748a02009-03-10 22:01:17 -04001095 hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
Mark Lordcae5a292009-04-06 16:43:45 -04001096 writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
1097 writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
1098 writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
Mark Lord6abf4672009-03-11 00:56:00 -04001099 if (count)
1100 coal_enable |= PORTS_4_7_COAL_DONE;
Mark Lord2b748a02009-03-10 22:01:17 -04001101 }
Mark Lord2b748a02009-03-10 22:01:17 -04001102
Mark Lord6abf4672009-03-11 00:56:00 -04001103 mv_set_main_irq_mask(host, 0, coal_enable);
Mark Lord2b748a02009-03-10 22:01:17 -04001104 spin_unlock_irqrestore(&host->lock, flags);
1105}
1106
Brett Russ05b308e2005-10-05 17:08:53 -04001107/**
Mark Lord00b81232009-01-30 18:47:51 -05001108 * mv_start_edma - Enable eDMA engine
Brett Russ05b308e2005-10-05 17:08:53 -04001109 * @base: port base address
1110 * @pp: port private data
1111 *
Tejun Heobeec7db2006-02-11 19:11:13 +09001112 * Verify the local cache of the eDMA state is accurate with a
1113 * WARN_ON.
Brett Russ05b308e2005-10-05 17:08:53 -04001114 *
1115 * LOCKING:
1116 * Inherited from caller.
1117 */
Mark Lord00b81232009-01-30 18:47:51 -05001118static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
Mark Lord72109162008-01-26 18:31:33 -05001119 struct mv_port_priv *pp, u8 protocol)
Brett Russ31961942005-09-30 01:36:00 -04001120{
Mark Lord72109162008-01-26 18:31:33 -05001121 int want_ncq = (protocol == ATA_PROT_NCQ);
1122
1123 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1124 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
1125 if (want_ncq != using_ncq)
Mark Lordb5624682008-03-31 19:34:40 -04001126 mv_stop_edma(ap);
Mark Lord72109162008-01-26 18:31:33 -05001127 }
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001128 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
Mark Lord0c589122008-01-26 18:31:16 -05001129 struct mv_host_priv *hpriv = ap->host->private_data;
Mark Lord0c589122008-01-26 18:31:16 -05001130
Mark Lord00b81232009-01-30 18:47:51 -05001131 mv_edma_cfg(ap, want_ncq, 1);
Mark Lord0c589122008-01-26 18:31:16 -05001132
Mark Lordf630d562008-01-26 18:31:00 -05001133 mv_set_edma_ptrs(port_mmio, hpriv, pp);
Mark Lord00b81232009-01-30 18:47:51 -05001134 mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001135
Mark Lordcae5a292009-04-06 16:43:45 -04001136 writelfl(EDMA_EN, port_mmio + EDMA_CMD);
Brett Russafb0edd2005-10-05 17:08:42 -04001137 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
1138 }
Brett Russ31961942005-09-30 01:36:00 -04001139}
1140
Mark Lord9b2c4e02008-05-02 02:09:14 -04001141static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
1142{
1143 void __iomem *port_mmio = mv_ap_base(ap);
1144 const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
1145 const int per_loop = 5, timeout = (15 * 1000 / per_loop);
1146 int i;
1147
1148 /*
1149 * Wait for the EDMA engine to finish transactions in progress.
Mark Lordc46938c2008-05-02 14:02:28 -04001150 * No idea what a good "timeout" value might be, but measurements
1151 * indicate that it often requires hundreds of microseconds
1152 * with two drives in-use. So we use the 15msec value above
1153 * as a rough guess at what even more drives might require.
Mark Lord9b2c4e02008-05-02 02:09:14 -04001154 */
1155 for (i = 0; i < timeout; ++i) {
Mark Lordcae5a292009-04-06 16:43:45 -04001156 u32 edma_stat = readl(port_mmio + EDMA_STATUS);
Mark Lord9b2c4e02008-05-02 02:09:14 -04001157 if ((edma_stat & empty_idle) == empty_idle)
1158 break;
1159 udelay(per_loop);
1160 }
1161 /* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */
1162}
1163
Brett Russ05b308e2005-10-05 17:08:53 -04001164/**
Mark Lorde12bef52008-03-31 19:33:56 -04001165 * mv_stop_edma_engine - Disable eDMA engine
Mark Lordb5624682008-03-31 19:34:40 -04001166 * @port_mmio: io base address
Brett Russ05b308e2005-10-05 17:08:53 -04001167 *
1168 * LOCKING:
1169 * Inherited from caller.
1170 */
Mark Lordb5624682008-03-31 19:34:40 -04001171static int mv_stop_edma_engine(void __iomem *port_mmio)
Brett Russ31961942005-09-30 01:36:00 -04001172{
Mark Lordb5624682008-03-31 19:34:40 -04001173 int i;
Brett Russ31961942005-09-30 01:36:00 -04001174
Mark Lordb5624682008-03-31 19:34:40 -04001175 /* Disable eDMA. The disable bit auto clears. */
Mark Lordcae5a292009-04-06 16:43:45 -04001176 writelfl(EDMA_DS, port_mmio + EDMA_CMD);
Jeff Garzik8b260242005-11-12 12:32:50 -05001177
Mark Lordb5624682008-03-31 19:34:40 -04001178 /* Wait for the chip to confirm eDMA is off. */
1179 for (i = 10000; i > 0; i--) {
Mark Lordcae5a292009-04-06 16:43:45 -04001180 u32 reg = readl(port_mmio + EDMA_CMD);
Jeff Garzik4537deb2007-07-12 14:30:19 -04001181 if (!(reg & EDMA_EN))
Mark Lordb5624682008-03-31 19:34:40 -04001182 return 0;
1183 udelay(10);
Brett Russ31961942005-09-30 01:36:00 -04001184 }
Mark Lordb5624682008-03-31 19:34:40 -04001185 return -EIO;
Brett Russ31961942005-09-30 01:36:00 -04001186}
1187
Mark Lorde12bef52008-03-31 19:33:56 -04001188static int mv_stop_edma(struct ata_port *ap)
Jeff Garzik0ea9e172007-07-13 17:06:45 -04001189{
Mark Lordb5624682008-03-31 19:34:40 -04001190 void __iomem *port_mmio = mv_ap_base(ap);
1191 struct mv_port_priv *pp = ap->private_data;
Mark Lord66e57a22009-01-30 18:52:58 -05001192 int err = 0;
Jeff Garzik0ea9e172007-07-13 17:06:45 -04001193
Mark Lordb5624682008-03-31 19:34:40 -04001194 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
1195 return 0;
1196 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Mark Lord9b2c4e02008-05-02 02:09:14 -04001197 mv_wait_for_edma_empty_idle(ap);
Mark Lordb5624682008-03-31 19:34:40 -04001198 if (mv_stop_edma_engine(port_mmio)) {
1199 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
Mark Lord66e57a22009-01-30 18:52:58 -05001200 err = -EIO;
Mark Lordb5624682008-03-31 19:34:40 -04001201 }
Mark Lord66e57a22009-01-30 18:52:58 -05001202 mv_edma_cfg(ap, 0, 0);
1203 return err;
Jeff Garzik0ea9e172007-07-13 17:06:45 -04001204}
1205
Jeff Garzik8a70f8d2005-10-05 17:19:47 -04001206#ifdef ATA_DEBUG
Brett Russ31961942005-09-30 01:36:00 -04001207static void mv_dump_mem(void __iomem *start, unsigned bytes)
1208{
Brett Russ31961942005-09-30 01:36:00 -04001209 int b, w;
1210 for (b = 0; b < bytes; ) {
1211 DPRINTK("%p: ", start + b);
1212 for (w = 0; b < bytes && w < 4; w++) {
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001213 printk("%08x ", readl(start + b));
Brett Russ31961942005-09-30 01:36:00 -04001214 b += sizeof(u32);
1215 }
1216 printk("\n");
1217 }
Brett Russ31961942005-09-30 01:36:00 -04001218}
Jeff Garzik8a70f8d2005-10-05 17:19:47 -04001219#endif
1220
Brett Russ31961942005-09-30 01:36:00 -04001221static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
1222{
1223#ifdef ATA_DEBUG
1224 int b, w;
1225 u32 dw;
1226 for (b = 0; b < bytes; ) {
1227 DPRINTK("%02x: ", b);
1228 for (w = 0; b < bytes && w < 4; w++) {
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001229 (void) pci_read_config_dword(pdev, b, &dw);
1230 printk("%08x ", dw);
Brett Russ31961942005-09-30 01:36:00 -04001231 b += sizeof(u32);
1232 }
1233 printk("\n");
1234 }
1235#endif
1236}
1237static void mv_dump_all_regs(void __iomem *mmio_base, int port,
1238 struct pci_dev *pdev)
1239{
1240#ifdef ATA_DEBUG
Jeff Garzik8b260242005-11-12 12:32:50 -05001241 void __iomem *hc_base = mv_hc_base(mmio_base,
Brett Russ31961942005-09-30 01:36:00 -04001242 port >> MV_PORT_HC_SHIFT);
1243 void __iomem *port_base;
1244 int start_port, num_ports, p, start_hc, num_hcs, hc;
1245
1246 if (0 > port) {
1247 start_hc = start_port = 0;
1248 num_ports = 8; /* shld be benign for 4 port devs */
1249 num_hcs = 2;
1250 } else {
1251 start_hc = port >> MV_PORT_HC_SHIFT;
1252 start_port = port;
1253 num_ports = num_hcs = 1;
1254 }
Jeff Garzik8b260242005-11-12 12:32:50 -05001255 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
Brett Russ31961942005-09-30 01:36:00 -04001256 num_ports > 1 ? num_ports - 1 : start_port);
1257
1258 if (NULL != pdev) {
1259 DPRINTK("PCI config space regs:\n");
1260 mv_dump_pci_cfg(pdev, 0x68);
1261 }
1262 DPRINTK("PCI regs:\n");
1263 mv_dump_mem(mmio_base+0xc00, 0x3c);
1264 mv_dump_mem(mmio_base+0xd00, 0x34);
1265 mv_dump_mem(mmio_base+0xf00, 0x4);
1266 mv_dump_mem(mmio_base+0x1d00, 0x6c);
1267 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
Dan Alonid220c372006-04-10 23:20:22 -07001268 hc_base = mv_hc_base(mmio_base, hc);
Brett Russ31961942005-09-30 01:36:00 -04001269 DPRINTK("HC regs (HC %i):\n", hc);
1270 mv_dump_mem(hc_base, 0x1c);
1271 }
1272 for (p = start_port; p < start_port + num_ports; p++) {
1273 port_base = mv_port_base(mmio_base, p);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001274 DPRINTK("EDMA regs (port %i):\n", p);
Brett Russ31961942005-09-30 01:36:00 -04001275 mv_dump_mem(port_base, 0x54);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001276 DPRINTK("SATA regs (port %i):\n", p);
Brett Russ31961942005-09-30 01:36:00 -04001277 mv_dump_mem(port_base+0x300, 0x60);
1278 }
1279#endif
1280}
1281
Brett Russ20f733e2005-09-01 18:26:17 -04001282static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1283{
1284 unsigned int ofs;
1285
1286 switch (sc_reg_in) {
1287 case SCR_STATUS:
1288 case SCR_CONTROL:
1289 case SCR_ERROR:
Mark Lordcae5a292009-04-06 16:43:45 -04001290 ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
Brett Russ20f733e2005-09-01 18:26:17 -04001291 break;
1292 case SCR_ACTIVE:
Mark Lordcae5a292009-04-06 16:43:45 -04001293 ofs = SATA_ACTIVE; /* active is not with the others */
Brett Russ20f733e2005-09-01 18:26:17 -04001294 break;
1295 default:
1296 ofs = 0xffffffffU;
1297 break;
1298 }
1299 return ofs;
1300}
1301
Tejun Heo82ef04f2008-07-31 17:02:40 +09001302static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -04001303{
1304 unsigned int ofs = mv_scr_offset(sc_reg_in);
1305
Tejun Heoda3dbb12007-07-16 14:29:40 +09001306 if (ofs != 0xffffffffU) {
Tejun Heo82ef04f2008-07-31 17:02:40 +09001307 *val = readl(mv_ap_base(link->ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001308 return 0;
1309 } else
1310 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001311}
1312
Tejun Heo82ef04f2008-07-31 17:02:40 +09001313static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -04001314{
1315 unsigned int ofs = mv_scr_offset(sc_reg_in);
1316
Tejun Heoda3dbb12007-07-16 14:29:40 +09001317 if (ofs != 0xffffffffU) {
Mark Lord20091772009-04-06 15:24:57 -04001318 void __iomem *addr = mv_ap_base(link->ap) + ofs;
1319 if (sc_reg_in == SCR_CONTROL) {
1320 /*
1321 * Workaround for 88SX60x1 FEr SATA#26:
1322 *
1323 * COMRESETs have to take care not to accidently
1324 * put the drive to sleep when writing SCR_CONTROL.
1325 * Setting bits 12..15 prevents this problem.
1326 *
1327 * So if we see an outbound COMMRESET, set those bits.
1328 * Ditto for the followup write that clears the reset.
1329 *
1330 * The proprietary driver does this for
1331 * all chip versions, and so do we.
1332 */
1333 if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
1334 val |= 0xf000;
1335 }
1336 writelfl(val, addr);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001337 return 0;
1338 } else
1339 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001340}
1341
Mark Lordf2738272008-01-26 18:32:29 -05001342static void mv6_dev_config(struct ata_device *adev)
1343{
1344 /*
Mark Lorde49856d2008-04-16 14:59:07 -04001345 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
1346 *
1347 * Gen-II does not support NCQ over a port multiplier
1348 * (no FIS-based switching).
Mark Lordf2738272008-01-26 18:32:29 -05001349 */
Mark Lorde49856d2008-04-16 14:59:07 -04001350 if (adev->flags & ATA_DFLAG_NCQ) {
Mark Lord352fab72008-04-19 14:43:42 -04001351 if (sata_pmp_attached(adev->link->ap)) {
Mark Lorde49856d2008-04-16 14:59:07 -04001352 adev->flags &= ~ATA_DFLAG_NCQ;
Mark Lord352fab72008-04-19 14:43:42 -04001353 ata_dev_printk(adev, KERN_INFO,
1354 "NCQ disabled for command-based switching\n");
Mark Lord352fab72008-04-19 14:43:42 -04001355 }
Mark Lorde49856d2008-04-16 14:59:07 -04001356 }
Mark Lordf2738272008-01-26 18:32:29 -05001357}
1358
Mark Lord3e4a1392008-05-02 02:10:02 -04001359static int mv_qc_defer(struct ata_queued_cmd *qc)
1360{
1361 struct ata_link *link = qc->dev->link;
1362 struct ata_port *ap = link->ap;
1363 struct mv_port_priv *pp = ap->private_data;
1364
1365 /*
Mark Lord29d187b2008-05-02 02:15:37 -04001366 * Don't allow new commands if we're in a delayed EH state
1367 * for NCQ and/or FIS-based switching.
1368 */
1369 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
1370 return ATA_DEFER_PORT;
1371 /*
Mark Lord3e4a1392008-05-02 02:10:02 -04001372 * If the port is completely idle, then allow the new qc.
1373 */
1374 if (ap->nr_active_links == 0)
1375 return 0;
1376
Tejun Heo4bdee6c2008-08-13 20:24:16 +09001377 /*
1378 * The port is operating in host queuing mode (EDMA) with NCQ
1379 * enabled, allow multiple NCQ commands. EDMA also allows
1380 * queueing multiple DMA commands but libata core currently
1381 * doesn't allow it.
1382 */
1383 if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
1384 (pp->pp_flags & MV_PP_FLAG_NCQ_EN) && ata_is_ncq(qc->tf.protocol))
1385 return 0;
1386
Mark Lord3e4a1392008-05-02 02:10:02 -04001387 return ATA_DEFER_PORT;
1388}
1389
Mark Lord08da1752009-02-25 15:13:03 -05001390static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
Mark Lorde49856d2008-04-16 14:59:07 -04001391{
Mark Lord08da1752009-02-25 15:13:03 -05001392 struct mv_port_priv *pp = ap->private_data;
1393 void __iomem *port_mmio;
Mark Lord00f42ea2008-05-02 02:11:45 -04001394
Mark Lord08da1752009-02-25 15:13:03 -05001395 u32 fiscfg, *old_fiscfg = &pp->cached.fiscfg;
1396 u32 ltmode, *old_ltmode = &pp->cached.ltmode;
1397 u32 haltcond, *old_haltcond = &pp->cached.haltcond;
Mark Lord00f42ea2008-05-02 02:11:45 -04001398
Mark Lord08da1752009-02-25 15:13:03 -05001399 ltmode = *old_ltmode & ~LTMODE_BIT8;
1400 haltcond = *old_haltcond | EDMA_ERR_DEV;
Mark Lord00f42ea2008-05-02 02:11:45 -04001401
1402 if (want_fbs) {
Mark Lord08da1752009-02-25 15:13:03 -05001403 fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
1404 ltmode = *old_ltmode | LTMODE_BIT8;
Mark Lord4c299ca2008-05-02 02:16:20 -04001405 if (want_ncq)
Mark Lord08da1752009-02-25 15:13:03 -05001406 haltcond &= ~EDMA_ERR_DEV;
Mark Lord4c299ca2008-05-02 02:16:20 -04001407 else
Mark Lord08da1752009-02-25 15:13:03 -05001408 fiscfg |= FISCFG_WAIT_DEV_ERR;
1409 } else {
1410 fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
Mark Lorde49856d2008-04-16 14:59:07 -04001411 }
Mark Lord00f42ea2008-05-02 02:11:45 -04001412
Mark Lord08da1752009-02-25 15:13:03 -05001413 port_mmio = mv_ap_base(ap);
Mark Lordcae5a292009-04-06 16:43:45 -04001414 mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
1415 mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
1416 mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
Mark Lord0c589122008-01-26 18:31:16 -05001417}
Jeff Garzike4e7b892006-01-31 12:18:41 -05001418
Mark Lorddd2890f2008-05-02 02:10:56 -04001419static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
1420{
1421 struct mv_host_priv *hpriv = ap->host->private_data;
1422 u32 old, new;
1423
1424 /* workaround for 88SX60x1 FEr SATA#25 (part 1) */
Mark Lordcae5a292009-04-06 16:43:45 -04001425 old = readl(hpriv->base + GPIO_PORT_CTL);
Mark Lorddd2890f2008-05-02 02:10:56 -04001426 if (want_ncq)
1427 new = old | (1 << 22);
1428 else
1429 new = old & ~(1 << 22);
1430 if (new != old)
Mark Lordcae5a292009-04-06 16:43:45 -04001431 writel(new, hpriv->base + GPIO_PORT_CTL);
Mark Lorddd2890f2008-05-02 02:10:56 -04001432}
1433
Mark Lordc01e8a22009-02-25 15:14:48 -05001434/**
Mark Lord40f21b12009-03-10 18:51:04 -04001435 * mv_bmdma_enable - set a magic bit on GEN_IIE to allow bmdma
1436 * @ap: Port being initialized
Mark Lordc01e8a22009-02-25 15:14:48 -05001437 *
1438 * There are two DMA modes on these chips: basic DMA, and EDMA.
1439 *
1440 * Bit-0 of the "EDMA RESERVED" register enables/disables use
1441 * of basic DMA on the GEN_IIE versions of the chips.
1442 *
1443 * This bit survives EDMA resets, and must be set for basic DMA
1444 * to function, and should be cleared when EDMA is active.
1445 */
1446static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
1447{
1448 struct mv_port_priv *pp = ap->private_data;
1449 u32 new, *old = &pp->cached.unknown_rsvd;
1450
1451 if (enable_bmdma)
1452 new = *old | 1;
1453 else
1454 new = *old & ~1;
Mark Lordcae5a292009-04-06 16:43:45 -04001455 mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
Mark Lordc01e8a22009-02-25 15:14:48 -05001456}
1457
Mark Lord000b3442009-03-15 11:33:19 -04001458/*
1459 * SOC chips have an issue whereby the HDD LEDs don't always blink
1460 * during I/O when NCQ is enabled. Enabling a special "LED blink" mode
1461 * of the SOC takes care of it, generating a steady blink rate when
1462 * any drive on the chip is active.
1463 *
1464 * Unfortunately, the blink mode is a global hardware setting for the SOC,
1465 * so we must use it whenever at least one port on the SOC has NCQ enabled.
1466 *
1467 * We turn "LED blink" off when NCQ is not in use anywhere, because the normal
1468 * LED operation works then, and provides better (more accurate) feedback.
1469 *
1470 * Note that this code assumes that an SOC never has more than one HC onboard.
1471 */
1472static void mv_soc_led_blink_enable(struct ata_port *ap)
1473{
1474 struct ata_host *host = ap->host;
1475 struct mv_host_priv *hpriv = host->private_data;
1476 void __iomem *hc_mmio;
1477 u32 led_ctrl;
1478
1479 if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
1480 return;
1481 hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
1482 hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
Mark Lordcae5a292009-04-06 16:43:45 -04001483 led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
1484 writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
Mark Lord000b3442009-03-15 11:33:19 -04001485}
1486
1487static void mv_soc_led_blink_disable(struct ata_port *ap)
1488{
1489 struct ata_host *host = ap->host;
1490 struct mv_host_priv *hpriv = host->private_data;
1491 void __iomem *hc_mmio;
1492 u32 led_ctrl;
1493 unsigned int port;
1494
1495 if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
1496 return;
1497
1498 /* disable led-blink only if no ports are using NCQ */
1499 for (port = 0; port < hpriv->n_ports; port++) {
1500 struct ata_port *this_ap = host->ports[port];
1501 struct mv_port_priv *pp = this_ap->private_data;
1502
1503 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
1504 return;
1505 }
1506
1507 hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
1508 hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
Mark Lordcae5a292009-04-06 16:43:45 -04001509 led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
1510 writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
Mark Lord000b3442009-03-15 11:33:19 -04001511}
1512
Mark Lord00b81232009-01-30 18:47:51 -05001513static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
Jeff Garzike4e7b892006-01-31 12:18:41 -05001514{
1515 u32 cfg;
Mark Lorde12bef52008-03-31 19:33:56 -04001516 struct mv_port_priv *pp = ap->private_data;
1517 struct mv_host_priv *hpriv = ap->host->private_data;
1518 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzike4e7b892006-01-31 12:18:41 -05001519
1520 /* set up non-NCQ EDMA configuration */
1521 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
Mark Lordd16ab3f2009-02-25 15:17:43 -05001522 pp->pp_flags &=
1523 ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
Jeff Garzike4e7b892006-01-31 12:18:41 -05001524
1525 if (IS_GEN_I(hpriv))
1526 cfg |= (1 << 8); /* enab config burst size mask */
1527
Mark Lorddd2890f2008-05-02 02:10:56 -04001528 else if (IS_GEN_II(hpriv)) {
Jeff Garzike4e7b892006-01-31 12:18:41 -05001529 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
Mark Lorddd2890f2008-05-02 02:10:56 -04001530 mv_60x1_errata_sata25(ap, want_ncq);
Jeff Garzike4e7b892006-01-31 12:18:41 -05001531
Mark Lorddd2890f2008-05-02 02:10:56 -04001532 } else if (IS_GEN_IIE(hpriv)) {
Mark Lord00f42ea2008-05-02 02:11:45 -04001533 int want_fbs = sata_pmp_attached(ap);
1534 /*
1535 * Possible future enhancement:
1536 *
1537 * The chip can use FBS with non-NCQ, if we allow it,
1538 * But first we need to have the error handling in place
1539 * for this mode (datasheet section 7.3.15.4.2.3).
1540 * So disallow non-NCQ FBS for now.
1541 */
1542 want_fbs &= want_ncq;
1543
Mark Lord08da1752009-02-25 15:13:03 -05001544 mv_config_fbs(ap, want_ncq, want_fbs);
Mark Lord00f42ea2008-05-02 02:11:45 -04001545
1546 if (want_fbs) {
1547 pp->pp_flags |= MV_PP_FLAG_FBS_EN;
1548 cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
1549 }
1550
Jeff Garzike728eab2007-02-25 02:53:41 -05001551 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
Mark Lord00b81232009-01-30 18:47:51 -05001552 if (want_edma) {
1553 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1554 if (!IS_SOC(hpriv))
1555 cfg |= (1 << 18); /* enab early completion */
1556 }
Mark Lord616d4a92008-05-02 02:08:32 -04001557 if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
1558 cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
Mark Lordc01e8a22009-02-25 15:14:48 -05001559 mv_bmdma_enable_iie(ap, !want_edma);
Mark Lord000b3442009-03-15 11:33:19 -04001560
1561 if (IS_SOC(hpriv)) {
1562 if (want_ncq)
1563 mv_soc_led_blink_enable(ap);
1564 else
1565 mv_soc_led_blink_disable(ap);
1566 }
Jeff Garzike4e7b892006-01-31 12:18:41 -05001567 }
1568
Mark Lord72109162008-01-26 18:31:33 -05001569 if (want_ncq) {
1570 cfg |= EDMA_CFG_NCQ;
1571 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
Mark Lord00b81232009-01-30 18:47:51 -05001572 }
Mark Lord72109162008-01-26 18:31:33 -05001573
Mark Lordcae5a292009-04-06 16:43:45 -04001574 writelfl(cfg, port_mmio + EDMA_CFG);
Jeff Garzike4e7b892006-01-31 12:18:41 -05001575}
1576
Mark Lordda2fa9b2008-01-26 18:32:45 -05001577static void mv_port_free_dma_mem(struct ata_port *ap)
1578{
1579 struct mv_host_priv *hpriv = ap->host->private_data;
1580 struct mv_port_priv *pp = ap->private_data;
Mark Lordeb73d552008-01-29 13:24:00 -05001581 int tag;
Mark Lordda2fa9b2008-01-26 18:32:45 -05001582
1583 if (pp->crqb) {
1584 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1585 pp->crqb = NULL;
1586 }
1587 if (pp->crpb) {
1588 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1589 pp->crpb = NULL;
1590 }
Mark Lordeb73d552008-01-29 13:24:00 -05001591 /*
1592 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1593 * For later hardware, we have one unique sg_tbl per NCQ tag.
1594 */
1595 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1596 if (pp->sg_tbl[tag]) {
1597 if (tag == 0 || !IS_GEN_I(hpriv))
1598 dma_pool_free(hpriv->sg_tbl_pool,
1599 pp->sg_tbl[tag],
1600 pp->sg_tbl_dma[tag]);
1601 pp->sg_tbl[tag] = NULL;
1602 }
Mark Lordda2fa9b2008-01-26 18:32:45 -05001603 }
1604}
1605
Brett Russ05b308e2005-10-05 17:08:53 -04001606/**
1607 * mv_port_start - Port specific init/start routine.
1608 * @ap: ATA channel to manipulate
1609 *
1610 * Allocate and point to DMA memory, init port private memory,
1611 * zero indices.
1612 *
1613 * LOCKING:
1614 * Inherited from caller.
1615 */
Brett Russ31961942005-09-30 01:36:00 -04001616static int mv_port_start(struct ata_port *ap)
1617{
Jeff Garzikcca39742006-08-24 03:19:22 -04001618 struct device *dev = ap->host->dev;
1619 struct mv_host_priv *hpriv = ap->host->private_data;
Brett Russ31961942005-09-30 01:36:00 -04001620 struct mv_port_priv *pp;
Mark Lord933cb8e2009-04-06 12:30:43 -04001621 unsigned long flags;
James Bottomleydde20202008-02-19 11:36:56 +01001622 int tag;
Brett Russ31961942005-09-30 01:36:00 -04001623
Tejun Heo24dc5f32007-01-20 16:00:28 +09001624 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
Jeff Garzik6037d6b2005-11-04 22:08:00 -05001625 if (!pp)
Tejun Heo24dc5f32007-01-20 16:00:28 +09001626 return -ENOMEM;
Mark Lordda2fa9b2008-01-26 18:32:45 -05001627 ap->private_data = pp;
Brett Russ31961942005-09-30 01:36:00 -04001628
Mark Lordda2fa9b2008-01-26 18:32:45 -05001629 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1630 if (!pp->crqb)
1631 return -ENOMEM;
1632 memset(pp->crqb, 0, MV_CRQB_Q_SZ);
Brett Russ31961942005-09-30 01:36:00 -04001633
Mark Lordda2fa9b2008-01-26 18:32:45 -05001634 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1635 if (!pp->crpb)
1636 goto out_port_free_dma_mem;
1637 memset(pp->crpb, 0, MV_CRPB_Q_SZ);
Brett Russ31961942005-09-30 01:36:00 -04001638
Mark Lord3bd0a702008-06-18 12:11:16 -04001639 /* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
1640 if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
1641 ap->flags |= ATA_FLAG_AN;
Mark Lordeb73d552008-01-29 13:24:00 -05001642 /*
1643 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1644 * For later hardware, we need one unique sg_tbl per NCQ tag.
1645 */
1646 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1647 if (tag == 0 || !IS_GEN_I(hpriv)) {
1648 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1649 GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1650 if (!pp->sg_tbl[tag])
1651 goto out_port_free_dma_mem;
1652 } else {
1653 pp->sg_tbl[tag] = pp->sg_tbl[0];
1654 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1655 }
1656 }
Mark Lord933cb8e2009-04-06 12:30:43 -04001657
1658 spin_lock_irqsave(ap->lock, flags);
Mark Lord08da1752009-02-25 15:13:03 -05001659 mv_save_cached_regs(ap);
Mark Lord66e57a22009-01-30 18:52:58 -05001660 mv_edma_cfg(ap, 0, 0);
Mark Lord933cb8e2009-04-06 12:30:43 -04001661 spin_unlock_irqrestore(ap->lock, flags);
1662
Brett Russ31961942005-09-30 01:36:00 -04001663 return 0;
Mark Lordda2fa9b2008-01-26 18:32:45 -05001664
1665out_port_free_dma_mem:
1666 mv_port_free_dma_mem(ap);
1667 return -ENOMEM;
Brett Russ31961942005-09-30 01:36:00 -04001668}
1669
Brett Russ05b308e2005-10-05 17:08:53 -04001670/**
1671 * mv_port_stop - Port specific cleanup/stop routine.
1672 * @ap: ATA channel to manipulate
1673 *
1674 * Stop DMA, cleanup port memory.
1675 *
1676 * LOCKING:
Jeff Garzikcca39742006-08-24 03:19:22 -04001677 * This routine uses the host lock to protect the DMA stop.
Brett Russ05b308e2005-10-05 17:08:53 -04001678 */
Brett Russ31961942005-09-30 01:36:00 -04001679static void mv_port_stop(struct ata_port *ap)
1680{
Mark Lord933cb8e2009-04-06 12:30:43 -04001681 unsigned long flags;
1682
1683 spin_lock_irqsave(ap->lock, flags);
Mark Lorde12bef52008-03-31 19:33:56 -04001684 mv_stop_edma(ap);
Mark Lord88e675e2008-05-17 13:36:30 -04001685 mv_enable_port_irqs(ap, 0);
Mark Lord933cb8e2009-04-06 12:30:43 -04001686 spin_unlock_irqrestore(ap->lock, flags);
Mark Lordda2fa9b2008-01-26 18:32:45 -05001687 mv_port_free_dma_mem(ap);
Brett Russ31961942005-09-30 01:36:00 -04001688}
1689
Brett Russ05b308e2005-10-05 17:08:53 -04001690/**
1691 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1692 * @qc: queued command whose SG list to source from
1693 *
1694 * Populate the SG list and mark the last entry.
1695 *
1696 * LOCKING:
1697 * Inherited from caller.
1698 */
Jeff Garzik6c087722007-10-12 00:16:23 -04001699static void mv_fill_sg(struct ata_queued_cmd *qc)
Brett Russ31961942005-09-30 01:36:00 -04001700{
1701 struct mv_port_priv *pp = qc->ap->private_data;
Jeff Garzik972c26b2005-10-18 22:14:54 -04001702 struct scatterlist *sg;
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04001703 struct mv_sg *mv_sg, *last_sg = NULL;
Tejun Heoff2aeb12007-12-05 16:43:11 +09001704 unsigned int si;
Brett Russ31961942005-09-30 01:36:00 -04001705
Mark Lordeb73d552008-01-29 13:24:00 -05001706 mv_sg = pp->sg_tbl[qc->tag];
Tejun Heoff2aeb12007-12-05 16:43:11 +09001707 for_each_sg(qc->sg, sg, qc->n_elem, si) {
Jeff Garzikd88184f2007-02-26 01:26:06 -05001708 dma_addr_t addr = sg_dma_address(sg);
1709 u32 sg_len = sg_dma_len(sg);
Brett Russ31961942005-09-30 01:36:00 -04001710
Olof Johansson4007b492007-10-02 20:45:27 -05001711 while (sg_len) {
1712 u32 offset = addr & 0xffff;
1713 u32 len = sg_len;
Brett Russ31961942005-09-30 01:36:00 -04001714
Mark Lord32cd11a2009-02-01 16:50:32 -05001715 if (offset + len > 0x10000)
Olof Johansson4007b492007-10-02 20:45:27 -05001716 len = 0x10000 - offset;
Jeff Garzik972c26b2005-10-18 22:14:54 -04001717
Olof Johansson4007b492007-10-02 20:45:27 -05001718 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1719 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
Jeff Garzik6c087722007-10-12 00:16:23 -04001720 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
Mark Lord32cd11a2009-02-01 16:50:32 -05001721 mv_sg->reserved = 0;
Olof Johansson4007b492007-10-02 20:45:27 -05001722
1723 sg_len -= len;
1724 addr += len;
1725
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04001726 last_sg = mv_sg;
Olof Johansson4007b492007-10-02 20:45:27 -05001727 mv_sg++;
Olof Johansson4007b492007-10-02 20:45:27 -05001728 }
Brett Russ31961942005-09-30 01:36:00 -04001729 }
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04001730
1731 if (likely(last_sg))
1732 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
Mark Lord32cd11a2009-02-01 16:50:32 -05001733 mb(); /* ensure data structure is visible to the chipset */
Brett Russ31961942005-09-30 01:36:00 -04001734}
1735
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001736static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001737{
Mark Lord559eeda2006-05-19 16:40:15 -04001738 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001739 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001740 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001741}
1742
Brett Russ05b308e2005-10-05 17:08:53 -04001743/**
Mark Lordda142652009-01-30 18:51:54 -05001744 * mv_sff_irq_clear - Clear hardware interrupt after DMA.
1745 * @ap: Port associated with this ATA transaction.
1746 *
1747 * We need this only for ATAPI bmdma transactions,
1748 * as otherwise we experience spurious interrupts
1749 * after libata-sff handles the bmdma interrupts.
1750 */
1751static void mv_sff_irq_clear(struct ata_port *ap)
1752{
1753 mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
1754}
1755
1756/**
1757 * mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
1758 * @qc: queued command to check for chipset/DMA compatibility.
1759 *
1760 * The bmdma engines cannot handle speculative data sizes
1761 * (bytecount under/over flow). So only allow DMA for
1762 * data transfer commands with known data sizes.
1763 *
1764 * LOCKING:
1765 * Inherited from caller.
1766 */
1767static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
1768{
1769 struct scsi_cmnd *scmd = qc->scsicmd;
1770
1771 if (scmd) {
1772 switch (scmd->cmnd[0]) {
1773 case READ_6:
1774 case READ_10:
1775 case READ_12:
1776 case WRITE_6:
1777 case WRITE_10:
1778 case WRITE_12:
1779 case GPCMD_READ_CD:
1780 case GPCMD_SEND_DVD_STRUCTURE:
1781 case GPCMD_SEND_CUE_SHEET:
1782 return 0; /* DMA is safe */
1783 }
1784 }
1785 return -EOPNOTSUPP; /* use PIO instead */
1786}
1787
1788/**
1789 * mv_bmdma_setup - Set up BMDMA transaction
1790 * @qc: queued command to prepare DMA for.
1791 *
1792 * LOCKING:
1793 * Inherited from caller.
1794 */
1795static void mv_bmdma_setup(struct ata_queued_cmd *qc)
1796{
1797 struct ata_port *ap = qc->ap;
1798 void __iomem *port_mmio = mv_ap_base(ap);
1799 struct mv_port_priv *pp = ap->private_data;
1800
1801 mv_fill_sg(qc);
1802
1803 /* clear all DMA cmd bits */
Mark Lordcae5a292009-04-06 16:43:45 -04001804 writel(0, port_mmio + BMDMA_CMD);
Mark Lordda142652009-01-30 18:51:54 -05001805
1806 /* load PRD table addr. */
1807 writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16,
Mark Lordcae5a292009-04-06 16:43:45 -04001808 port_mmio + BMDMA_PRD_HIGH);
Mark Lordda142652009-01-30 18:51:54 -05001809 writelfl(pp->sg_tbl_dma[qc->tag],
Mark Lordcae5a292009-04-06 16:43:45 -04001810 port_mmio + BMDMA_PRD_LOW);
Mark Lordda142652009-01-30 18:51:54 -05001811
1812 /* issue r/w command */
1813 ap->ops->sff_exec_command(ap, &qc->tf);
1814}
1815
1816/**
1817 * mv_bmdma_start - Start a BMDMA transaction
1818 * @qc: queued command to start DMA on.
1819 *
1820 * LOCKING:
1821 * Inherited from caller.
1822 */
1823static void mv_bmdma_start(struct ata_queued_cmd *qc)
1824{
1825 struct ata_port *ap = qc->ap;
1826 void __iomem *port_mmio = mv_ap_base(ap);
1827 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
1828 u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;
1829
1830 /* start host DMA transaction */
Mark Lordcae5a292009-04-06 16:43:45 -04001831 writelfl(cmd, port_mmio + BMDMA_CMD);
Mark Lordda142652009-01-30 18:51:54 -05001832}
1833
1834/**
1835 * mv_bmdma_stop - Stop BMDMA transfer
1836 * @qc: queued command to stop DMA on.
1837 *
1838 * Clears the ATA_DMA_START flag in the bmdma control register
1839 *
1840 * LOCKING:
1841 * Inherited from caller.
1842 */
1843static void mv_bmdma_stop(struct ata_queued_cmd *qc)
1844{
1845 struct ata_port *ap = qc->ap;
1846 void __iomem *port_mmio = mv_ap_base(ap);
1847 u32 cmd;
1848
1849 /* clear start/stop bit */
Mark Lordcae5a292009-04-06 16:43:45 -04001850 cmd = readl(port_mmio + BMDMA_CMD);
Mark Lordda142652009-01-30 18:51:54 -05001851 cmd &= ~ATA_DMA_START;
Mark Lordcae5a292009-04-06 16:43:45 -04001852 writelfl(cmd, port_mmio + BMDMA_CMD);
Mark Lordda142652009-01-30 18:51:54 -05001853
1854 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
1855 ata_sff_dma_pause(ap);
1856}
1857
1858/**
1859 * mv_bmdma_status - Read BMDMA status
1860 * @ap: port for which to retrieve DMA status.
1861 *
1862 * Read and return equivalent of the sff BMDMA status register.
1863 *
1864 * LOCKING:
1865 * Inherited from caller.
1866 */
1867static u8 mv_bmdma_status(struct ata_port *ap)
1868{
1869 void __iomem *port_mmio = mv_ap_base(ap);
1870 u32 reg, status;
1871
1872 /*
1873 * Other bits are valid only if ATA_DMA_ACTIVE==0,
1874 * and the ATA_DMA_INTR bit doesn't exist.
1875 */
Mark Lordcae5a292009-04-06 16:43:45 -04001876 reg = readl(port_mmio + BMDMA_STATUS);
Mark Lordda142652009-01-30 18:51:54 -05001877 if (reg & ATA_DMA_ACTIVE)
1878 status = ATA_DMA_ACTIVE;
1879 else
1880 status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
1881 return status;
1882}
1883
1884/**
Brett Russ05b308e2005-10-05 17:08:53 -04001885 * mv_qc_prep - Host specific command preparation.
1886 * @qc: queued command to prepare
1887 *
1888 * This routine simply redirects to the general purpose routine
1889 * if command is not DMA. Else, it handles prep of the CRQB
1890 * (command request block), does some sanity checking, and calls
1891 * the SG load routine.
1892 *
1893 * LOCKING:
1894 * Inherited from caller.
1895 */
Brett Russ31961942005-09-30 01:36:00 -04001896static void mv_qc_prep(struct ata_queued_cmd *qc)
1897{
1898 struct ata_port *ap = qc->ap;
1899 struct mv_port_priv *pp = ap->private_data;
Mark Lorde1469872006-05-22 19:02:03 -04001900 __le16 *cw;
Brett Russ31961942005-09-30 01:36:00 -04001901 struct ata_taskfile *tf;
1902 u16 flags = 0;
Mark Lorda6432432006-05-19 16:36:36 -04001903 unsigned in_index;
Brett Russ31961942005-09-30 01:36:00 -04001904
Mark Lord138bfdd2008-01-26 18:33:18 -05001905 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1906 (qc->tf.protocol != ATA_PROT_NCQ))
Brett Russ31961942005-09-30 01:36:00 -04001907 return;
Brett Russ20f733e2005-09-01 18:26:17 -04001908
Brett Russ31961942005-09-30 01:36:00 -04001909 /* Fill in command request block
1910 */
Jeff Garzike4e7b892006-01-31 12:18:41 -05001911 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
Brett Russ31961942005-09-30 01:36:00 -04001912 flags |= CRQB_FLAG_READ;
Tejun Heobeec7db2006-02-11 19:11:13 +09001913 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
Brett Russ31961942005-09-30 01:36:00 -04001914 flags |= qc->tag << CRQB_TAG_SHIFT;
Mark Lorde49856d2008-04-16 14:59:07 -04001915 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
Brett Russ31961942005-09-30 01:36:00 -04001916
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001917 /* get current queue index from software */
Mark Lordfcfb1f72008-04-19 15:06:40 -04001918 in_index = pp->req_idx;
Brett Russ31961942005-09-30 01:36:00 -04001919
Mark Lorda6432432006-05-19 16:36:36 -04001920 pp->crqb[in_index].sg_addr =
Mark Lordeb73d552008-01-29 13:24:00 -05001921 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
Mark Lorda6432432006-05-19 16:36:36 -04001922 pp->crqb[in_index].sg_addr_hi =
Mark Lordeb73d552008-01-29 13:24:00 -05001923 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
Mark Lorda6432432006-05-19 16:36:36 -04001924 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1925
1926 cw = &pp->crqb[in_index].ata_cmd[0];
Brett Russ31961942005-09-30 01:36:00 -04001927 tf = &qc->tf;
1928
1929 /* Sadly, the CRQB cannot accomodate all registers--there are
1930 * only 11 bytes...so we must pick and choose required
1931 * registers based on the command. So, we drop feature and
1932 * hob_feature for [RW] DMA commands, but they are needed for
Mark Lordcd12e1f2009-01-19 18:06:28 -05001933 * NCQ. NCQ will drop hob_nsect, which is not needed there
1934 * (nsect is used only for the tag; feat/hob_feat hold true nsect).
Brett Russ31961942005-09-30 01:36:00 -04001935 */
1936 switch (tf->command) {
1937 case ATA_CMD_READ:
1938 case ATA_CMD_READ_EXT:
1939 case ATA_CMD_WRITE:
1940 case ATA_CMD_WRITE_EXT:
Jens Axboec15d85c2006-02-15 15:59:25 +01001941 case ATA_CMD_WRITE_FUA_EXT:
Brett Russ31961942005-09-30 01:36:00 -04001942 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1943 break;
Brett Russ31961942005-09-30 01:36:00 -04001944 case ATA_CMD_FPDMA_READ:
1945 case ATA_CMD_FPDMA_WRITE:
Jeff Garzik8b260242005-11-12 12:32:50 -05001946 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
Brett Russ31961942005-09-30 01:36:00 -04001947 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1948 break;
Brett Russ31961942005-09-30 01:36:00 -04001949 default:
1950 /* The only other commands EDMA supports in non-queued and
1951 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1952 * of which are defined/used by Linux. If we get here, this
1953 * driver needs work.
1954 *
1955 * FIXME: modify libata to give qc_prep a return value and
1956 * return error here.
1957 */
1958 BUG_ON(tf->command);
1959 break;
1960 }
1961 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1962 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1963 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1964 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1965 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1966 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1967 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1968 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1969 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1970
Jeff Garzike4e7b892006-01-31 12:18:41 -05001971 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
Brett Russ31961942005-09-30 01:36:00 -04001972 return;
Jeff Garzike4e7b892006-01-31 12:18:41 -05001973 mv_fill_sg(qc);
1974}
1975
1976/**
1977 * mv_qc_prep_iie - Host specific command preparation.
1978 * @qc: queued command to prepare
1979 *
1980 * This routine simply redirects to the general purpose routine
1981 * if command is not DMA. Else, it handles prep of the CRQB
1982 * (command request block), does some sanity checking, and calls
1983 * the SG load routine.
1984 *
1985 * LOCKING:
1986 * Inherited from caller.
1987 */
1988static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1989{
1990 struct ata_port *ap = qc->ap;
1991 struct mv_port_priv *pp = ap->private_data;
1992 struct mv_crqb_iie *crqb;
1993 struct ata_taskfile *tf;
Mark Lorda6432432006-05-19 16:36:36 -04001994 unsigned in_index;
Jeff Garzike4e7b892006-01-31 12:18:41 -05001995 u32 flags = 0;
1996
Mark Lord138bfdd2008-01-26 18:33:18 -05001997 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1998 (qc->tf.protocol != ATA_PROT_NCQ))
Jeff Garzike4e7b892006-01-31 12:18:41 -05001999 return;
2000
Mark Lorde12bef52008-03-31 19:33:56 -04002001 /* Fill in Gen IIE command request block */
Jeff Garzike4e7b892006-01-31 12:18:41 -05002002 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
2003 flags |= CRQB_FLAG_READ;
2004
Tejun Heobeec7db2006-02-11 19:11:13 +09002005 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
Jeff Garzike4e7b892006-01-31 12:18:41 -05002006 flags |= qc->tag << CRQB_TAG_SHIFT;
Mark Lord8c0aeb42008-01-26 18:31:48 -05002007 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
Mark Lorde49856d2008-04-16 14:59:07 -04002008 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
Jeff Garzike4e7b892006-01-31 12:18:41 -05002009
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002010 /* get current queue index from software */
Mark Lordfcfb1f72008-04-19 15:06:40 -04002011 in_index = pp->req_idx;
Mark Lorda6432432006-05-19 16:36:36 -04002012
2013 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
Mark Lordeb73d552008-01-29 13:24:00 -05002014 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
2015 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
Jeff Garzike4e7b892006-01-31 12:18:41 -05002016 crqb->flags = cpu_to_le32(flags);
2017
2018 tf = &qc->tf;
2019 crqb->ata_cmd[0] = cpu_to_le32(
2020 (tf->command << 16) |
2021 (tf->feature << 24)
2022 );
2023 crqb->ata_cmd[1] = cpu_to_le32(
2024 (tf->lbal << 0) |
2025 (tf->lbam << 8) |
2026 (tf->lbah << 16) |
2027 (tf->device << 24)
2028 );
2029 crqb->ata_cmd[2] = cpu_to_le32(
2030 (tf->hob_lbal << 0) |
2031 (tf->hob_lbam << 8) |
2032 (tf->hob_lbah << 16) |
2033 (tf->hob_feature << 24)
2034 );
2035 crqb->ata_cmd[3] = cpu_to_le32(
2036 (tf->nsect << 0) |
2037 (tf->hob_nsect << 8)
2038 );
2039
2040 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2041 return;
Brett Russ31961942005-09-30 01:36:00 -04002042 mv_fill_sg(qc);
2043}
2044
Brett Russ05b308e2005-10-05 17:08:53 -04002045/**
Mark Lordd16ab3f2009-02-25 15:17:43 -05002046 * mv_sff_check_status - fetch device status, if valid
2047 * @ap: ATA port to fetch status from
2048 *
2049 * When using command issue via mv_qc_issue_fis(),
2050 * the initial ATA_BUSY state does not show up in the
2051 * ATA status (shadow) register. This can confuse libata!
2052 *
2053 * So we have a hook here to fake ATA_BUSY for that situation,
2054 * until the first time a BUSY, DRQ, or ERR bit is seen.
2055 *
2056 * The rest of the time, it simply returns the ATA status register.
2057 */
2058static u8 mv_sff_check_status(struct ata_port *ap)
2059{
2060 u8 stat = ioread8(ap->ioaddr.status_addr);
2061 struct mv_port_priv *pp = ap->private_data;
2062
2063 if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
2064 if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
2065 pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
2066 else
2067 stat = ATA_BUSY;
2068 }
2069 return stat;
2070}
2071
2072/**
Mark Lord70f8b792009-02-25 15:19:20 -05002073 * mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
2074 * @fis: fis to be sent
2075 * @nwords: number of 32-bit words in the fis
2076 */
2077static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
2078{
2079 void __iomem *port_mmio = mv_ap_base(ap);
2080 u32 ifctl, old_ifctl, ifstat;
2081 int i, timeout = 200, final_word = nwords - 1;
2082
2083 /* Initiate FIS transmission mode */
Mark Lordcae5a292009-04-06 16:43:45 -04002084 old_ifctl = readl(port_mmio + SATA_IFCTL);
Mark Lord70f8b792009-02-25 15:19:20 -05002085 ifctl = 0x100 | (old_ifctl & 0xf);
Mark Lordcae5a292009-04-06 16:43:45 -04002086 writelfl(ifctl, port_mmio + SATA_IFCTL);
Mark Lord70f8b792009-02-25 15:19:20 -05002087
2088 /* Send all words of the FIS except for the final word */
2089 for (i = 0; i < final_word; ++i)
Mark Lordcae5a292009-04-06 16:43:45 -04002090 writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS);
Mark Lord70f8b792009-02-25 15:19:20 -05002091
2092 /* Flag end-of-transmission, and then send the final word */
Mark Lordcae5a292009-04-06 16:43:45 -04002093 writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);
2094 writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS);
Mark Lord70f8b792009-02-25 15:19:20 -05002095
2096 /*
2097 * Wait for FIS transmission to complete.
2098 * This typically takes just a single iteration.
2099 */
2100 do {
Mark Lordcae5a292009-04-06 16:43:45 -04002101 ifstat = readl(port_mmio + SATA_IFSTAT);
Mark Lord70f8b792009-02-25 15:19:20 -05002102 } while (!(ifstat & 0x1000) && --timeout);
2103
2104 /* Restore original port configuration */
Mark Lordcae5a292009-04-06 16:43:45 -04002105 writelfl(old_ifctl, port_mmio + SATA_IFCTL);
Mark Lord70f8b792009-02-25 15:19:20 -05002106
2107 /* See if it worked */
2108 if ((ifstat & 0x3000) != 0x1000) {
2109 ata_port_printk(ap, KERN_WARNING,
2110 "%s transmission error, ifstat=%08x\n",
2111 __func__, ifstat);
2112 return AC_ERR_OTHER;
2113 }
2114 return 0;
2115}
2116
2117/**
2118 * mv_qc_issue_fis - Issue a command directly as a FIS
2119 * @qc: queued command to start
2120 *
2121 * Note that the ATA shadow registers are not updated
2122 * after command issue, so the device will appear "READY"
2123 * if polled, even while it is BUSY processing the command.
2124 *
2125 * So we use a status hook to fake ATA_BUSY until the drive changes state.
2126 *
2127 * Note: we don't get updated shadow regs on *completion*
2128 * of non-data commands. So avoid sending them via this function,
2129 * as they will appear to have completed immediately.
2130 *
2131 * GEN_IIE has special registers that we could get the result tf from,
2132 * but earlier chipsets do not. For now, we ignore those registers.
2133 */
2134static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
2135{
2136 struct ata_port *ap = qc->ap;
2137 struct mv_port_priv *pp = ap->private_data;
2138 struct ata_link *link = qc->dev->link;
2139 u32 fis[5];
2140 int err = 0;
2141
2142 ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
2143 err = mv_send_fis(ap, fis, sizeof(fis) / sizeof(fis[0]));
2144 if (err)
2145 return err;
2146
2147 switch (qc->tf.protocol) {
2148 case ATAPI_PROT_PIO:
2149 pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2150 /* fall through */
2151 case ATAPI_PROT_NODATA:
2152 ap->hsm_task_state = HSM_ST_FIRST;
2153 break;
2154 case ATA_PROT_PIO:
2155 pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2156 if (qc->tf.flags & ATA_TFLAG_WRITE)
2157 ap->hsm_task_state = HSM_ST_FIRST;
2158 else
2159 ap->hsm_task_state = HSM_ST;
2160 break;
2161 default:
2162 ap->hsm_task_state = HSM_ST_LAST;
2163 break;
2164 }
2165
2166 if (qc->tf.flags & ATA_TFLAG_POLLING)
2167 ata_pio_queue_task(ap, qc, 0);
2168 return 0;
2169}
2170
2171/**
Brett Russ05b308e2005-10-05 17:08:53 -04002172 * mv_qc_issue - Initiate a command to the host
2173 * @qc: queued command to start
2174 *
2175 * This routine simply redirects to the general purpose routine
2176 * if command is not DMA. Else, it sanity checks our local
2177 * caches of the request producer/consumer indices then enables
2178 * DMA and bumps the request producer index.
2179 *
2180 * LOCKING:
2181 * Inherited from caller.
2182 */
Tejun Heo9a3d9eb2006-01-23 13:09:36 +09002183static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
Brett Russ31961942005-09-30 01:36:00 -04002184{
Mark Lordf48765c2009-01-30 18:48:41 -05002185 static int limit_warnings = 10;
Jeff Garzikc5d3e452007-07-11 18:30:50 -04002186 struct ata_port *ap = qc->ap;
2187 void __iomem *port_mmio = mv_ap_base(ap);
2188 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002189 u32 in_index;
Mark Lord42ed8932009-02-25 15:15:39 -05002190 unsigned int port_irqs;
Brett Russ31961942005-09-30 01:36:00 -04002191
Mark Lordd16ab3f2009-02-25 15:17:43 -05002192 pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */
2193
Mark Lordf48765c2009-01-30 18:48:41 -05002194 switch (qc->tf.protocol) {
2195 case ATA_PROT_DMA:
2196 case ATA_PROT_NCQ:
2197 mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
2198 pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2199 in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
2200
2201 /* Write the request in pointer to kick the EDMA to life */
2202 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
Mark Lordcae5a292009-04-06 16:43:45 -04002203 port_mmio + EDMA_REQ_Q_IN_PTR);
Mark Lordf48765c2009-01-30 18:48:41 -05002204 return 0;
2205
2206 case ATA_PROT_PIO:
Mark Lordc6112bd2008-06-18 12:13:02 -04002207 /*
2208 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
2209 *
2210 * Someday, we might implement special polling workarounds
2211 * for these, but it all seems rather unnecessary since we
2212 * normally use only DMA for commands which transfer more
2213 * than a single block of data.
2214 *
2215 * Much of the time, this could just work regardless.
2216 * So for now, just log the incident, and allow the attempt.
2217 */
Mark Lordc7843e82008-06-18 21:57:42 -04002218 if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
Mark Lordc6112bd2008-06-18 12:13:02 -04002219 --limit_warnings;
2220 ata_link_printk(qc->dev->link, KERN_WARNING, DRV_NAME
2221 ": attempting PIO w/multiple DRQ: "
2222 "this may fail due to h/w errata\n");
2223 }
Mark Lordf48765c2009-01-30 18:48:41 -05002224 /* drop through */
Mark Lord42ed8932009-02-25 15:15:39 -05002225 case ATA_PROT_NODATA:
Mark Lordf48765c2009-01-30 18:48:41 -05002226 case ATAPI_PROT_PIO:
Mark Lord42ed8932009-02-25 15:15:39 -05002227 case ATAPI_PROT_NODATA:
2228 if (ap->flags & ATA_FLAG_PIO_POLLING)
2229 qc->tf.flags |= ATA_TFLAG_POLLING;
2230 break;
Brett Russ31961942005-09-30 01:36:00 -04002231 }
Mark Lord42ed8932009-02-25 15:15:39 -05002232
2233 if (qc->tf.flags & ATA_TFLAG_POLLING)
2234 port_irqs = ERR_IRQ; /* mask device interrupt when polling */
2235 else
2236 port_irqs = ERR_IRQ | DONE_IRQ; /* unmask all interrupts */
2237
2238 /*
2239 * We're about to send a non-EDMA capable command to the
2240 * port. Turn off EDMA so there won't be problems accessing
2241 * shadow block, etc registers.
2242 */
2243 mv_stop_edma(ap);
2244 mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
2245 mv_pmp_select(ap, qc->dev->link->pmp);
Mark Lord70f8b792009-02-25 15:19:20 -05002246
2247 if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
2248 struct mv_host_priv *hpriv = ap->host->private_data;
2249 /*
2250 * Workaround for 88SX60x1 FEr SATA#25 (part 2).
Mark Lord40f21b12009-03-10 18:51:04 -04002251 *
Mark Lord70f8b792009-02-25 15:19:20 -05002252 * After any NCQ error, the READ_LOG_EXT command
2253 * from libata-eh *must* use mv_qc_issue_fis().
2254 * Otherwise it might fail, due to chip errata.
2255 *
2256 * Rather than special-case it, we'll just *always*
2257 * use this method here for READ_LOG_EXT, making for
2258 * easier testing.
2259 */
2260 if (IS_GEN_II(hpriv))
2261 return mv_qc_issue_fis(qc);
2262 }
Mark Lord42ed8932009-02-25 15:15:39 -05002263 return ata_sff_qc_issue(qc);
Brett Russ31961942005-09-30 01:36:00 -04002264}
2265
Mark Lord8f767f82008-04-19 14:53:07 -04002266static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
2267{
2268 struct mv_port_priv *pp = ap->private_data;
2269 struct ata_queued_cmd *qc;
2270
2271 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
2272 return NULL;
2273 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Mark Lord95db5052009-01-30 18:49:29 -05002274 if (qc) {
2275 if (qc->tf.flags & ATA_TFLAG_POLLING)
2276 qc = NULL;
2277 else if (!(qc->flags & ATA_QCFLAG_ACTIVE))
2278 qc = NULL;
2279 }
Mark Lord8f767f82008-04-19 14:53:07 -04002280 return qc;
2281}
2282
Mark Lord29d187b2008-05-02 02:15:37 -04002283static void mv_pmp_error_handler(struct ata_port *ap)
2284{
2285 unsigned int pmp, pmp_map;
2286 struct mv_port_priv *pp = ap->private_data;
2287
2288 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
2289 /*
2290 * Perform NCQ error analysis on failed PMPs
2291 * before we freeze the port entirely.
2292 *
2293 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
2294 */
2295 pmp_map = pp->delayed_eh_pmp_map;
2296 pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
2297 for (pmp = 0; pmp_map != 0; pmp++) {
2298 unsigned int this_pmp = (1 << pmp);
2299 if (pmp_map & this_pmp) {
2300 struct ata_link *link = &ap->pmp_link[pmp];
2301 pmp_map &= ~this_pmp;
2302 ata_eh_analyze_ncq_error(link);
2303 }
2304 }
2305 ata_port_freeze(ap);
2306 }
2307 sata_pmp_error_handler(ap);
2308}
2309
Mark Lord4c299ca2008-05-02 02:16:20 -04002310static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
2311{
2312 void __iomem *port_mmio = mv_ap_base(ap);
2313
Mark Lordcae5a292009-04-06 16:43:45 -04002314 return readl(port_mmio + SATA_TESTCTL) >> 16;
Mark Lord4c299ca2008-05-02 02:16:20 -04002315}
2316
Mark Lord4c299ca2008-05-02 02:16:20 -04002317static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
2318{
2319 struct ata_eh_info *ehi;
2320 unsigned int pmp;
2321
2322 /*
2323 * Initialize EH info for PMPs which saw device errors
2324 */
2325 ehi = &ap->link.eh_info;
2326 for (pmp = 0; pmp_map != 0; pmp++) {
2327 unsigned int this_pmp = (1 << pmp);
2328 if (pmp_map & this_pmp) {
2329 struct ata_link *link = &ap->pmp_link[pmp];
2330
2331 pmp_map &= ~this_pmp;
2332 ehi = &link->eh_info;
2333 ata_ehi_clear_desc(ehi);
2334 ata_ehi_push_desc(ehi, "dev err");
2335 ehi->err_mask |= AC_ERR_DEV;
2336 ehi->action |= ATA_EH_RESET;
2337 ata_link_abort(link);
2338 }
2339 }
2340}
2341
Mark Lord06aaca32008-05-19 09:01:24 -04002342static int mv_req_q_empty(struct ata_port *ap)
2343{
2344 void __iomem *port_mmio = mv_ap_base(ap);
2345 u32 in_ptr, out_ptr;
2346
Mark Lordcae5a292009-04-06 16:43:45 -04002347 in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR)
Mark Lord06aaca32008-05-19 09:01:24 -04002348 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
Mark Lordcae5a292009-04-06 16:43:45 -04002349 out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR)
Mark Lord06aaca32008-05-19 09:01:24 -04002350 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2351 return (in_ptr == out_ptr); /* 1 == queue_is_empty */
2352}
2353
Mark Lord4c299ca2008-05-02 02:16:20 -04002354static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
2355{
2356 struct mv_port_priv *pp = ap->private_data;
2357 int failed_links;
2358 unsigned int old_map, new_map;
2359
2360 /*
2361 * Device error during FBS+NCQ operation:
2362 *
2363 * Set a port flag to prevent further I/O being enqueued.
2364 * Leave the EDMA running to drain outstanding commands from this port.
2365 * Perform the post-mortem/EH only when all responses are complete.
2366 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
2367 */
2368 if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
2369 pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
2370 pp->delayed_eh_pmp_map = 0;
2371 }
2372 old_map = pp->delayed_eh_pmp_map;
2373 new_map = old_map | mv_get_err_pmp_map(ap);
2374
2375 if (old_map != new_map) {
2376 pp->delayed_eh_pmp_map = new_map;
2377 mv_pmp_eh_prep(ap, new_map & ~old_map);
2378 }
Mark Lordc46938c2008-05-02 14:02:28 -04002379 failed_links = hweight16(new_map);
Mark Lord4c299ca2008-05-02 02:16:20 -04002380
2381 ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x "
2382 "failed_links=%d nr_active_links=%d\n",
2383 __func__, pp->delayed_eh_pmp_map,
2384 ap->qc_active, failed_links,
2385 ap->nr_active_links);
2386
Mark Lord06aaca32008-05-19 09:01:24 -04002387 if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
Mark Lord4c299ca2008-05-02 02:16:20 -04002388 mv_process_crpb_entries(ap, pp);
2389 mv_stop_edma(ap);
2390 mv_eh_freeze(ap);
2391 ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__);
2392 return 1; /* handled */
2393 }
2394 ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__);
2395 return 1; /* handled */
2396}
2397
2398static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
2399{
2400 /*
2401 * Possible future enhancement:
2402 *
2403 * FBS+non-NCQ operation is not yet implemented.
2404 * See related notes in mv_edma_cfg().
2405 *
2406 * Device error during FBS+non-NCQ operation:
2407 *
2408 * We need to snapshot the shadow registers for each failed command.
2409 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
2410 */
2411 return 0; /* not handled */
2412}
2413
2414static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
2415{
2416 struct mv_port_priv *pp = ap->private_data;
2417
2418 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
2419 return 0; /* EDMA was not active: not handled */
2420 if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
2421 return 0; /* FBS was not active: not handled */
2422
2423 if (!(edma_err_cause & EDMA_ERR_DEV))
2424 return 0; /* non DEV error: not handled */
2425 edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
2426 if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
2427 return 0; /* other problems: not handled */
2428
2429 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
2430 /*
2431 * EDMA should NOT have self-disabled for this case.
2432 * If it did, then something is wrong elsewhere,
2433 * and we cannot handle it here.
2434 */
2435 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2436 ata_port_printk(ap, KERN_WARNING,
2437 "%s: err_cause=0x%x pp_flags=0x%x\n",
2438 __func__, edma_err_cause, pp->pp_flags);
2439 return 0; /* not handled */
2440 }
2441 return mv_handle_fbs_ncq_dev_err(ap);
2442 } else {
2443 /*
2444 * EDMA should have self-disabled for this case.
2445 * If it did not, then something is wrong elsewhere,
2446 * and we cannot handle it here.
2447 */
2448 if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
2449 ata_port_printk(ap, KERN_WARNING,
2450 "%s: err_cause=0x%x pp_flags=0x%x\n",
2451 __func__, edma_err_cause, pp->pp_flags);
2452 return 0; /* not handled */
2453 }
2454 return mv_handle_fbs_non_ncq_dev_err(ap);
2455 }
2456 return 0; /* not handled */
2457}
2458
Mark Lorda9010322008-05-02 02:14:02 -04002459static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
Mark Lord8f767f82008-04-19 14:53:07 -04002460{
Mark Lord8f767f82008-04-19 14:53:07 -04002461 struct ata_eh_info *ehi = &ap->link.eh_info;
Mark Lorda9010322008-05-02 02:14:02 -04002462 char *when = "idle";
Mark Lord8f767f82008-04-19 14:53:07 -04002463
Mark Lord8f767f82008-04-19 14:53:07 -04002464 ata_ehi_clear_desc(ehi);
Mark Lorda9010322008-05-02 02:14:02 -04002465 if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
2466 when = "disabled";
2467 } else if (edma_was_enabled) {
2468 when = "EDMA enabled";
Mark Lord8f767f82008-04-19 14:53:07 -04002469 } else {
2470 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
2471 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
Mark Lorda9010322008-05-02 02:14:02 -04002472 when = "polling";
Mark Lord8f767f82008-04-19 14:53:07 -04002473 }
Mark Lorda9010322008-05-02 02:14:02 -04002474 ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
Mark Lord8f767f82008-04-19 14:53:07 -04002475 ehi->err_mask |= AC_ERR_OTHER;
2476 ehi->action |= ATA_EH_RESET;
2477 ata_port_freeze(ap);
2478}
2479
Brett Russ05b308e2005-10-05 17:08:53 -04002480/**
Brett Russ05b308e2005-10-05 17:08:53 -04002481 * mv_err_intr - Handle error interrupts on the port
2482 * @ap: ATA channel to manipulate
2483 *
Mark Lord8d073792008-04-19 15:07:49 -04002484 * Most cases require a full reset of the chip's state machine,
2485 * which also performs a COMRESET.
2486 * Also, if the port disabled DMA, update our cached copy to match.
Brett Russ05b308e2005-10-05 17:08:53 -04002487 *
2488 * LOCKING:
2489 * Inherited from caller.
2490 */
Mark Lord37b90462008-05-02 02:12:34 -04002491static void mv_err_intr(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04002492{
Brett Russ31961942005-09-30 01:36:00 -04002493 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002494 u32 edma_err_cause, eh_freeze_mask, serr = 0;
Mark Lorde4006072008-05-14 09:19:30 -04002495 u32 fis_cause = 0;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002496 struct mv_port_priv *pp = ap->private_data;
2497 struct mv_host_priv *hpriv = ap->host->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002498 unsigned int action = 0, err_mask = 0;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09002499 struct ata_eh_info *ehi = &ap->link.eh_info;
Mark Lord37b90462008-05-02 02:12:34 -04002500 struct ata_queued_cmd *qc;
2501 int abort = 0;
Brett Russ20f733e2005-09-01 18:26:17 -04002502
Mark Lord8d073792008-04-19 15:07:49 -04002503 /*
Mark Lord37b90462008-05-02 02:12:34 -04002504 * Read and clear the SError and err_cause bits.
Mark Lorde4006072008-05-14 09:19:30 -04002505 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
2506 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
Mark Lord8d073792008-04-19 15:07:49 -04002507 */
Mark Lord37b90462008-05-02 02:12:34 -04002508 sata_scr_read(&ap->link, SCR_ERROR, &serr);
2509 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
2510
Mark Lordcae5a292009-04-06 16:43:45 -04002511 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE);
Mark Lorde4006072008-05-14 09:19:30 -04002512 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
Mark Lordcae5a292009-04-06 16:43:45 -04002513 fis_cause = readl(port_mmio + FIS_IRQ_CAUSE);
2514 writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE);
Mark Lorde4006072008-05-14 09:19:30 -04002515 }
Mark Lordcae5a292009-04-06 16:43:45 -04002516 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002517
Mark Lord4c299ca2008-05-02 02:16:20 -04002518 if (edma_err_cause & EDMA_ERR_DEV) {
2519 /*
2520 * Device errors during FIS-based switching operation
2521 * require special handling.
2522 */
2523 if (mv_handle_dev_err(ap, edma_err_cause))
2524 return;
2525 }
2526
Mark Lord37b90462008-05-02 02:12:34 -04002527 qc = mv_get_active_qc(ap);
2528 ata_ehi_clear_desc(ehi);
2529 ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
2530 edma_err_cause, pp->pp_flags);
Mark Lorde4006072008-05-14 09:19:30 -04002531
Mark Lordc443c502008-05-14 09:24:39 -04002532 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
Mark Lorde4006072008-05-14 09:19:30 -04002533 ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
Mark Lordcae5a292009-04-06 16:43:45 -04002534 if (fis_cause & FIS_IRQ_CAUSE_AN) {
Mark Lordc443c502008-05-14 09:24:39 -04002535 u32 ec = edma_err_cause &
2536 ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
2537 sata_async_notification(ap);
2538 if (!ec)
2539 return; /* Just an AN; no need for the nukes */
2540 ata_ehi_push_desc(ehi, "SDB notify");
2541 }
2542 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002543 /*
Mark Lord352fab72008-04-19 14:43:42 -04002544 * All generations share these EDMA error cause bits:
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002545 */
Mark Lord37b90462008-05-02 02:12:34 -04002546 if (edma_err_cause & EDMA_ERR_DEV) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002547 err_mask |= AC_ERR_DEV;
Mark Lord37b90462008-05-02 02:12:34 -04002548 action |= ATA_EH_RESET;
2549 ata_ehi_push_desc(ehi, "dev error");
2550 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002551 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
Jeff Garzik6c1153e2007-07-13 15:20:15 -04002552 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002553 EDMA_ERR_INTRL_PAR)) {
2554 err_mask |= AC_ERR_ATA_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09002555 action |= ATA_EH_RESET;
Tejun Heob64bbc32007-07-16 14:29:39 +09002556 ata_ehi_push_desc(ehi, "parity error");
Brett Russafb0edd2005-10-05 17:08:42 -04002557 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002558 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
2559 ata_ehi_hotplugged(ehi);
2560 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
Tejun Heob64bbc32007-07-16 14:29:39 +09002561 "dev disconnect" : "dev connect");
Tejun Heocf480622008-01-24 00:05:14 +09002562 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002563 }
2564
Mark Lord352fab72008-04-19 14:43:42 -04002565 /*
2566 * Gen-I has a different SELF_DIS bit,
2567 * different FREEZE bits, and no SERR bit:
2568 */
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002569 if (IS_GEN_I(hpriv)) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002570 eh_freeze_mask = EDMA_EH_FREEZE_5;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002571 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002572 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09002573 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002574 }
2575 } else {
2576 eh_freeze_mask = EDMA_EH_FREEZE;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002577 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002578 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09002579 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002580 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002581 if (edma_err_cause & EDMA_ERR_SERR) {
Mark Lord8d073792008-04-19 15:07:49 -04002582 ata_ehi_push_desc(ehi, "SError=%08x", serr);
2583 err_mask |= AC_ERR_ATA_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09002584 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002585 }
2586 }
Brett Russ20f733e2005-09-01 18:26:17 -04002587
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002588 if (!err_mask) {
2589 err_mask = AC_ERR_OTHER;
Tejun Heocf480622008-01-24 00:05:14 +09002590 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002591 }
2592
2593 ehi->serror |= serr;
2594 ehi->action |= action;
2595
2596 if (qc)
2597 qc->err_mask |= err_mask;
2598 else
2599 ehi->err_mask |= err_mask;
2600
Mark Lord37b90462008-05-02 02:12:34 -04002601 if (err_mask == AC_ERR_DEV) {
2602 /*
2603 * Cannot do ata_port_freeze() here,
2604 * because it would kill PIO access,
2605 * which is needed for further diagnosis.
2606 */
2607 mv_eh_freeze(ap);
2608 abort = 1;
2609 } else if (edma_err_cause & eh_freeze_mask) {
2610 /*
2611 * Note to self: ata_port_freeze() calls ata_port_abort()
2612 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002613 ata_port_freeze(ap);
Mark Lord37b90462008-05-02 02:12:34 -04002614 } else {
2615 abort = 1;
2616 }
2617
2618 if (abort) {
2619 if (qc)
2620 ata_link_abort(qc->dev->link);
2621 else
2622 ata_port_abort(ap);
2623 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002624}
2625
Mark Lordfcfb1f72008-04-19 15:06:40 -04002626static void mv_process_crpb_response(struct ata_port *ap,
2627 struct mv_crpb *response, unsigned int tag, int ncq_enabled)
2628{
2629 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
2630
2631 if (qc) {
2632 u8 ata_status;
2633 u16 edma_status = le16_to_cpu(response->flags);
2634 /*
2635 * edma_status from a response queue entry:
Mark Lordcae5a292009-04-06 16:43:45 -04002636 * LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only).
Mark Lordfcfb1f72008-04-19 15:06:40 -04002637 * MSB is saved ATA status from command completion.
2638 */
2639 if (!ncq_enabled) {
2640 u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
2641 if (err_cause) {
2642 /*
2643 * Error will be seen/handled by mv_err_intr().
2644 * So do nothing at all here.
2645 */
2646 return;
2647 }
2648 }
2649 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
Mark Lord37b90462008-05-02 02:12:34 -04002650 if (!ac_err_mask(ata_status))
2651 ata_qc_complete(qc);
2652 /* else: leave it for mv_err_intr() */
Mark Lordfcfb1f72008-04-19 15:06:40 -04002653 } else {
2654 ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
2655 __func__, tag);
2656 }
2657}
2658
2659static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002660{
2661 void __iomem *port_mmio = mv_ap_base(ap);
2662 struct mv_host_priv *hpriv = ap->host->private_data;
Mark Lordfcfb1f72008-04-19 15:06:40 -04002663 u32 in_index;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002664 bool work_done = false;
Mark Lordfcfb1f72008-04-19 15:06:40 -04002665 int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002666
Mark Lordfcfb1f72008-04-19 15:06:40 -04002667 /* Get the hardware queue position index */
Mark Lordcae5a292009-04-06 16:43:45 -04002668 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002669 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2670
Mark Lordfcfb1f72008-04-19 15:06:40 -04002671 /* Process new responses from since the last time we looked */
2672 while (in_index != pp->resp_idx) {
Jeff Garzik6c1153e2007-07-13 15:20:15 -04002673 unsigned int tag;
Mark Lordfcfb1f72008-04-19 15:06:40 -04002674 struct mv_crpb *response = &pp->crpb[pp->resp_idx];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002675
Mark Lordfcfb1f72008-04-19 15:06:40 -04002676 pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002677
Mark Lordfcfb1f72008-04-19 15:06:40 -04002678 if (IS_GEN_I(hpriv)) {
2679 /* 50xx: no NCQ, only one command active at a time */
Tejun Heo9af5c9c2007-08-06 18:36:22 +09002680 tag = ap->link.active_tag;
Mark Lordfcfb1f72008-04-19 15:06:40 -04002681 } else {
2682 /* Gen II/IIE: get command tag from CRPB entry */
2683 tag = le16_to_cpu(response->id) & 0x1f;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002684 }
Mark Lordfcfb1f72008-04-19 15:06:40 -04002685 mv_process_crpb_response(ap, response, tag, ncq_enabled);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002686 work_done = true;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002687 }
2688
Mark Lord352fab72008-04-19 14:43:42 -04002689 /* Update the software queue position index in hardware */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002690 if (work_done)
2691 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
Mark Lordfcfb1f72008-04-19 15:06:40 -04002692 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
Mark Lordcae5a292009-04-06 16:43:45 -04002693 port_mmio + EDMA_RSP_Q_OUT_PTR);
Brett Russ20f733e2005-09-01 18:26:17 -04002694}
2695
Mark Lorda9010322008-05-02 02:14:02 -04002696static void mv_port_intr(struct ata_port *ap, u32 port_cause)
2697{
2698 struct mv_port_priv *pp;
2699 int edma_was_enabled;
2700
2701 if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
2702 mv_unexpected_intr(ap, 0);
2703 return;
2704 }
2705 /*
2706 * Grab a snapshot of the EDMA_EN flag setting,
2707 * so that we have a consistent view for this port,
2708 * even if something we call of our routines changes it.
2709 */
2710 pp = ap->private_data;
2711 edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2712 /*
2713 * Process completed CRPB response(s) before other events.
2714 */
2715 if (edma_was_enabled && (port_cause & DONE_IRQ)) {
2716 mv_process_crpb_entries(ap, pp);
Mark Lord4c299ca2008-05-02 02:16:20 -04002717 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
2718 mv_handle_fbs_ncq_dev_err(ap);
Mark Lorda9010322008-05-02 02:14:02 -04002719 }
2720 /*
2721 * Handle chip-reported errors, or continue on to handle PIO.
2722 */
2723 if (unlikely(port_cause & ERR_IRQ)) {
2724 mv_err_intr(ap);
2725 } else if (!edma_was_enabled) {
2726 struct ata_queued_cmd *qc = mv_get_active_qc(ap);
2727 if (qc)
2728 ata_sff_host_intr(ap, qc);
2729 else
2730 mv_unexpected_intr(ap, edma_was_enabled);
2731 }
2732}
2733
Brett Russ05b308e2005-10-05 17:08:53 -04002734/**
2735 * mv_host_intr - Handle all interrupts on the given host controller
Jeff Garzikcca39742006-08-24 03:19:22 -04002736 * @host: host specific structure
Mark Lord7368f912008-04-25 11:24:24 -04002737 * @main_irq_cause: Main interrupt cause register for the chip.
Brett Russ05b308e2005-10-05 17:08:53 -04002738 *
2739 * LOCKING:
2740 * Inherited from caller.
2741 */
Mark Lord7368f912008-04-25 11:24:24 -04002742static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
Brett Russ20f733e2005-09-01 18:26:17 -04002743{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002744 struct mv_host_priv *hpriv = host->private_data;
Mark Lordeabd5eb2008-05-02 02:13:27 -04002745 void __iomem *mmio = hpriv->base, *hc_mmio;
Mark Lorda3718c12008-04-19 15:07:18 -04002746 unsigned int handled = 0, port;
Brett Russ20f733e2005-09-01 18:26:17 -04002747
Mark Lord2b748a02009-03-10 22:01:17 -04002748 /* If asserted, clear the "all ports" IRQ coalescing bit */
2749 if (main_irq_cause & ALL_PORTS_COAL_DONE)
Mark Lordcae5a292009-04-06 16:43:45 -04002750 writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
Mark Lord2b748a02009-03-10 22:01:17 -04002751
Mark Lorda3718c12008-04-19 15:07:18 -04002752 for (port = 0; port < hpriv->n_ports; port++) {
Jeff Garzikcca39742006-08-24 03:19:22 -04002753 struct ata_port *ap = host->ports[port];
Mark Lordeabd5eb2008-05-02 02:13:27 -04002754 unsigned int p, shift, hardport, port_cause;
2755
Mark Lorda3718c12008-04-19 15:07:18 -04002756 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
Mark Lorda3718c12008-04-19 15:07:18 -04002757 /*
Mark Lordeabd5eb2008-05-02 02:13:27 -04002758 * Each hc within the host has its own hc_irq_cause register,
2759 * where the interrupting ports bits get ack'd.
Mark Lorda3718c12008-04-19 15:07:18 -04002760 */
Mark Lordeabd5eb2008-05-02 02:13:27 -04002761 if (hardport == 0) { /* first port on this hc ? */
2762 u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
2763 u32 port_mask, ack_irqs;
2764 /*
2765 * Skip this entire hc if nothing pending for any ports
2766 */
2767 if (!hc_cause) {
2768 port += MV_PORTS_PER_HC - 1;
2769 continue;
2770 }
2771 /*
2772 * We don't need/want to read the hc_irq_cause register,
2773 * because doing so hurts performance, and
2774 * main_irq_cause already gives us everything we need.
2775 *
2776 * But we do have to *write* to the hc_irq_cause to ack
2777 * the ports that we are handling this time through.
2778 *
2779 * This requires that we create a bitmap for those
2780 * ports which interrupted us, and use that bitmap
2781 * to ack (only) those ports via hc_irq_cause.
2782 */
2783 ack_irqs = 0;
Mark Lord2b748a02009-03-10 22:01:17 -04002784 if (hc_cause & PORTS_0_3_COAL_DONE)
2785 ack_irqs = HC_COAL_IRQ;
Mark Lordeabd5eb2008-05-02 02:13:27 -04002786 for (p = 0; p < MV_PORTS_PER_HC; ++p) {
2787 if ((port + p) >= hpriv->n_ports)
2788 break;
2789 port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
2790 if (hc_cause & port_mask)
2791 ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
2792 }
Mark Lorda3718c12008-04-19 15:07:18 -04002793 hc_mmio = mv_hc_base_from_port(mmio, port);
Mark Lordcae5a292009-04-06 16:43:45 -04002794 writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE);
Mark Lorda3718c12008-04-19 15:07:18 -04002795 handled = 1;
2796 }
Mark Lorda9010322008-05-02 02:14:02 -04002797 /*
2798 * Handle interrupts signalled for this port:
2799 */
Mark Lordeabd5eb2008-05-02 02:13:27 -04002800 port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
Mark Lorda9010322008-05-02 02:14:02 -04002801 if (port_cause)
2802 mv_port_intr(ap, port_cause);
Brett Russ20f733e2005-09-01 18:26:17 -04002803 }
Mark Lorda3718c12008-04-19 15:07:18 -04002804 return handled;
Brett Russ20f733e2005-09-01 18:26:17 -04002805}
2806
Mark Lorda3718c12008-04-19 15:07:18 -04002807static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002808{
Mark Lord02a121d2007-12-01 13:07:22 -05002809 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002810 struct ata_port *ap;
2811 struct ata_queued_cmd *qc;
2812 struct ata_eh_info *ehi;
2813 unsigned int i, err_mask, printed = 0;
2814 u32 err_cause;
2815
Mark Lordcae5a292009-04-06 16:43:45 -04002816 err_cause = readl(mmio + hpriv->irq_cause_offset);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002817
2818 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
2819 err_cause);
2820
2821 DPRINTK("All regs @ PCI error\n");
2822 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
2823
Mark Lordcae5a292009-04-06 16:43:45 -04002824 writelfl(0, mmio + hpriv->irq_cause_offset);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002825
2826 for (i = 0; i < host->n_ports; i++) {
2827 ap = host->ports[i];
Tejun Heo936fd732007-08-06 18:36:23 +09002828 if (!ata_link_offline(&ap->link)) {
Tejun Heo9af5c9c2007-08-06 18:36:22 +09002829 ehi = &ap->link.eh_info;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002830 ata_ehi_clear_desc(ehi);
2831 if (!printed++)
2832 ata_ehi_push_desc(ehi,
2833 "PCI err cause 0x%08x", err_cause);
2834 err_mask = AC_ERR_HOST_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09002835 ehi->action = ATA_EH_RESET;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09002836 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002837 if (qc)
2838 qc->err_mask |= err_mask;
2839 else
2840 ehi->err_mask |= err_mask;
2841
2842 ata_port_freeze(ap);
2843 }
2844 }
Mark Lorda3718c12008-04-19 15:07:18 -04002845 return 1; /* handled */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002846}
2847
Brett Russ05b308e2005-10-05 17:08:53 -04002848/**
Jeff Garzikc5d3e452007-07-11 18:30:50 -04002849 * mv_interrupt - Main interrupt event handler
Brett Russ05b308e2005-10-05 17:08:53 -04002850 * @irq: unused
2851 * @dev_instance: private data; in this case the host structure
Brett Russ05b308e2005-10-05 17:08:53 -04002852 *
2853 * Read the read only register to determine if any host
2854 * controllers have pending interrupts. If so, call lower level
2855 * routine to handle. Also check for PCI errors which are only
2856 * reported here.
2857 *
Jeff Garzik8b260242005-11-12 12:32:50 -05002858 * LOCKING:
Jeff Garzikcca39742006-08-24 03:19:22 -04002859 * This routine holds the host lock while processing pending
Brett Russ05b308e2005-10-05 17:08:53 -04002860 * interrupts.
2861 */
David Howells7d12e782006-10-05 14:55:46 +01002862static irqreturn_t mv_interrupt(int irq, void *dev_instance)
Brett Russ20f733e2005-09-01 18:26:17 -04002863{
Jeff Garzikcca39742006-08-24 03:19:22 -04002864 struct ata_host *host = dev_instance;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002865 struct mv_host_priv *hpriv = host->private_data;
Mark Lorda3718c12008-04-19 15:07:18 -04002866 unsigned int handled = 0;
Mark Lord6d3c30e2009-01-21 10:31:29 -05002867 int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
Mark Lord96e2c4872008-05-17 13:38:00 -04002868 u32 main_irq_cause, pending_irqs;
Brett Russ20f733e2005-09-01 18:26:17 -04002869
Mark Lord646a4da2008-01-26 18:30:37 -05002870 spin_lock(&host->lock);
Mark Lord6d3c30e2009-01-21 10:31:29 -05002871
2872 /* for MSI: block new interrupts while in here */
2873 if (using_msi)
Mark Lord2b748a02009-03-10 22:01:17 -04002874 mv_write_main_irq_mask(0, hpriv);
Mark Lord6d3c30e2009-01-21 10:31:29 -05002875
Mark Lord7368f912008-04-25 11:24:24 -04002876 main_irq_cause = readl(hpriv->main_irq_cause_addr);
Mark Lord96e2c4872008-05-17 13:38:00 -04002877 pending_irqs = main_irq_cause & hpriv->main_irq_mask;
Mark Lord352fab72008-04-19 14:43:42 -04002878 /*
2879 * Deal with cases where we either have nothing pending, or have read
2880 * a bogus register value which can indicate HW removal or PCI fault.
Brett Russ20f733e2005-09-01 18:26:17 -04002881 */
Mark Lorda44253d2008-05-17 13:37:07 -04002882 if (pending_irqs && main_irq_cause != 0xffffffffU) {
Mark Lord1f398472008-05-27 17:54:48 -04002883 if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
Mark Lorda3718c12008-04-19 15:07:18 -04002884 handled = mv_pci_error(host, hpriv->base);
2885 else
Mark Lorda44253d2008-05-17 13:37:07 -04002886 handled = mv_host_intr(host, pending_irqs);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002887 }
Mark Lord6d3c30e2009-01-21 10:31:29 -05002888
2889 /* for MSI: unmask; interrupt cause bits will retrigger now */
2890 if (using_msi)
Mark Lord2b748a02009-03-10 22:01:17 -04002891 mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);
Mark Lord6d3c30e2009-01-21 10:31:29 -05002892
Mark Lord9d51af72009-03-10 16:28:51 -04002893 spin_unlock(&host->lock);
2894
Brett Russ20f733e2005-09-01 18:26:17 -04002895 return IRQ_RETVAL(handled);
2896}
2897
Jeff Garzikc9d39132005-11-13 17:47:51 -05002898static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
2899{
2900 unsigned int ofs;
2901
2902 switch (sc_reg_in) {
2903 case SCR_STATUS:
2904 case SCR_ERROR:
2905 case SCR_CONTROL:
2906 ofs = sc_reg_in * sizeof(u32);
2907 break;
2908 default:
2909 ofs = 0xffffffffU;
2910 break;
2911 }
2912 return ofs;
2913}
2914
Tejun Heo82ef04f2008-07-31 17:02:40 +09002915static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05002916{
Tejun Heo82ef04f2008-07-31 17:02:40 +09002917 struct mv_host_priv *hpriv = link->ap->host->private_data;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002918 void __iomem *mmio = hpriv->base;
Tejun Heo82ef04f2008-07-31 17:02:40 +09002919 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05002920 unsigned int ofs = mv5_scr_offset(sc_reg_in);
2921
Tejun Heoda3dbb12007-07-16 14:29:40 +09002922 if (ofs != 0xffffffffU) {
2923 *val = readl(addr + ofs);
2924 return 0;
2925 } else
2926 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05002927}
2928
Tejun Heo82ef04f2008-07-31 17:02:40 +09002929static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05002930{
Tejun Heo82ef04f2008-07-31 17:02:40 +09002931 struct mv_host_priv *hpriv = link->ap->host->private_data;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002932 void __iomem *mmio = hpriv->base;
Tejun Heo82ef04f2008-07-31 17:02:40 +09002933 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05002934 unsigned int ofs = mv5_scr_offset(sc_reg_in);
2935
Tejun Heoda3dbb12007-07-16 14:29:40 +09002936 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09002937 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09002938 return 0;
2939 } else
2940 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05002941}
2942
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002943static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
Jeff Garzik522479f2005-11-12 22:14:02 -05002944{
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002945 struct pci_dev *pdev = to_pci_dev(host->dev);
Jeff Garzik522479f2005-11-12 22:14:02 -05002946 int early_5080;
2947
Auke Kok44c10132007-06-08 15:46:36 -07002948 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
Jeff Garzik522479f2005-11-12 22:14:02 -05002949
2950 if (!early_5080) {
2951 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
2952 tmp |= (1 << 0);
2953 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
2954 }
2955
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002956 mv_reset_pci_bus(host, mmio);
Jeff Garzik522479f2005-11-12 22:14:02 -05002957}
2958
2959static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2960{
Mark Lordcae5a292009-04-06 16:43:45 -04002961 writel(0x0fcfffff, mmio + FLASH_CTL);
Jeff Garzik522479f2005-11-12 22:14:02 -05002962}
2963
Jeff Garzik47c2b672005-11-12 21:13:17 -05002964static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002965 void __iomem *mmio)
2966{
Jeff Garzikc9d39132005-11-13 17:47:51 -05002967 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
2968 u32 tmp;
2969
2970 tmp = readl(phy_mmio + MV5_PHY_MODE);
2971
2972 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
2973 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002974}
2975
Jeff Garzik47c2b672005-11-12 21:13:17 -05002976static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002977{
Jeff Garzik522479f2005-11-12 22:14:02 -05002978 u32 tmp;
2979
Mark Lordcae5a292009-04-06 16:43:45 -04002980 writel(0, mmio + GPIO_PORT_CTL);
Jeff Garzik522479f2005-11-12 22:14:02 -05002981
2982 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
2983
2984 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
2985 tmp |= ~(1 << 0);
2986 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002987}
2988
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002989static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2990 unsigned int port)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002991{
Jeff Garzikc9d39132005-11-13 17:47:51 -05002992 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
2993 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
2994 u32 tmp;
2995 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
2996
2997 if (fix_apm_sq) {
Mark Lordcae5a292009-04-06 16:43:45 -04002998 tmp = readl(phy_mmio + MV5_LTMODE);
Jeff Garzikc9d39132005-11-13 17:47:51 -05002999 tmp |= (1 << 19);
Mark Lordcae5a292009-04-06 16:43:45 -04003000 writel(tmp, phy_mmio + MV5_LTMODE);
Jeff Garzikc9d39132005-11-13 17:47:51 -05003001
Mark Lordcae5a292009-04-06 16:43:45 -04003002 tmp = readl(phy_mmio + MV5_PHY_CTL);
Jeff Garzikc9d39132005-11-13 17:47:51 -05003003 tmp &= ~0x3;
3004 tmp |= 0x1;
Mark Lordcae5a292009-04-06 16:43:45 -04003005 writel(tmp, phy_mmio + MV5_PHY_CTL);
Jeff Garzikc9d39132005-11-13 17:47:51 -05003006 }
3007
3008 tmp = readl(phy_mmio + MV5_PHY_MODE);
3009 tmp &= ~mask;
3010 tmp |= hpriv->signal[port].pre;
3011 tmp |= hpriv->signal[port].amps;
3012 writel(tmp, phy_mmio + MV5_PHY_MODE);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003013}
3014
Jeff Garzikc9d39132005-11-13 17:47:51 -05003015
3016#undef ZERO
3017#define ZERO(reg) writel(0, port_mmio + (reg))
3018static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
3019 unsigned int port)
Jeff Garzik47c2b672005-11-12 21:13:17 -05003020{
Jeff Garzikc9d39132005-11-13 17:47:51 -05003021 void __iomem *port_mmio = mv_port_base(mmio, port);
3022
Mark Lorde12bef52008-03-31 19:33:56 -04003023 mv_reset_channel(hpriv, mmio, port);
Jeff Garzikc9d39132005-11-13 17:47:51 -05003024
3025 ZERO(0x028); /* command */
Mark Lordcae5a292009-04-06 16:43:45 -04003026 writel(0x11f, port_mmio + EDMA_CFG);
Jeff Garzikc9d39132005-11-13 17:47:51 -05003027 ZERO(0x004); /* timer */
3028 ZERO(0x008); /* irq err cause */
3029 ZERO(0x00c); /* irq err mask */
3030 ZERO(0x010); /* rq bah */
3031 ZERO(0x014); /* rq inp */
3032 ZERO(0x018); /* rq outp */
3033 ZERO(0x01c); /* respq bah */
3034 ZERO(0x024); /* respq outp */
3035 ZERO(0x020); /* respq inp */
3036 ZERO(0x02c); /* test control */
Mark Lordcae5a292009-04-06 16:43:45 -04003037 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
Jeff Garzikc9d39132005-11-13 17:47:51 -05003038}
3039#undef ZERO
3040
3041#define ZERO(reg) writel(0, hc_mmio + (reg))
3042static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3043 unsigned int hc)
3044{
3045 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3046 u32 tmp;
3047
3048 ZERO(0x00c);
3049 ZERO(0x010);
3050 ZERO(0x014);
3051 ZERO(0x018);
3052
3053 tmp = readl(hc_mmio + 0x20);
3054 tmp &= 0x1c1c1c1c;
3055 tmp |= 0x03030303;
3056 writel(tmp, hc_mmio + 0x20);
3057}
3058#undef ZERO
3059
3060static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3061 unsigned int n_hc)
3062{
3063 unsigned int hc, port;
3064
3065 for (hc = 0; hc < n_hc; hc++) {
3066 for (port = 0; port < MV_PORTS_PER_HC; port++)
3067 mv5_reset_hc_port(hpriv, mmio,
3068 (hc * MV_PORTS_PER_HC) + port);
3069
3070 mv5_reset_one_hc(hpriv, mmio, hc);
3071 }
3072
3073 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05003074}
3075
Jeff Garzik101ffae2005-11-12 22:17:49 -05003076#undef ZERO
3077#define ZERO(reg) writel(0, mmio + (reg))
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003078static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
Jeff Garzik101ffae2005-11-12 22:17:49 -05003079{
Mark Lord02a121d2007-12-01 13:07:22 -05003080 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzik101ffae2005-11-12 22:17:49 -05003081 u32 tmp;
3082
Mark Lordcae5a292009-04-06 16:43:45 -04003083 tmp = readl(mmio + MV_PCI_MODE);
Jeff Garzik101ffae2005-11-12 22:17:49 -05003084 tmp &= 0xff00ffff;
Mark Lordcae5a292009-04-06 16:43:45 -04003085 writel(tmp, mmio + MV_PCI_MODE);
Jeff Garzik101ffae2005-11-12 22:17:49 -05003086
3087 ZERO(MV_PCI_DISC_TIMER);
3088 ZERO(MV_PCI_MSI_TRIGGER);
Mark Lordcae5a292009-04-06 16:43:45 -04003089 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
Jeff Garzik101ffae2005-11-12 22:17:49 -05003090 ZERO(MV_PCI_SERR_MASK);
Mark Lordcae5a292009-04-06 16:43:45 -04003091 ZERO(hpriv->irq_cause_offset);
3092 ZERO(hpriv->irq_mask_offset);
Jeff Garzik101ffae2005-11-12 22:17:49 -05003093 ZERO(MV_PCI_ERR_LOW_ADDRESS);
3094 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
3095 ZERO(MV_PCI_ERR_ATTRIBUTE);
3096 ZERO(MV_PCI_ERR_COMMAND);
3097}
3098#undef ZERO
3099
3100static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3101{
3102 u32 tmp;
3103
3104 mv5_reset_flash(hpriv, mmio);
3105
Mark Lordcae5a292009-04-06 16:43:45 -04003106 tmp = readl(mmio + GPIO_PORT_CTL);
Jeff Garzik101ffae2005-11-12 22:17:49 -05003107 tmp &= 0x3;
3108 tmp |= (1 << 5) | (1 << 6);
Mark Lordcae5a292009-04-06 16:43:45 -04003109 writel(tmp, mmio + GPIO_PORT_CTL);
Jeff Garzik101ffae2005-11-12 22:17:49 -05003110}
3111
3112/**
3113 * mv6_reset_hc - Perform the 6xxx global soft reset
3114 * @mmio: base address of the HBA
3115 *
3116 * This routine only applies to 6xxx parts.
3117 *
3118 * LOCKING:
3119 * Inherited from caller.
3120 */
Jeff Garzikc9d39132005-11-13 17:47:51 -05003121static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3122 unsigned int n_hc)
Jeff Garzik101ffae2005-11-12 22:17:49 -05003123{
Mark Lordcae5a292009-04-06 16:43:45 -04003124 void __iomem *reg = mmio + PCI_MAIN_CMD_STS;
Jeff Garzik101ffae2005-11-12 22:17:49 -05003125 int i, rc = 0;
3126 u32 t;
3127
3128 /* Following procedure defined in PCI "main command and status
3129 * register" table.
3130 */
3131 t = readl(reg);
3132 writel(t | STOP_PCI_MASTER, reg);
3133
3134 for (i = 0; i < 1000; i++) {
3135 udelay(1);
3136 t = readl(reg);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04003137 if (PCI_MASTER_EMPTY & t)
Jeff Garzik101ffae2005-11-12 22:17:49 -05003138 break;
Jeff Garzik101ffae2005-11-12 22:17:49 -05003139 }
3140 if (!(PCI_MASTER_EMPTY & t)) {
3141 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
3142 rc = 1;
3143 goto done;
3144 }
3145
3146 /* set reset */
3147 i = 5;
3148 do {
3149 writel(t | GLOB_SFT_RST, reg);
3150 t = readl(reg);
3151 udelay(1);
3152 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
3153
3154 if (!(GLOB_SFT_RST & t)) {
3155 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
3156 rc = 1;
3157 goto done;
3158 }
3159
3160 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
3161 i = 5;
3162 do {
3163 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
3164 t = readl(reg);
3165 udelay(1);
3166 } while ((GLOB_SFT_RST & t) && (i-- > 0));
3167
3168 if (GLOB_SFT_RST & t) {
3169 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
3170 rc = 1;
3171 }
3172done:
3173 return rc;
3174}
3175
Jeff Garzik47c2b672005-11-12 21:13:17 -05003176static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05003177 void __iomem *mmio)
3178{
3179 void __iomem *port_mmio;
3180 u32 tmp;
3181
Mark Lordcae5a292009-04-06 16:43:45 -04003182 tmp = readl(mmio + RESET_CFG);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05003183 if ((tmp & (1 << 0)) == 0) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05003184 hpriv->signal[idx].amps = 0x7 << 8;
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05003185 hpriv->signal[idx].pre = 0x1 << 5;
3186 return;
3187 }
3188
3189 port_mmio = mv_port_base(mmio, idx);
3190 tmp = readl(port_mmio + PHY_MODE2);
3191
3192 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
3193 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
3194}
3195
Jeff Garzik47c2b672005-11-12 21:13:17 -05003196static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05003197{
Mark Lordcae5a292009-04-06 16:43:45 -04003198 writel(0x00000060, mmio + GPIO_PORT_CTL);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05003199}
3200
Jeff Garzikc9d39132005-11-13 17:47:51 -05003201static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
Jeff Garzik2a47ce02005-11-12 23:05:14 -05003202 unsigned int port)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003203{
Jeff Garzikc9d39132005-11-13 17:47:51 -05003204 void __iomem *port_mmio = mv_port_base(mmio, port);
3205
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003206 u32 hp_flags = hpriv->hp_flags;
Jeff Garzik47c2b672005-11-12 21:13:17 -05003207 int fix_phy_mode2 =
3208 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003209 int fix_phy_mode4 =
Jeff Garzik47c2b672005-11-12 21:13:17 -05003210 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
Mark Lord8c30a8b2008-05-27 17:56:31 -04003211 u32 m2, m3;
Jeff Garzik47c2b672005-11-12 21:13:17 -05003212
3213 if (fix_phy_mode2) {
3214 m2 = readl(port_mmio + PHY_MODE2);
3215 m2 &= ~(1 << 16);
3216 m2 |= (1 << 31);
3217 writel(m2, port_mmio + PHY_MODE2);
3218
3219 udelay(200);
3220
3221 m2 = readl(port_mmio + PHY_MODE2);
3222 m2 &= ~((1 << 16) | (1 << 31));
3223 writel(m2, port_mmio + PHY_MODE2);
3224
3225 udelay(200);
3226 }
3227
Mark Lord8c30a8b2008-05-27 17:56:31 -04003228 /*
3229 * Gen-II/IIe PHY_MODE3 errata RM#2:
3230 * Achieves better receiver noise performance than the h/w default:
3231 */
3232 m3 = readl(port_mmio + PHY_MODE3);
3233 m3 = (m3 & 0x1f) | (0x5555601 << 5);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003234
Mark Lord0388a8c2008-05-28 13:41:52 -04003235 /* Guideline 88F5182 (GL# SATA-S11) */
3236 if (IS_SOC(hpriv))
3237 m3 &= ~0x1c;
3238
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003239 if (fix_phy_mode4) {
Mark Lordba069e32008-05-31 16:46:34 -04003240 u32 m4 = readl(port_mmio + PHY_MODE4);
3241 /*
3242 * Enforce reserved-bit restrictions on GenIIe devices only.
3243 * For earlier chipsets, force only the internal config field
3244 * (workaround for errata FEr SATA#10 part 1).
3245 */
Mark Lord8c30a8b2008-05-27 17:56:31 -04003246 if (IS_GEN_IIE(hpriv))
Mark Lordba069e32008-05-31 16:46:34 -04003247 m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
3248 else
3249 m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
Mark Lord8c30a8b2008-05-27 17:56:31 -04003250 writel(m4, port_mmio + PHY_MODE4);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003251 }
Mark Lordb406c7a2008-05-28 12:01:12 -04003252 /*
3253 * Workaround for 60x1-B2 errata SATA#13:
3254 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
3255 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
Mark Lordba684602009-04-06 15:25:39 -04003256 * Or ensure we use writelfl() when writing PHY_MODE4.
Mark Lordb406c7a2008-05-28 12:01:12 -04003257 */
3258 writel(m3, port_mmio + PHY_MODE3);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003259
3260 /* Revert values of pre-emphasis and signal amps to the saved ones */
3261 m2 = readl(port_mmio + PHY_MODE2);
3262
3263 m2 &= ~MV_M2_PREAMP_MASK;
Jeff Garzik2a47ce02005-11-12 23:05:14 -05003264 m2 |= hpriv->signal[port].amps;
3265 m2 |= hpriv->signal[port].pre;
Jeff Garzik47c2b672005-11-12 21:13:17 -05003266 m2 &= ~(1 << 16);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003267
Jeff Garzike4e7b892006-01-31 12:18:41 -05003268 /* according to mvSata 3.6.1, some IIE values are fixed */
3269 if (IS_GEN_IIE(hpriv)) {
3270 m2 &= ~0xC30FF01F;
3271 m2 |= 0x0000900F;
3272 }
3273
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003274 writel(m2, port_mmio + PHY_MODE2);
3275}
3276
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003277/* TODO: use the generic LED interface to configure the SATA Presence */
3278/* & Acitivy LEDs on the board */
3279static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
3280 void __iomem *mmio)
3281{
3282 return;
3283}
3284
3285static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
3286 void __iomem *mmio)
3287{
3288 void __iomem *port_mmio;
3289 u32 tmp;
3290
3291 port_mmio = mv_port_base(mmio, idx);
3292 tmp = readl(port_mmio + PHY_MODE2);
3293
3294 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
3295 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
3296}
3297
3298#undef ZERO
3299#define ZERO(reg) writel(0, port_mmio + (reg))
3300static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
3301 void __iomem *mmio, unsigned int port)
3302{
3303 void __iomem *port_mmio = mv_port_base(mmio, port);
3304
Mark Lorde12bef52008-03-31 19:33:56 -04003305 mv_reset_channel(hpriv, mmio, port);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003306
3307 ZERO(0x028); /* command */
Mark Lordcae5a292009-04-06 16:43:45 -04003308 writel(0x101f, port_mmio + EDMA_CFG);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003309 ZERO(0x004); /* timer */
3310 ZERO(0x008); /* irq err cause */
3311 ZERO(0x00c); /* irq err mask */
3312 ZERO(0x010); /* rq bah */
3313 ZERO(0x014); /* rq inp */
3314 ZERO(0x018); /* rq outp */
3315 ZERO(0x01c); /* respq bah */
3316 ZERO(0x024); /* respq outp */
3317 ZERO(0x020); /* respq inp */
3318 ZERO(0x02c); /* test control */
Mark Lordcae5a292009-04-06 16:43:45 -04003319 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003320}
3321
3322#undef ZERO
3323
3324#define ZERO(reg) writel(0, hc_mmio + (reg))
3325static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
3326 void __iomem *mmio)
3327{
3328 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
3329
3330 ZERO(0x00c);
3331 ZERO(0x010);
3332 ZERO(0x014);
3333
3334}
3335
3336#undef ZERO
3337
3338static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
3339 void __iomem *mmio, unsigned int n_hc)
3340{
3341 unsigned int port;
3342
3343 for (port = 0; port < hpriv->n_ports; port++)
3344 mv_soc_reset_hc_port(hpriv, mmio, port);
3345
3346 mv_soc_reset_one_hc(hpriv, mmio);
3347
3348 return 0;
3349}
3350
3351static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
3352 void __iomem *mmio)
3353{
3354 return;
3355}
3356
3357static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
3358{
3359 return;
3360}
3361
Mark Lord8e7decd2008-05-02 02:07:51 -04003362static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
Mark Lordb67a1062008-03-31 19:35:13 -04003363{
Mark Lordcae5a292009-04-06 16:43:45 -04003364 u32 ifcfg = readl(port_mmio + SATA_IFCFG);
Mark Lordb67a1062008-03-31 19:35:13 -04003365
Mark Lord8e7decd2008-05-02 02:07:51 -04003366 ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */
Mark Lordb67a1062008-03-31 19:35:13 -04003367 if (want_gen2i)
Mark Lord8e7decd2008-05-02 02:07:51 -04003368 ifcfg |= (1 << 7); /* enable gen2i speed */
Mark Lordcae5a292009-04-06 16:43:45 -04003369 writelfl(ifcfg, port_mmio + SATA_IFCFG);
Mark Lordb67a1062008-03-31 19:35:13 -04003370}
3371
Mark Lorde12bef52008-03-31 19:33:56 -04003372static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
Jeff Garzikc9d39132005-11-13 17:47:51 -05003373 unsigned int port_no)
Brett Russ20f733e2005-09-01 18:26:17 -04003374{
Jeff Garzikc9d39132005-11-13 17:47:51 -05003375 void __iomem *port_mmio = mv_port_base(mmio, port_no);
Brett Russ20f733e2005-09-01 18:26:17 -04003376
Mark Lord8e7decd2008-05-02 02:07:51 -04003377 /*
3378 * The datasheet warns against setting EDMA_RESET when EDMA is active
3379 * (but doesn't say what the problem might be). So we first try
3380 * to disable the EDMA engine before doing the EDMA_RESET operation.
3381 */
Mark Lord0d8be5c2008-04-16 14:56:12 -04003382 mv_stop_edma_engine(port_mmio);
Mark Lordcae5a292009-04-06 16:43:45 -04003383 writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003384
Mark Lordb67a1062008-03-31 19:35:13 -04003385 if (!IS_GEN_I(hpriv)) {
Mark Lord8e7decd2008-05-02 02:07:51 -04003386 /* Enable 3.0gb/s link speed: this survives EDMA_RESET */
3387 mv_setup_ifcfg(port_mmio, 1);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003388 }
Mark Lordb67a1062008-03-31 19:35:13 -04003389 /*
Mark Lord8e7decd2008-05-02 02:07:51 -04003390 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
Mark Lordb67a1062008-03-31 19:35:13 -04003391 * link, and physical layers. It resets all SATA interface registers
Mark Lordcae5a292009-04-06 16:43:45 -04003392 * (except for SATA_IFCFG), and issues a COMRESET to the dev.
Brett Russ20f733e2005-09-01 18:26:17 -04003393 */
Mark Lordcae5a292009-04-06 16:43:45 -04003394 writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
Mark Lordb67a1062008-03-31 19:35:13 -04003395 udelay(25); /* allow reset propagation */
Mark Lordcae5a292009-04-06 16:43:45 -04003396 writelfl(0, port_mmio + EDMA_CMD);
Brett Russ20f733e2005-09-01 18:26:17 -04003397
Jeff Garzikc9d39132005-11-13 17:47:51 -05003398 hpriv->ops->phy_errata(hpriv, mmio, port_no);
3399
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04003400 if (IS_GEN_I(hpriv))
Jeff Garzikc9d39132005-11-13 17:47:51 -05003401 mdelay(1);
3402}
3403
Mark Lorde49856d2008-04-16 14:59:07 -04003404static void mv_pmp_select(struct ata_port *ap, int pmp)
Jeff Garzikc9d39132005-11-13 17:47:51 -05003405{
Mark Lorde49856d2008-04-16 14:59:07 -04003406 if (sata_pmp_supported(ap)) {
3407 void __iomem *port_mmio = mv_ap_base(ap);
Mark Lordcae5a292009-04-06 16:43:45 -04003408 u32 reg = readl(port_mmio + SATA_IFCTL);
Mark Lorde49856d2008-04-16 14:59:07 -04003409 int old = reg & 0xf;
Jeff Garzikc9d39132005-11-13 17:47:51 -05003410
Mark Lorde49856d2008-04-16 14:59:07 -04003411 if (old != pmp) {
3412 reg = (reg & ~0xf) | pmp;
Mark Lordcae5a292009-04-06 16:43:45 -04003413 writelfl(reg, port_mmio + SATA_IFCTL);
Mark Lorde49856d2008-04-16 14:59:07 -04003414 }
Tejun Heoda3dbb12007-07-16 14:29:40 +09003415 }
Brett Russ20f733e2005-09-01 18:26:17 -04003416}
3417
Mark Lorde49856d2008-04-16 14:59:07 -04003418static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
3419 unsigned long deadline)
Jeff Garzik22374672005-11-17 10:59:48 -05003420{
Mark Lorde49856d2008-04-16 14:59:07 -04003421 mv_pmp_select(link->ap, sata_srst_pmp(link));
3422 return sata_std_hardreset(link, class, deadline);
3423}
Jeff Garzik0ea9e172007-07-13 17:06:45 -04003424
Mark Lorde49856d2008-04-16 14:59:07 -04003425static int mv_softreset(struct ata_link *link, unsigned int *class,
3426 unsigned long deadline)
3427{
3428 mv_pmp_select(link->ap, sata_srst_pmp(link));
3429 return ata_sff_softreset(link, class, deadline);
Jeff Garzik22374672005-11-17 10:59:48 -05003430}
3431
Tejun Heocc0680a2007-08-06 18:36:23 +09003432static int mv_hardreset(struct ata_link *link, unsigned int *class,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003433 unsigned long deadline)
3434{
Tejun Heocc0680a2007-08-06 18:36:23 +09003435 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003436 struct mv_host_priv *hpriv = ap->host->private_data;
Mark Lordb5624682008-03-31 19:34:40 -04003437 struct mv_port_priv *pp = ap->private_data;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003438 void __iomem *mmio = hpriv->base;
Mark Lord0d8be5c2008-04-16 14:56:12 -04003439 int rc, attempts = 0, extra = 0;
3440 u32 sstatus;
3441 bool online;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003442
Mark Lorde12bef52008-03-31 19:33:56 -04003443 mv_reset_channel(hpriv, mmio, ap->port_no);
Mark Lordb5624682008-03-31 19:34:40 -04003444 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Mark Lordd16ab3f2009-02-25 15:17:43 -05003445 pp->pp_flags &=
3446 ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003447
Mark Lord0d8be5c2008-04-16 14:56:12 -04003448 /* Workaround for errata FEr SATA#10 (part 2) */
3449 do {
Mark Lord17c5aab2008-04-16 14:56:51 -04003450 const unsigned long *timing =
3451 sata_ehc_deb_timing(&link->eh_context);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003452
Mark Lord17c5aab2008-04-16 14:56:51 -04003453 rc = sata_link_hardreset(link, timing, deadline + extra,
3454 &online, NULL);
Mark Lord9dcffd92008-05-14 09:18:12 -04003455 rc = online ? -EAGAIN : rc;
Mark Lord17c5aab2008-04-16 14:56:51 -04003456 if (rc)
Mark Lord0d8be5c2008-04-16 14:56:12 -04003457 return rc;
Mark Lord0d8be5c2008-04-16 14:56:12 -04003458 sata_scr_read(link, SCR_STATUS, &sstatus);
3459 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
3460 /* Force 1.5gb/s link speed and try again */
Mark Lord8e7decd2008-05-02 02:07:51 -04003461 mv_setup_ifcfg(mv_ap_base(ap), 0);
Mark Lord0d8be5c2008-04-16 14:56:12 -04003462 if (time_after(jiffies + HZ, deadline))
3463 extra = HZ; /* only extend it once, max */
3464 }
3465 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
Mark Lord08da1752009-02-25 15:13:03 -05003466 mv_save_cached_regs(ap);
Mark Lord66e57a22009-01-30 18:52:58 -05003467 mv_edma_cfg(ap, 0, 0);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003468
Mark Lord17c5aab2008-04-16 14:56:51 -04003469 return rc;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003470}
3471
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003472static void mv_eh_freeze(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04003473{
Mark Lord1cfd19a2008-04-19 15:05:50 -04003474 mv_stop_edma(ap);
Mark Lordc4de5732008-05-17 13:35:21 -04003475 mv_enable_port_irqs(ap, 0);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003476}
3477
3478static void mv_eh_thaw(struct ata_port *ap)
3479{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003480 struct mv_host_priv *hpriv = ap->host->private_data;
Mark Lordc4de5732008-05-17 13:35:21 -04003481 unsigned int port = ap->port_no;
3482 unsigned int hardport = mv_hardport_from_port(port);
Mark Lord1cfd19a2008-04-19 15:05:50 -04003483 void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003484 void __iomem *port_mmio = mv_ap_base(ap);
Mark Lordc4de5732008-05-17 13:35:21 -04003485 u32 hc_irq_cause;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003486
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003487 /* clear EDMA errors on this port */
Mark Lordcae5a292009-04-06 16:43:45 -04003488 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003489
3490 /* clear pending irq events */
Mark Lordcae6edc2009-01-19 18:05:42 -05003491 hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
Mark Lordcae5a292009-04-06 16:43:45 -04003492 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04003493
Mark Lord88e675e2008-05-17 13:36:30 -04003494 mv_enable_port_irqs(ap, ERR_IRQ);
Brett Russ31961942005-09-30 01:36:00 -04003495}
3496
Brett Russ05b308e2005-10-05 17:08:53 -04003497/**
3498 * mv_port_init - Perform some early initialization on a single port.
3499 * @port: libata data structure storing shadow register addresses
3500 * @port_mmio: base address of the port
3501 *
3502 * Initialize shadow register mmio addresses, clear outstanding
3503 * interrupts on the port, and unmask interrupts for the future
3504 * start of the port.
3505 *
3506 * LOCKING:
3507 * Inherited from caller.
3508 */
Brett Russ31961942005-09-30 01:36:00 -04003509static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
3510{
Mark Lordcae5a292009-04-06 16:43:45 -04003511 void __iomem *serr, *shd_base = port_mmio + SHD_BLK;
Brett Russ31961942005-09-30 01:36:00 -04003512
Jeff Garzik8b260242005-11-12 12:32:50 -05003513 /* PIO related setup
Brett Russ31961942005-09-30 01:36:00 -04003514 */
3515 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
Jeff Garzik8b260242005-11-12 12:32:50 -05003516 port->error_addr =
Brett Russ31961942005-09-30 01:36:00 -04003517 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
3518 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
3519 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
3520 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
3521 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
3522 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
Jeff Garzik8b260242005-11-12 12:32:50 -05003523 port->status_addr =
Brett Russ31961942005-09-30 01:36:00 -04003524 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
3525 /* special case: control/altstatus doesn't have ATA_REG_ address */
Mark Lordcae5a292009-04-06 16:43:45 -04003526 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;
Brett Russ31961942005-09-30 01:36:00 -04003527
3528 /* unused: */
Randy Dunlap8d9db2d2007-02-16 01:40:06 -08003529 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
Brett Russ20f733e2005-09-01 18:26:17 -04003530
Brett Russ31961942005-09-30 01:36:00 -04003531 /* Clear any currently outstanding port interrupt conditions */
Mark Lordcae5a292009-04-06 16:43:45 -04003532 serr = port_mmio + mv_scr_offset(SCR_ERROR);
3533 writelfl(readl(serr), serr);
3534 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
Brett Russ31961942005-09-30 01:36:00 -04003535
Mark Lord646a4da2008-01-26 18:30:37 -05003536 /* unmask all non-transient EDMA error interrupts */
Mark Lordcae5a292009-04-06 16:43:45 -04003537 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK);
Brett Russ20f733e2005-09-01 18:26:17 -04003538
Jeff Garzik8b260242005-11-12 12:32:50 -05003539 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
Mark Lordcae5a292009-04-06 16:43:45 -04003540 readl(port_mmio + EDMA_CFG),
3541 readl(port_mmio + EDMA_ERR_IRQ_CAUSE),
3542 readl(port_mmio + EDMA_ERR_IRQ_MASK));
Brett Russ20f733e2005-09-01 18:26:17 -04003543}
3544
Mark Lord616d4a92008-05-02 02:08:32 -04003545static unsigned int mv_in_pcix_mode(struct ata_host *host)
3546{
3547 struct mv_host_priv *hpriv = host->private_data;
3548 void __iomem *mmio = hpriv->base;
3549 u32 reg;
3550
Mark Lord1f398472008-05-27 17:54:48 -04003551 if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
Mark Lord616d4a92008-05-02 02:08:32 -04003552 return 0; /* not PCI-X capable */
Mark Lordcae5a292009-04-06 16:43:45 -04003553 reg = readl(mmio + MV_PCI_MODE);
Mark Lord616d4a92008-05-02 02:08:32 -04003554 if ((reg & MV_PCI_MODE_MASK) == 0)
3555 return 0; /* conventional PCI mode */
3556 return 1; /* chip is in PCI-X mode */
3557}
3558
3559static int mv_pci_cut_through_okay(struct ata_host *host)
3560{
3561 struct mv_host_priv *hpriv = host->private_data;
3562 void __iomem *mmio = hpriv->base;
3563 u32 reg;
3564
3565 if (!mv_in_pcix_mode(host)) {
Mark Lordcae5a292009-04-06 16:43:45 -04003566 reg = readl(mmio + MV_PCI_COMMAND);
3567 if (reg & MV_PCI_COMMAND_MRDTRIG)
Mark Lord616d4a92008-05-02 02:08:32 -04003568 return 0; /* not okay */
3569 }
3570 return 1; /* okay */
3571}
3572
Mark Lord65ad7fef2009-04-06 15:24:14 -04003573static void mv_60x1b2_errata_pci7(struct ata_host *host)
3574{
3575 struct mv_host_priv *hpriv = host->private_data;
3576 void __iomem *mmio = hpriv->base;
3577
3578 /* workaround for 60x1-B2 errata PCI#7 */
3579 if (mv_in_pcix_mode(host)) {
Mark Lordcae5a292009-04-06 16:43:45 -04003580 u32 reg = readl(mmio + MV_PCI_COMMAND);
3581 writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND);
Mark Lord65ad7fef2009-04-06 15:24:14 -04003582 }
3583}
3584
Tejun Heo4447d352007-04-17 23:44:08 +09003585static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003586{
Tejun Heo4447d352007-04-17 23:44:08 +09003587 struct pci_dev *pdev = to_pci_dev(host->dev);
3588 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003589 u32 hp_flags = hpriv->hp_flags;
3590
Jeff Garzik5796d1c2007-10-26 00:03:37 -04003591 switch (board_idx) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05003592 case chip_5080:
3593 hpriv->ops = &mv5xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04003594 hp_flags |= MV_HP_GEN_I;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003595
Auke Kok44c10132007-06-08 15:46:36 -07003596 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05003597 case 0x1:
3598 hp_flags |= MV_HP_ERRATA_50XXB0;
3599 break;
3600 case 0x3:
3601 hp_flags |= MV_HP_ERRATA_50XXB2;
3602 break;
3603 default:
3604 dev_printk(KERN_WARNING, &pdev->dev,
3605 "Applying 50XXB2 workarounds to unknown rev\n");
3606 hp_flags |= MV_HP_ERRATA_50XXB2;
3607 break;
3608 }
3609 break;
3610
3611 case chip_504x:
3612 case chip_508x:
3613 hpriv->ops = &mv5xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04003614 hp_flags |= MV_HP_GEN_I;
Jeff Garzik47c2b672005-11-12 21:13:17 -05003615
Auke Kok44c10132007-06-08 15:46:36 -07003616 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05003617 case 0x0:
3618 hp_flags |= MV_HP_ERRATA_50XXB0;
3619 break;
3620 case 0x3:
3621 hp_flags |= MV_HP_ERRATA_50XXB2;
3622 break;
3623 default:
3624 dev_printk(KERN_WARNING, &pdev->dev,
3625 "Applying B2 workarounds to unknown rev\n");
3626 hp_flags |= MV_HP_ERRATA_50XXB2;
3627 break;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003628 }
3629 break;
3630
3631 case chip_604x:
3632 case chip_608x:
Jeff Garzik47c2b672005-11-12 21:13:17 -05003633 hpriv->ops = &mv6xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04003634 hp_flags |= MV_HP_GEN_II;
Jeff Garzik47c2b672005-11-12 21:13:17 -05003635
Auke Kok44c10132007-06-08 15:46:36 -07003636 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05003637 case 0x7:
Mark Lord65ad7fef2009-04-06 15:24:14 -04003638 mv_60x1b2_errata_pci7(host);
Jeff Garzik47c2b672005-11-12 21:13:17 -05003639 hp_flags |= MV_HP_ERRATA_60X1B2;
3640 break;
3641 case 0x9:
3642 hp_flags |= MV_HP_ERRATA_60X1C0;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003643 break;
3644 default:
3645 dev_printk(KERN_WARNING, &pdev->dev,
Jeff Garzik47c2b672005-11-12 21:13:17 -05003646 "Applying B2 workarounds to unknown rev\n");
3647 hp_flags |= MV_HP_ERRATA_60X1B2;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003648 break;
3649 }
3650 break;
3651
Jeff Garzike4e7b892006-01-31 12:18:41 -05003652 case chip_7042:
Mark Lord616d4a92008-05-02 02:08:32 -04003653 hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
Mark Lord306b30f2007-12-04 14:07:52 -05003654 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
3655 (pdev->device == 0x2300 || pdev->device == 0x2310))
3656 {
Mark Lord4e520032007-12-11 12:58:05 -05003657 /*
3658 * Highpoint RocketRAID PCIe 23xx series cards:
3659 *
3660 * Unconfigured drives are treated as "Legacy"
3661 * by the BIOS, and it overwrites sector 8 with
3662 * a "Lgcy" metadata block prior to Linux boot.
3663 *
3664 * Configured drives (RAID or JBOD) leave sector 8
3665 * alone, but instead overwrite a high numbered
3666 * sector for the RAID metadata. This sector can
3667 * be determined exactly, by truncating the physical
3668 * drive capacity to a nice even GB value.
3669 *
3670 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
3671 *
3672 * Warn the user, lest they think we're just buggy.
3673 */
3674 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
3675 " BIOS CORRUPTS DATA on all attached drives,"
3676 " regardless of if/how they are configured."
3677 " BEWARE!\n");
3678 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
3679 " use sectors 8-9 on \"Legacy\" drives,"
3680 " and avoid the final two gigabytes on"
3681 " all RocketRAID BIOS initialized drives.\n");
Mark Lord306b30f2007-12-04 14:07:52 -05003682 }
Mark Lord8e7decd2008-05-02 02:07:51 -04003683 /* drop through */
Jeff Garzike4e7b892006-01-31 12:18:41 -05003684 case chip_6042:
3685 hpriv->ops = &mv6xxx_ops;
Jeff Garzike4e7b892006-01-31 12:18:41 -05003686 hp_flags |= MV_HP_GEN_IIE;
Mark Lord616d4a92008-05-02 02:08:32 -04003687 if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
3688 hp_flags |= MV_HP_CUT_THROUGH;
Jeff Garzike4e7b892006-01-31 12:18:41 -05003689
Auke Kok44c10132007-06-08 15:46:36 -07003690 switch (pdev->revision) {
Mark Lord5cf73bf2008-05-27 17:58:56 -04003691 case 0x2: /* Rev.B0: the first/only public release */
Jeff Garzike4e7b892006-01-31 12:18:41 -05003692 hp_flags |= MV_HP_ERRATA_60X1C0;
3693 break;
3694 default:
3695 dev_printk(KERN_WARNING, &pdev->dev,
3696 "Applying 60X1C0 workarounds to unknown rev\n");
3697 hp_flags |= MV_HP_ERRATA_60X1C0;
3698 break;
3699 }
3700 break;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003701 case chip_soc:
3702 hpriv->ops = &mv_soc_ops;
Saeed Bisharaeb3a55a2008-08-04 00:52:55 -11003703 hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
3704 MV_HP_ERRATA_60X1C0;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003705 break;
Jeff Garzike4e7b892006-01-31 12:18:41 -05003706
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003707 default:
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003708 dev_printk(KERN_ERR, host->dev,
Jeff Garzik5796d1c2007-10-26 00:03:37 -04003709 "BUG: invalid board index %u\n", board_idx);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003710 return 1;
3711 }
3712
3713 hpriv->hp_flags = hp_flags;
Mark Lord02a121d2007-12-01 13:07:22 -05003714 if (hp_flags & MV_HP_PCIE) {
Mark Lordcae5a292009-04-06 16:43:45 -04003715 hpriv->irq_cause_offset = PCIE_IRQ_CAUSE;
3716 hpriv->irq_mask_offset = PCIE_IRQ_MASK;
Mark Lord02a121d2007-12-01 13:07:22 -05003717 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
3718 } else {
Mark Lordcae5a292009-04-06 16:43:45 -04003719 hpriv->irq_cause_offset = PCI_IRQ_CAUSE;
3720 hpriv->irq_mask_offset = PCI_IRQ_MASK;
Mark Lord02a121d2007-12-01 13:07:22 -05003721 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
3722 }
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003723
3724 return 0;
3725}
3726
Brett Russ05b308e2005-10-05 17:08:53 -04003727/**
Jeff Garzik47c2b672005-11-12 21:13:17 -05003728 * mv_init_host - Perform some early initialization of the host.
Tejun Heo4447d352007-04-17 23:44:08 +09003729 * @host: ATA host to initialize
3730 * @board_idx: controller index
Brett Russ05b308e2005-10-05 17:08:53 -04003731 *
3732 * If possible, do an early global reset of the host. Then do
3733 * our port init and clear/unmask all/relevant host interrupts.
3734 *
3735 * LOCKING:
3736 * Inherited from caller.
3737 */
Tejun Heo4447d352007-04-17 23:44:08 +09003738static int mv_init_host(struct ata_host *host, unsigned int board_idx)
Brett Russ20f733e2005-09-01 18:26:17 -04003739{
3740 int rc = 0, n_hc, port, hc;
Tejun Heo4447d352007-04-17 23:44:08 +09003741 struct mv_host_priv *hpriv = host->private_data;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003742 void __iomem *mmio = hpriv->base;
Jeff Garzik47c2b672005-11-12 21:13:17 -05003743
Tejun Heo4447d352007-04-17 23:44:08 +09003744 rc = mv_chip_id(host, board_idx);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003745 if (rc)
Mark Lord352fab72008-04-19 14:43:42 -04003746 goto done;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003747
Mark Lord1f398472008-05-27 17:54:48 -04003748 if (IS_SOC(hpriv)) {
Mark Lordcae5a292009-04-06 16:43:45 -04003749 hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE;
3750 hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK;
Mark Lord1f398472008-05-27 17:54:48 -04003751 } else {
Mark Lordcae5a292009-04-06 16:43:45 -04003752 hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE;
3753 hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003754 }
Mark Lord352fab72008-04-19 14:43:42 -04003755
Thomas Reitmayr5d0fb2e2009-01-24 20:24:58 +01003756 /* initialize shadow irq mask with register's value */
3757 hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);
3758
Mark Lord352fab72008-04-19 14:43:42 -04003759 /* global interrupt mask: 0 == mask everything */
Mark Lordc4de5732008-05-17 13:35:21 -04003760 mv_set_main_irq_mask(host, ~0, 0);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003761
Tejun Heo4447d352007-04-17 23:44:08 +09003762 n_hc = mv_get_hc_count(host->ports[0]->flags);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05003763
Tejun Heo4447d352007-04-17 23:44:08 +09003764 for (port = 0; port < host->n_ports; port++)
Jeff Garzik47c2b672005-11-12 21:13:17 -05003765 hpriv->ops->read_preamp(hpriv, port, mmio);
Brett Russ20f733e2005-09-01 18:26:17 -04003766
Jeff Garzikc9d39132005-11-13 17:47:51 -05003767 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
Jeff Garzik47c2b672005-11-12 21:13:17 -05003768 if (rc)
Brett Russ20f733e2005-09-01 18:26:17 -04003769 goto done;
Brett Russ20f733e2005-09-01 18:26:17 -04003770
Jeff Garzik522479f2005-11-12 22:14:02 -05003771 hpriv->ops->reset_flash(hpriv, mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003772 hpriv->ops->reset_bus(host, mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -05003773 hpriv->ops->enable_leds(hpriv, mmio);
Brett Russ20f733e2005-09-01 18:26:17 -04003774
Tejun Heo4447d352007-04-17 23:44:08 +09003775 for (port = 0; port < host->n_ports; port++) {
Tejun Heocbcdd872007-08-18 13:14:55 +09003776 struct ata_port *ap = host->ports[port];
Jeff Garzik2a47ce02005-11-12 23:05:14 -05003777 void __iomem *port_mmio = mv_port_base(mmio, port);
Tejun Heocbcdd872007-08-18 13:14:55 +09003778
3779 mv_port_init(&ap->ioaddr, port_mmio);
3780
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003781#ifdef CONFIG_PCI
Mark Lord1f398472008-05-27 17:54:48 -04003782 if (!IS_SOC(hpriv)) {
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003783 unsigned int offset = port_mmio - mmio;
3784 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
3785 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
3786 }
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003787#endif
Brett Russ20f733e2005-09-01 18:26:17 -04003788 }
3789
3790 for (hc = 0; hc < n_hc; hc++) {
Brett Russ31961942005-09-30 01:36:00 -04003791 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3792
3793 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
3794 "(before clear)=0x%08x\n", hc,
Mark Lordcae5a292009-04-06 16:43:45 -04003795 readl(hc_mmio + HC_CFG),
3796 readl(hc_mmio + HC_IRQ_CAUSE));
Brett Russ31961942005-09-30 01:36:00 -04003797
3798 /* Clear any currently outstanding hc interrupt conditions */
Mark Lordcae5a292009-04-06 16:43:45 -04003799 writelfl(0, hc_mmio + HC_IRQ_CAUSE);
Brett Russ20f733e2005-09-01 18:26:17 -04003800 }
3801
Mark Lord44c65d12009-04-06 12:29:49 -04003802 if (!IS_SOC(hpriv)) {
3803 /* Clear any currently outstanding host interrupt conditions */
Mark Lordcae5a292009-04-06 16:43:45 -04003804 writelfl(0, mmio + hpriv->irq_cause_offset);
Brett Russ31961942005-09-30 01:36:00 -04003805
Mark Lord44c65d12009-04-06 12:29:49 -04003806 /* and unmask interrupt generation for host regs */
Mark Lordcae5a292009-04-06 16:43:45 -04003807 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset);
Mark Lord44c65d12009-04-06 12:29:49 -04003808 }
Jeff Garzikfb621e22007-02-25 04:19:45 -05003809
Mark Lord6be96ac2009-02-19 10:38:04 -05003810 /*
3811 * enable only global host interrupts for now.
3812 * The per-port interrupts get done later as ports are set up.
3813 */
3814 mv_set_main_irq_mask(host, 0, PCI_ERR);
Mark Lord2b748a02009-03-10 22:01:17 -04003815 mv_set_irq_coalescing(host, irq_coalescing_io_count,
3816 irq_coalescing_usecs);
Brett Russ31961942005-09-30 01:36:00 -04003817done:
Brett Russ20f733e2005-09-01 18:26:17 -04003818 return rc;
3819}
3820
Byron Bradleyfbf14e22008-02-10 21:17:30 +00003821static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
3822{
3823 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
3824 MV_CRQB_Q_SZ, 0);
3825 if (!hpriv->crqb_pool)
3826 return -ENOMEM;
3827
3828 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
3829 MV_CRPB_Q_SZ, 0);
3830 if (!hpriv->crpb_pool)
3831 return -ENOMEM;
3832
3833 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
3834 MV_SG_TBL_SZ, 0);
3835 if (!hpriv->sg_tbl_pool)
3836 return -ENOMEM;
3837
3838 return 0;
3839}
3840
Lennert Buytenhek15a32632008-03-27 14:51:39 -04003841static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
3842 struct mbus_dram_target_info *dram)
3843{
3844 int i;
3845
3846 for (i = 0; i < 4; i++) {
3847 writel(0, hpriv->base + WINDOW_CTRL(i));
3848 writel(0, hpriv->base + WINDOW_BASE(i));
3849 }
3850
3851 for (i = 0; i < dram->num_cs; i++) {
3852 struct mbus_dram_window *cs = dram->cs + i;
3853
3854 writel(((cs->size - 1) & 0xffff0000) |
3855 (cs->mbus_attr << 8) |
3856 (dram->mbus_dram_target_id << 4) | 1,
3857 hpriv->base + WINDOW_CTRL(i));
3858 writel(cs->base, hpriv->base + WINDOW_BASE(i));
3859 }
3860}
3861
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003862/**
3863 * mv_platform_probe - handle a positive probe of an soc Marvell
3864 * host
3865 * @pdev: platform device found
3866 *
3867 * LOCKING:
3868 * Inherited from caller.
3869 */
3870static int mv_platform_probe(struct platform_device *pdev)
3871{
3872 static int printed_version;
3873 const struct mv_sata_platform_data *mv_platform_data;
3874 const struct ata_port_info *ppi[] =
3875 { &mv_port_info[chip_soc], NULL };
3876 struct ata_host *host;
3877 struct mv_host_priv *hpriv;
3878 struct resource *res;
3879 int n_ports, rc;
3880
3881 if (!printed_version++)
3882 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
3883
3884 /*
3885 * Simple resource validation ..
3886 */
3887 if (unlikely(pdev->num_resources != 2)) {
3888 dev_err(&pdev->dev, "invalid number of resources\n");
3889 return -EINVAL;
3890 }
3891
3892 /*
3893 * Get the register base first
3894 */
3895 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3896 if (res == NULL)
3897 return -EINVAL;
3898
3899 /* allocate host */
3900 mv_platform_data = pdev->dev.platform_data;
3901 n_ports = mv_platform_data->n_ports;
3902
3903 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3904 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3905
3906 if (!host || !hpriv)
3907 return -ENOMEM;
3908 host->private_data = hpriv;
3909 hpriv->n_ports = n_ports;
3910
3911 host->iomap = NULL;
Saeed Bisharaf1cb0ea2008-02-18 07:42:28 -11003912 hpriv->base = devm_ioremap(&pdev->dev, res->start,
3913 res->end - res->start + 1);
Mark Lordcae5a292009-04-06 16:43:45 -04003914 hpriv->base -= SATAHC0_REG_BASE;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003915
Lennert Buytenhek15a32632008-03-27 14:51:39 -04003916 /*
3917 * (Re-)program MBUS remapping windows if we are asked to.
3918 */
3919 if (mv_platform_data->dram != NULL)
3920 mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
3921
Byron Bradleyfbf14e22008-02-10 21:17:30 +00003922 rc = mv_create_dma_pools(hpriv, &pdev->dev);
3923 if (rc)
3924 return rc;
3925
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003926 /* initialize adapter */
3927 rc = mv_init_host(host, chip_soc);
3928 if (rc)
3929 return rc;
3930
3931 dev_printk(KERN_INFO, &pdev->dev,
3932 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
3933 host->n_ports);
3934
3935 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
3936 IRQF_SHARED, &mv6_sht);
3937}
3938
3939/*
3940 *
3941 * mv_platform_remove - unplug a platform interface
3942 * @pdev: platform device
3943 *
3944 * A platform bus SATA device has been unplugged. Perform the needed
3945 * cleanup. Also called on module unload for any active devices.
3946 */
3947static int __devexit mv_platform_remove(struct platform_device *pdev)
3948{
3949 struct device *dev = &pdev->dev;
3950 struct ata_host *host = dev_get_drvdata(dev);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003951
3952 ata_host_detach(host);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003953 return 0;
3954}
3955
3956static struct platform_driver mv_platform_driver = {
3957 .probe = mv_platform_probe,
3958 .remove = __devexit_p(mv_platform_remove),
3959 .driver = {
3960 .name = DRV_NAME,
3961 .owner = THIS_MODULE,
3962 },
3963};
3964
3965
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003966#ifdef CONFIG_PCI
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003967static int mv_pci_init_one(struct pci_dev *pdev,
3968 const struct pci_device_id *ent);
3969
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003970
3971static struct pci_driver mv_pci_driver = {
3972 .name = DRV_NAME,
3973 .id_table = mv_pci_tbl,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003974 .probe = mv_pci_init_one,
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003975 .remove = ata_pci_remove_one,
3976};
3977
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003978/* move to PCI layer or libata core? */
3979static int pci_go_64(struct pci_dev *pdev)
3980{
3981 int rc;
3982
Yang Hongyang6a355282009-04-06 19:01:13 -07003983 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3984 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003985 if (rc) {
Yang Hongyang284901a2009-04-06 19:01:15 -07003986 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003987 if (rc) {
3988 dev_printk(KERN_ERR, &pdev->dev,
3989 "64-bit DMA enable failed\n");
3990 return rc;
3991 }
3992 }
3993 } else {
Yang Hongyang284901a2009-04-06 19:01:15 -07003994 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003995 if (rc) {
3996 dev_printk(KERN_ERR, &pdev->dev,
3997 "32-bit DMA enable failed\n");
3998 return rc;
3999 }
Yang Hongyang284901a2009-04-06 19:01:15 -07004000 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Saeed Bishara7bb3c522008-01-30 11:50:45 -11004001 if (rc) {
4002 dev_printk(KERN_ERR, &pdev->dev,
4003 "32-bit consistent DMA enable failed\n");
4004 return rc;
4005 }
4006 }
4007
4008 return rc;
4009}
4010
Brett Russ05b308e2005-10-05 17:08:53 -04004011/**
4012 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09004013 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04004014 *
4015 * FIXME: complete this.
4016 *
4017 * LOCKING:
4018 * Inherited from caller.
4019 */
Tejun Heo4447d352007-04-17 23:44:08 +09004020static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04004021{
Tejun Heo4447d352007-04-17 23:44:08 +09004022 struct pci_dev *pdev = to_pci_dev(host->dev);
4023 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07004024 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04004025 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04004026
4027 /* Use this to determine the HW stepping of the chip so we know
4028 * what errata to workaround
4029 */
Brett Russ31961942005-09-30 01:36:00 -04004030 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
4031 if (scc == 0)
4032 scc_s = "SCSI";
4033 else if (scc == 0x01)
4034 scc_s = "RAID";
4035 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04004036 scc_s = "?";
4037
4038 if (IS_GEN_I(hpriv))
4039 gen = "I";
4040 else if (IS_GEN_II(hpriv))
4041 gen = "II";
4042 else if (IS_GEN_IIE(hpriv))
4043 gen = "IIE";
4044 else
4045 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04004046
Jeff Garzika9524a72005-10-30 14:39:11 -05004047 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04004048 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
4049 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04004050 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
4051}
4052
Brett Russ05b308e2005-10-05 17:08:53 -04004053/**
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05004054 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
Brett Russ05b308e2005-10-05 17:08:53 -04004055 * @pdev: PCI device found
4056 * @ent: PCI device ID entry for the matched host
4057 *
4058 * LOCKING:
4059 * Inherited from caller.
4060 */
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05004061static int mv_pci_init_one(struct pci_dev *pdev,
4062 const struct pci_device_id *ent)
Brett Russ20f733e2005-09-01 18:26:17 -04004063{
Jeff Garzik2dcb4072007-10-19 06:42:56 -04004064 static int printed_version;
Brett Russ20f733e2005-09-01 18:26:17 -04004065 unsigned int board_idx = (unsigned int)ent->driver_data;
Tejun Heo4447d352007-04-17 23:44:08 +09004066 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
4067 struct ata_host *host;
4068 struct mv_host_priv *hpriv;
4069 int n_ports, rc;
Brett Russ20f733e2005-09-01 18:26:17 -04004070
Jeff Garzika9524a72005-10-30 14:39:11 -05004071 if (!printed_version++)
4072 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
Brett Russ20f733e2005-09-01 18:26:17 -04004073
Tejun Heo4447d352007-04-17 23:44:08 +09004074 /* allocate host */
4075 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
4076
4077 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
4078 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
4079 if (!host || !hpriv)
4080 return -ENOMEM;
4081 host->private_data = hpriv;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05004082 hpriv->n_ports = n_ports;
Tejun Heo4447d352007-04-17 23:44:08 +09004083
4084 /* acquire resources */
Tejun Heo24dc5f32007-01-20 16:00:28 +09004085 rc = pcim_enable_device(pdev);
4086 if (rc)
Brett Russ20f733e2005-09-01 18:26:17 -04004087 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04004088
Tejun Heo0d5ff562007-02-01 15:06:36 +09004089 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
4090 if (rc == -EBUSY)
Tejun Heo24dc5f32007-01-20 16:00:28 +09004091 pcim_pin_device(pdev);
Tejun Heo0d5ff562007-02-01 15:06:36 +09004092 if (rc)
Tejun Heo24dc5f32007-01-20 16:00:28 +09004093 return rc;
Tejun Heo4447d352007-04-17 23:44:08 +09004094 host->iomap = pcim_iomap_table(pdev);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05004095 hpriv->base = host->iomap[MV_PRIMARY_BAR];
Brett Russ20f733e2005-09-01 18:26:17 -04004096
Jeff Garzikd88184f2007-02-26 01:26:06 -05004097 rc = pci_go_64(pdev);
4098 if (rc)
4099 return rc;
4100
Mark Lordda2fa9b2008-01-26 18:32:45 -05004101 rc = mv_create_dma_pools(hpriv, &pdev->dev);
4102 if (rc)
4103 return rc;
4104
Brett Russ20f733e2005-09-01 18:26:17 -04004105 /* initialize adapter */
Tejun Heo4447d352007-04-17 23:44:08 +09004106 rc = mv_init_host(host, board_idx);
Tejun Heo24dc5f32007-01-20 16:00:28 +09004107 if (rc)
4108 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04004109
Mark Lord6d3c30e2009-01-21 10:31:29 -05004110 /* Enable message-switched interrupts, if requested */
4111 if (msi && pci_enable_msi(pdev) == 0)
4112 hpriv->hp_flags |= MV_HP_FLAG_MSI;
Brett Russ20f733e2005-09-01 18:26:17 -04004113
Brett Russ31961942005-09-30 01:36:00 -04004114 mv_dump_pci_cfg(pdev, 0x68);
Tejun Heo4447d352007-04-17 23:44:08 +09004115 mv_print_info(host);
Brett Russ20f733e2005-09-01 18:26:17 -04004116
Tejun Heo4447d352007-04-17 23:44:08 +09004117 pci_set_master(pdev);
Jeff Garzikea8b4db2007-07-17 02:21:50 -04004118 pci_try_set_mwi(pdev);
Tejun Heo4447d352007-04-17 23:44:08 +09004119 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
Jeff Garzikc5d3e452007-07-11 18:30:50 -04004120 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
Brett Russ20f733e2005-09-01 18:26:17 -04004121}
Saeed Bishara7bb3c522008-01-30 11:50:45 -11004122#endif
Brett Russ20f733e2005-09-01 18:26:17 -04004123
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05004124static int mv_platform_probe(struct platform_device *pdev);
4125static int __devexit mv_platform_remove(struct platform_device *pdev);
4126
Brett Russ20f733e2005-09-01 18:26:17 -04004127static int __init mv_init(void)
4128{
Saeed Bishara7bb3c522008-01-30 11:50:45 -11004129 int rc = -ENODEV;
4130#ifdef CONFIG_PCI
4131 rc = pci_register_driver(&mv_pci_driver);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05004132 if (rc < 0)
4133 return rc;
4134#endif
4135 rc = platform_driver_register(&mv_platform_driver);
4136
4137#ifdef CONFIG_PCI
4138 if (rc < 0)
4139 pci_unregister_driver(&mv_pci_driver);
Saeed Bishara7bb3c522008-01-30 11:50:45 -11004140#endif
4141 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04004142}
4143
4144static void __exit mv_exit(void)
4145{
Saeed Bishara7bb3c522008-01-30 11:50:45 -11004146#ifdef CONFIG_PCI
Brett Russ20f733e2005-09-01 18:26:17 -04004147 pci_unregister_driver(&mv_pci_driver);
Saeed Bishara7bb3c522008-01-30 11:50:45 -11004148#endif
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05004149 platform_driver_unregister(&mv_platform_driver);
Brett Russ20f733e2005-09-01 18:26:17 -04004150}
4151
4152MODULE_AUTHOR("Brett Russ");
4153MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
4154MODULE_LICENSE("GPL");
4155MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
4156MODULE_VERSION(DRV_VERSION);
Mark Lord17c5aab2008-04-16 14:56:51 -04004157MODULE_ALIAS("platform:" DRV_NAME);
Brett Russ20f733e2005-09-01 18:26:17 -04004158
Brett Russ20f733e2005-09-01 18:26:17 -04004159module_init(mv_init);
4160module_exit(mv_exit);