blob: 298f17d6e12ce31619d98abce3fe0427ac81bb78 [file] [log] [blame]
/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
23
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is quite often not
  worth the latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.

*/
59
60
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>
/* Driver identity; DRV_VERSION is reported via MODULE_VERSION and probe logs. */
#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.01"
Brett Russ20f733e2005-09-01 18:26:17 -040077
78enum {
79 /* BAR's are enumerated in terms of pci_resource_start() terms */
80 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
81 MV_IO_BAR = 2, /* offset 0x18: IO space */
82 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
83
84 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
85 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
86
87 MV_PCI_REG_BASE = 0,
88 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
Mark Lord615ab952006-05-19 16:24:56 -040089 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
90 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
91 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
92 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
93 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
94
Brett Russ20f733e2005-09-01 18:26:17 -040095 MV_SATAHC0_REG_BASE = 0x20000,
Jeff Garzik522479f2005-11-12 22:14:02 -050096 MV_FLASH_CTL = 0x1046c,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -050097 MV_GPIO_PORT_CTL = 0x104f0,
98 MV_RESET_CFG = 0x180d8,
Brett Russ20f733e2005-09-01 18:26:17 -040099
100 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
101 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
102 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
103 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
104
Brett Russ31961942005-09-30 01:36:00 -0400105 MV_MAX_Q_DEPTH = 32,
106 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
107
108 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
109 * CRPB needs alignment on a 256B boundary. Size == 256B
110 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
111 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
112 */
113 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
114 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
115 MV_MAX_SG_CT = 176,
116 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
117 MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
118
Brett Russ20f733e2005-09-01 18:26:17 -0400119 MV_PORTS_PER_HC = 4,
120 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
121 MV_PORT_HC_SHIFT = 2,
Brett Russ31961942005-09-30 01:36:00 -0400122 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
Brett Russ20f733e2005-09-01 18:26:17 -0400123 MV_PORT_MASK = 3,
124
125 /* Host Flags */
126 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
127 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400128 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400129 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
130 ATA_FLAG_PIO_POLLING,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500131 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
Brett Russ20f733e2005-09-01 18:26:17 -0400132
Brett Russ31961942005-09-30 01:36:00 -0400133 CRQB_FLAG_READ = (1 << 0),
134 CRQB_TAG_SHIFT = 1,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400135 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
136 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
Brett Russ31961942005-09-30 01:36:00 -0400137 CRQB_CMD_ADDR_SHIFT = 8,
138 CRQB_CMD_CS = (0x2 << 11),
139 CRQB_CMD_LAST = (1 << 15),
140
141 CRPB_FLAG_STATUS_SHIFT = 8,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400142 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
143 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
Brett Russ31961942005-09-30 01:36:00 -0400144
145 EPRD_FLAG_END_OF_TBL = (1 << 31),
146
Brett Russ20f733e2005-09-01 18:26:17 -0400147 /* PCI interface registers */
148
Brett Russ31961942005-09-30 01:36:00 -0400149 PCI_COMMAND_OFS = 0xc00,
150
Brett Russ20f733e2005-09-01 18:26:17 -0400151 PCI_MAIN_CMD_STS_OFS = 0xd30,
152 STOP_PCI_MASTER = (1 << 2),
153 PCI_MASTER_EMPTY = (1 << 3),
154 GLOB_SFT_RST = (1 << 4),
155
Jeff Garzik522479f2005-11-12 22:14:02 -0500156 MV_PCI_MODE = 0xd00,
157 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
158 MV_PCI_DISC_TIMER = 0xd04,
159 MV_PCI_MSI_TRIGGER = 0xc38,
160 MV_PCI_SERR_MASK = 0xc28,
161 MV_PCI_XBAR_TMOUT = 0x1d04,
162 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
163 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
164 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
165 MV_PCI_ERR_COMMAND = 0x1d50,
166
Mark Lord02a121d2007-12-01 13:07:22 -0500167 PCI_IRQ_CAUSE_OFS = 0x1d58,
168 PCI_IRQ_MASK_OFS = 0x1d5c,
Brett Russ20f733e2005-09-01 18:26:17 -0400169 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
170
Mark Lord02a121d2007-12-01 13:07:22 -0500171 PCIE_IRQ_CAUSE_OFS = 0x1900,
172 PCIE_IRQ_MASK_OFS = 0x1910,
Mark Lord646a4da2008-01-26 18:30:37 -0500173 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
Mark Lord02a121d2007-12-01 13:07:22 -0500174
Brett Russ20f733e2005-09-01 18:26:17 -0400175 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
176 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
177 PORT0_ERR = (1 << 0), /* shift by port # */
178 PORT0_DONE = (1 << 1), /* shift by port # */
179 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
180 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
181 PCI_ERR = (1 << 18),
182 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
183 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500184 PORTS_0_3_COAL_DONE = (1 << 8),
185 PORTS_4_7_COAL_DONE = (1 << 17),
Brett Russ20f733e2005-09-01 18:26:17 -0400186 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
187 GPIO_INT = (1 << 22),
188 SELF_INT = (1 << 23),
189 TWSI_INT = (1 << 24),
190 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500191 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
Jeff Garzik8b260242005-11-12 12:32:50 -0500192 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
Brett Russ20f733e2005-09-01 18:26:17 -0400193 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
194 HC_MAIN_RSVD),
Jeff Garzikfb621e22007-02-25 04:19:45 -0500195 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
196 HC_MAIN_RSVD_5),
Brett Russ20f733e2005-09-01 18:26:17 -0400197
198 /* SATAHC registers */
199 HC_CFG_OFS = 0,
200
201 HC_IRQ_CAUSE_OFS = 0x14,
Brett Russ31961942005-09-30 01:36:00 -0400202 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
Brett Russ20f733e2005-09-01 18:26:17 -0400203 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
204 DEV_IRQ = (1 << 8), /* shift by port # */
205
206 /* Shadow block registers */
Brett Russ31961942005-09-30 01:36:00 -0400207 SHD_BLK_OFS = 0x100,
208 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
Brett Russ20f733e2005-09-01 18:26:17 -0400209
210 /* SATA registers */
211 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
212 SATA_ACTIVE_OFS = 0x350,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500213 PHY_MODE3 = 0x310,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500214 PHY_MODE4 = 0x314,
215 PHY_MODE2 = 0x330,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500216 MV5_PHY_MODE = 0x74,
217 MV5_LT_MODE = 0x30,
218 MV5_PHY_CTL = 0x0C,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500219 SATA_INTERFACE_CTL = 0x050,
220
221 MV_M2_PREAMP_MASK = 0x7e0,
Brett Russ20f733e2005-09-01 18:26:17 -0400222
223 /* Port registers */
224 EDMA_CFG_OFS = 0,
Brett Russ31961942005-09-30 01:36:00 -0400225 EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */
226 EDMA_CFG_NCQ = (1 << 5),
227 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
228 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
229 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
Brett Russ20f733e2005-09-01 18:26:17 -0400230
231 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
232 EDMA_ERR_IRQ_MASK_OFS = 0xc,
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400233 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
234 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
235 EDMA_ERR_DEV = (1 << 2), /* device error */
236 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
237 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
238 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400239 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
240 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400241 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400242 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400243 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
244 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
245 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
246 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
Mark Lord646a4da2008-01-26 18:30:37 -0500247
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400248 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500249 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
250 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
251 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
252 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
253
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400254 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500255
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400256 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500257 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
258 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
259 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
260 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
261 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
262
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400263 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500264
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400265 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400266 EDMA_ERR_OVERRUN_5 = (1 << 5),
267 EDMA_ERR_UNDERRUN_5 = (1 << 6),
Mark Lord646a4da2008-01-26 18:30:37 -0500268
269 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
270 EDMA_ERR_LNK_CTRL_RX_1 |
271 EDMA_ERR_LNK_CTRL_RX_3 |
272 EDMA_ERR_LNK_CTRL_TX,
273
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400274 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
275 EDMA_ERR_PRD_PAR |
276 EDMA_ERR_DEV_DCON |
277 EDMA_ERR_DEV_CON |
278 EDMA_ERR_SERR |
279 EDMA_ERR_SELF_DIS |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400280 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400281 EDMA_ERR_CRPB_PAR |
282 EDMA_ERR_INTRL_PAR |
283 EDMA_ERR_IORDY |
284 EDMA_ERR_LNK_CTRL_RX_2 |
285 EDMA_ERR_LNK_DATA_RX |
286 EDMA_ERR_LNK_DATA_TX |
287 EDMA_ERR_TRANS_PROTO,
288 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
289 EDMA_ERR_PRD_PAR |
290 EDMA_ERR_DEV_DCON |
291 EDMA_ERR_DEV_CON |
292 EDMA_ERR_OVERRUN_5 |
293 EDMA_ERR_UNDERRUN_5 |
294 EDMA_ERR_SELF_DIS_5 |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400295 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400296 EDMA_ERR_CRPB_PAR |
297 EDMA_ERR_INTRL_PAR |
298 EDMA_ERR_IORDY,
Brett Russ20f733e2005-09-01 18:26:17 -0400299
Brett Russ31961942005-09-30 01:36:00 -0400300 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
301 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400302
303 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
304 EDMA_REQ_Q_PTR_SHIFT = 5,
305
306 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
307 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
308 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400309 EDMA_RSP_Q_PTR_SHIFT = 3,
310
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400311 EDMA_CMD_OFS = 0x28, /* EDMA command register */
312 EDMA_EN = (1 << 0), /* enable EDMA */
313 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
314 ATA_RST = (1 << 2), /* reset trans/link/phy */
Brett Russ20f733e2005-09-01 18:26:17 -0400315
Jeff Garzikc9d39132005-11-13 17:47:51 -0500316 EDMA_IORDY_TMOUT = 0x34,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500317 EDMA_ARB_CFG = 0x38,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500318
Brett Russ31961942005-09-30 01:36:00 -0400319 /* Host private flags (hp_flags) */
320 MV_HP_FLAG_MSI = (1 << 0),
Jeff Garzik47c2b672005-11-12 21:13:17 -0500321 MV_HP_ERRATA_50XXB0 = (1 << 1),
322 MV_HP_ERRATA_50XXB2 = (1 << 2),
323 MV_HP_ERRATA_60X1B2 = (1 << 3),
324 MV_HP_ERRATA_60X1C0 = (1 << 4),
Jeff Garzike4e7b892006-01-31 12:18:41 -0500325 MV_HP_ERRATA_XX42A0 = (1 << 5),
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400326 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
327 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
328 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
Mark Lord02a121d2007-12-01 13:07:22 -0500329 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
Brett Russ20f733e2005-09-01 18:26:17 -0400330
Brett Russ31961942005-09-30 01:36:00 -0400331 /* Port private flags (pp_flags) */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400332 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
333 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
Brett Russ31961942005-09-30 01:36:00 -0400334};
335
/* Chip-generation predicates, keyed off hp_flags set at probe time. */
#define IS_GEN_I(hpriv)		((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500339
/* DMA layout constants kept in a separate enum: the values exceed what
 * some compilers accept as int-typed enumerators in the main enum.
 */
enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill-sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
354
/* Board index into mv_port_info[]; also the driver_data in mv_pci_tbl. */
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};
364
Brett Russ31961942005-09-30 01:36:00 -0400365/* Command ReQuest Block: 32B */
366struct mv_crqb {
Mark Lorde1469872006-05-22 19:02:03 -0400367 __le32 sg_addr;
368 __le32 sg_addr_hi;
369 __le16 ctrl_flags;
370 __le16 ata_cmd[11];
Brett Russ31961942005-09-30 01:36:00 -0400371};
372
Jeff Garzike4e7b892006-01-31 12:18:41 -0500373struct mv_crqb_iie {
Mark Lorde1469872006-05-22 19:02:03 -0400374 __le32 addr;
375 __le32 addr_hi;
376 __le32 flags;
377 __le32 len;
378 __le32 ata_cmd[4];
Jeff Garzike4e7b892006-01-31 12:18:41 -0500379};
380
Brett Russ31961942005-09-30 01:36:00 -0400381/* Command ResPonse Block: 8B */
382struct mv_crpb {
Mark Lorde1469872006-05-22 19:02:03 -0400383 __le16 id;
384 __le16 flags;
385 __le32 tmstmp;
Brett Russ31961942005-09-30 01:36:00 -0400386};
387
388/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
389struct mv_sg {
Mark Lorde1469872006-05-22 19:02:03 -0400390 __le32 addr;
391 __le32 flags_size;
392 __le32 addr_hi;
393 __le32 reserved;
Brett Russ20f733e2005-09-01 18:26:17 -0400394};
395
396struct mv_port_priv {
Brett Russ31961942005-09-30 01:36:00 -0400397 struct mv_crqb *crqb;
398 dma_addr_t crqb_dma;
399 struct mv_crpb *crpb;
400 dma_addr_t crpb_dma;
401 struct mv_sg *sg_tbl;
402 dma_addr_t sg_tbl_dma;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400403
404 unsigned int req_idx;
405 unsigned int resp_idx;
406
Brett Russ31961942005-09-30 01:36:00 -0400407 u32 pp_flags;
Brett Russ20f733e2005-09-01 18:26:17 -0400408};
409
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500410struct mv_port_signal {
411 u32 amps;
412 u32 pre;
413};
414
Mark Lord02a121d2007-12-01 13:07:22 -0500415struct mv_host_priv {
416 u32 hp_flags;
417 struct mv_port_signal signal[8];
418 const struct mv_hw_ops *ops;
419 u32 irq_cause_ofs;
420 u32 irq_mask_ofs;
421 u32 unmask_all_irqs;
422};
423
Jeff Garzik47c2b672005-11-12 21:13:17 -0500424struct mv_hw_ops {
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500425 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
426 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500427 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
428 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
429 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500430 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
431 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500432 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
433 void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500434};
435
Brett Russ20f733e2005-09-01 18:26:17 -0400436static void mv_irq_clear(struct ata_port *ap);
Tejun Heoda3dbb12007-07-16 14:29:40 +0900437static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
438static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
439static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
440static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Brett Russ31961942005-09-30 01:36:00 -0400441static int mv_port_start(struct ata_port *ap);
442static void mv_port_stop(struct ata_port *ap);
443static void mv_qc_prep(struct ata_queued_cmd *qc);
Jeff Garzike4e7b892006-01-31 12:18:41 -0500444static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
Tejun Heo9a3d9eb2006-01-23 13:09:36 +0900445static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400446static void mv_error_handler(struct ata_port *ap);
447static void mv_post_int_cmd(struct ata_queued_cmd *qc);
448static void mv_eh_freeze(struct ata_port *ap);
449static void mv_eh_thaw(struct ata_port *ap);
Brett Russ20f733e2005-09-01 18:26:17 -0400450static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
451
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500452static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
453 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500454static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
455static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
456 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500457static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
458 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500459static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
460static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500461
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500462static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
463 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500464static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
465static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
466 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500467static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
468 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500469static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
470static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500471static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
472 unsigned int port_no);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500473
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400474static struct scsi_host_template mv5_sht = {
Brett Russ20f733e2005-09-01 18:26:17 -0400475 .module = THIS_MODULE,
476 .name = DRV_NAME,
477 .ioctl = ata_scsi_ioctl,
478 .queuecommand = ata_scsi_queuecmd,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400479 .can_queue = ATA_DEF_QUEUE,
480 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400481 .sg_tablesize = MV_MAX_SG_CT / 2,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400482 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
483 .emulated = ATA_SHT_EMULATED,
484 .use_clustering = 1,
485 .proc_name = DRV_NAME,
486 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzik3be6cbd2007-10-18 16:21:18 -0400487 .slave_configure = ata_scsi_slave_config,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400488 .slave_destroy = ata_scsi_slave_destroy,
489 .bios_param = ata_std_bios_param,
490};
491
492static struct scsi_host_template mv6_sht = {
493 .module = THIS_MODULE,
494 .name = DRV_NAME,
495 .ioctl = ata_scsi_ioctl,
496 .queuecommand = ata_scsi_queuecmd,
497 .can_queue = ATA_DEF_QUEUE,
Brett Russ20f733e2005-09-01 18:26:17 -0400498 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400499 .sg_tablesize = MV_MAX_SG_CT / 2,
Brett Russ20f733e2005-09-01 18:26:17 -0400500 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
501 .emulated = ATA_SHT_EMULATED,
Jeff Garzikd88184f2007-02-26 01:26:06 -0500502 .use_clustering = 1,
Brett Russ20f733e2005-09-01 18:26:17 -0400503 .proc_name = DRV_NAME,
504 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzik3be6cbd2007-10-18 16:21:18 -0400505 .slave_configure = ata_scsi_slave_config,
Tejun Heoccf68c32006-05-31 18:28:09 +0900506 .slave_destroy = ata_scsi_slave_destroy,
Brett Russ20f733e2005-09-01 18:26:17 -0400507 .bios_param = ata_std_bios_param,
Brett Russ20f733e2005-09-01 18:26:17 -0400508};
509
Jeff Garzikc9d39132005-11-13 17:47:51 -0500510static const struct ata_port_operations mv5_ops = {
Jeff Garzikc9d39132005-11-13 17:47:51 -0500511 .tf_load = ata_tf_load,
512 .tf_read = ata_tf_read,
513 .check_status = ata_check_status,
514 .exec_command = ata_exec_command,
515 .dev_select = ata_std_dev_select,
516
Jeff Garzikcffacd82007-03-09 09:46:47 -0500517 .cable_detect = ata_cable_sata,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500518
519 .qc_prep = mv_qc_prep,
520 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900521 .data_xfer = ata_data_xfer,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500522
Jeff Garzikc9d39132005-11-13 17:47:51 -0500523 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900524 .irq_on = ata_irq_on,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500525
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400526 .error_handler = mv_error_handler,
527 .post_internal_cmd = mv_post_int_cmd,
528 .freeze = mv_eh_freeze,
529 .thaw = mv_eh_thaw,
530
Jeff Garzikc9d39132005-11-13 17:47:51 -0500531 .scr_read = mv5_scr_read,
532 .scr_write = mv5_scr_write,
533
534 .port_start = mv_port_start,
535 .port_stop = mv_port_stop,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500536};
537
538static const struct ata_port_operations mv6_ops = {
Brett Russ20f733e2005-09-01 18:26:17 -0400539 .tf_load = ata_tf_load,
540 .tf_read = ata_tf_read,
541 .check_status = ata_check_status,
542 .exec_command = ata_exec_command,
543 .dev_select = ata_std_dev_select,
544
Jeff Garzikcffacd82007-03-09 09:46:47 -0500545 .cable_detect = ata_cable_sata,
Brett Russ20f733e2005-09-01 18:26:17 -0400546
Brett Russ31961942005-09-30 01:36:00 -0400547 .qc_prep = mv_qc_prep,
548 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900549 .data_xfer = ata_data_xfer,
Brett Russ20f733e2005-09-01 18:26:17 -0400550
Brett Russ20f733e2005-09-01 18:26:17 -0400551 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900552 .irq_on = ata_irq_on,
Brett Russ20f733e2005-09-01 18:26:17 -0400553
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400554 .error_handler = mv_error_handler,
555 .post_internal_cmd = mv_post_int_cmd,
556 .freeze = mv_eh_freeze,
557 .thaw = mv_eh_thaw,
558
Brett Russ20f733e2005-09-01 18:26:17 -0400559 .scr_read = mv_scr_read,
560 .scr_write = mv_scr_write,
561
Brett Russ31961942005-09-30 01:36:00 -0400562 .port_start = mv_port_start,
563 .port_stop = mv_port_stop,
Brett Russ20f733e2005-09-01 18:26:17 -0400564};
565
Jeff Garzike4e7b892006-01-31 12:18:41 -0500566static const struct ata_port_operations mv_iie_ops = {
Jeff Garzike4e7b892006-01-31 12:18:41 -0500567 .tf_load = ata_tf_load,
568 .tf_read = ata_tf_read,
569 .check_status = ata_check_status,
570 .exec_command = ata_exec_command,
571 .dev_select = ata_std_dev_select,
572
Jeff Garzikcffacd82007-03-09 09:46:47 -0500573 .cable_detect = ata_cable_sata,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500574
575 .qc_prep = mv_qc_prep_iie,
576 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900577 .data_xfer = ata_data_xfer,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500578
Jeff Garzike4e7b892006-01-31 12:18:41 -0500579 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900580 .irq_on = ata_irq_on,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500581
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400582 .error_handler = mv_error_handler,
583 .post_internal_cmd = mv_post_int_cmd,
584 .freeze = mv_eh_freeze,
585 .thaw = mv_eh_thaw,
586
Jeff Garzike4e7b892006-01-31 12:18:41 -0500587 .scr_read = mv_scr_read,
588 .scr_write = mv_scr_write,
589
590 .port_start = mv_port_start,
591 .port_stop = mv_port_stop,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500592};
593
Arjan van de Ven98ac62d2005-11-28 10:06:23 +0100594static const struct ata_port_info mv_port_info[] = {
Brett Russ20f733e2005-09-01 18:26:17 -0400595 { /* chip_504x */
Jeff Garzikcca39742006-08-24 03:19:22 -0400596 .flags = MV_COMMON_FLAGS,
Brett Russ31961942005-09-30 01:36:00 -0400597 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400598 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500599 .port_ops = &mv5_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400600 },
601 { /* chip_508x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400602 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
Brett Russ31961942005-09-30 01:36:00 -0400603 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400604 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500605 .port_ops = &mv5_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400606 },
Jeff Garzik47c2b672005-11-12 21:13:17 -0500607 { /* chip_5080 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400608 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500609 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400610 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500611 .port_ops = &mv5_ops,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500612 },
Brett Russ20f733e2005-09-01 18:26:17 -0400613 { /* chip_604x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400614 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Brett Russ31961942005-09-30 01:36:00 -0400615 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400616 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500617 .port_ops = &mv6_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400618 },
619 { /* chip_608x */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400620 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
621 MV_FLAG_DUAL_HC,
Brett Russ31961942005-09-30 01:36:00 -0400622 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400623 .udma_mask = ATA_UDMA6,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500624 .port_ops = &mv6_ops,
Brett Russ20f733e2005-09-01 18:26:17 -0400625 },
Jeff Garzike4e7b892006-01-31 12:18:41 -0500626 { /* chip_6042 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400627 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500628 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400629 .udma_mask = ATA_UDMA6,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500630 .port_ops = &mv_iie_ops,
631 },
632 { /* chip_7042 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400633 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500634 .pio_mask = 0x1f, /* pio0-4 */
Jeff Garzikbf6263a2007-07-09 12:16:50 -0400635 .udma_mask = ATA_UDMA6,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500636 .port_ops = &mv_iie_ops,
637 },
Brett Russ20f733e2005-09-01 18:26:17 -0400638};
639
/* PCI IDs of supported chips; driver_data is the board index (chip_XXX)
 * used to look up per-chip flags and port operations.
 */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
669
/* PCI driver glue: probe via mv_init_one(), generic libata removal */
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
676
/* Low-level hardware helpers for the 50xx family (mv5_* implementations) */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
685
/* Low-level hardware helpers for the 60xx/70xx family; note reset_bus
 * uses the shared PCI-bus reset helper rather than an mv6_-specific one.
 */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
694
/*
 * module options
 */
/* NOTE(review): presumably registered with module_param() elsewhere in
 * this file — confirm; only the declaration is visible here.
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
699
700
Jeff Garzikd88184f2007-02-26 01:26:06 -0500701/* move to PCI layer or libata core? */
702static int pci_go_64(struct pci_dev *pdev)
703{
704 int rc;
705
706 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
707 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
708 if (rc) {
709 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
710 if (rc) {
711 dev_printk(KERN_ERR, &pdev->dev,
712 "64-bit DMA enable failed\n");
713 return rc;
714 }
715 }
716 } else {
717 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
718 if (rc) {
719 dev_printk(KERN_ERR, &pdev->dev,
720 "32-bit DMA enable failed\n");
721 return rc;
722 }
723 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
724 if (rc) {
725 dev_printk(KERN_ERR, &pdev->dev,
726 "32-bit consistent DMA enable failed\n");
727 return rc;
728 }
729 }
730
731 return rc;
732}
733
Jeff Garzikddef9bb2006-02-02 16:17:06 -0500734/*
Brett Russ20f733e2005-09-01 18:26:17 -0400735 * Functions
736 */
737
/* Write a register then immediately read it back: the read forces any
 * PCI posted write to reach the device before we continue ("fl" = flush).
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
743
/* MMIO base of SATA host controller (HC) block @hc within the chip */
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}
748
/* Map a chip-wide port number to the index of the HC that owns it */
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}
753
/* Map a chip-wide port number to its position within its own HC */
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
758
/* MMIO base of the HC block that contains chip-wide port @port */
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}
764
/* Per-port register base: HC base, past the HC arbiter registers,
 * indexed by the port's position within that HC.
 */
static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
771
/* Per-port register base for @ap, through the primary BAR's iomap */
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}
776
Jeff Garzikcca39742006-08-24 03:19:22 -0400777static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400778{
Jeff Garzikcca39742006-08-24 03:19:22 -0400779 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400780}
781
/* Intentionally empty ->irq_clear hook; this driver clears interrupt
 * causes explicitly elsewhere rather than in this libata callback.
 * NOTE(review): assumption based on the empty body — confirm against
 * the interrupt handlers.
 */
static void mv_irq_clear(struct ata_port *ap)
{
}
785
/**
 * mv_set_edma_ptrs - Program EDMA queue bases and in/out pointers
 * @port_mmio: per-port MMIO register base
 * @hpriv: host private data (consulted for errata flags)
 * @pp: port private data holding CRQB/CRPB DMA addresses and sw indices
 *
 * Writes the request (CRQB) and response (CRPB) queue base addresses
 * to the hardware and seeds the hardware in/out pointer registers from
 * the driver's cached req_idx/resp_idx.  On chips with the XX42A0
 * errata, the pointer registers also carry the low base-address bits.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);	/* CRQB base must be 1KB aligned */
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);	/* CRPB base must be 256B aligned */
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
825
/**
 * mv_start_dma - Enable eDMA engine
 * @port_mmio: per-port MMIO register base
 * @hpriv: host private data
 * @pp: port private data
 *
 * If eDMA is not already enabled: clear any pending eDMA error
 * events, reprogram the queue pointers, then set EDMA_EN.  In all
 * cases, verify the local cache of the eDMA state is accurate with
 * a WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(void __iomem *port_mmio, struct mv_host_priv *hpriv,
			 struct mv_port_priv *pp)
{
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}
851
/**
 * __mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Request eDMA disable (the disable bit auto-clears), then poll until
 * the engine reports stopped.  Verify the local cache of the eDMA
 * state is accurate with a WARN_ON.
 *
 * Returns 0 on success, -EIO if the engine did not stop within the
 * ~100ms poll window (1000 iterations of udelay(100)).
 *
 * LOCKING:
 * Inherited from caller (see the mv_stop_dma() locking wrapper).
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		/* cached state says disabled; the hardware must agree */
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
894
/**
 * mv_stop_dma - Disable eDMA engine, taking the host lock
 * @ap: ATA channel to manipulate
 *
 * Locking wrapper around __mv_stop_dma() for callers that do not
 * already hold ap->host->lock.  Returns __mv_stop_dma()'s result.
 *
 * LOCKING:
 * Acquires ap->host->lock (IRQ-safe).
 */
static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}
906
#ifdef ATA_DEBUG
/* Debug helper: hex-dump @bytes of MMIO starting at @start, four
 * 32-bit words per output line.
 */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b = 0;

	while (b < bytes) {
		int w;

		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
921
/* Debug helper: hex-dump the first @bytes of @pdev's PCI config space,
 * four dwords per output line (no-op unless ATA_DEBUG is defined).
 */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b = 0;

	while (b < bytes) {
		int w;
		u32 dw;

		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
/* Debug helper: dump PCI config space, chip-global registers, HC
 * registers, and per-port EDMA/SATA registers.  A negative @port
 * means "dump every HC and port".  No-op unless ATA_DEBUG is defined.
 */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	/* NOTE(review): this initializer is dead — hc_base is always
	 * reassigned in the loop below before use (and port may be -1 here).
	 */
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
982
Brett Russ20f733e2005-09-01 18:26:17 -0400983static unsigned int mv_scr_offset(unsigned int sc_reg_in)
984{
985 unsigned int ofs;
986
987 switch (sc_reg_in) {
988 case SCR_STATUS:
989 case SCR_CONTROL:
990 case SCR_ERROR:
991 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
992 break;
993 case SCR_ACTIVE:
994 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
995 break;
996 default:
997 ofs = 0xffffffffU;
998 break;
999 }
1000 return ofs;
1001}
1002
Tejun Heoda3dbb12007-07-16 14:29:40 +09001003static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -04001004{
1005 unsigned int ofs = mv_scr_offset(sc_reg_in);
1006
Tejun Heoda3dbb12007-07-16 14:29:40 +09001007 if (ofs != 0xffffffffU) {
1008 *val = readl(mv_ap_base(ap) + ofs);
1009 return 0;
1010 } else
1011 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001012}
1013
Tejun Heoda3dbb12007-07-16 14:29:40 +09001014static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -04001015{
1016 unsigned int ofs = mv_scr_offset(sc_reg_in);
1017
Tejun Heoda3dbb12007-07-16 14:29:40 +09001018 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -04001019 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001020 return 0;
1021 } else
1022 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001023}
1024
/* Program the per-port EDMA configuration register for this chip
 * generation, leaving NCQ disabled.  @ap is currently unused here.
 */
static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable eQue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1056
/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.  One coherent DMA chunk is carved into three
 * consecutive regions: CRQB queue, CRPB queue, then the ePRD
 * scatter/gather table.  Allocations use devm_/dmam_ helpers, so
 * no explicit free is needed on teardown.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the
 * error from ata_pad_alloc().
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	unsigned long flags;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	/* program EDMA config and queue pointers under the host lock */
	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, hpriv, port_mmio);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}
1129
/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.  (Memory itself is released by the
 * devm/dmam managed allocations made in mv_port_start().)
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
}
1143
/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.  Each ePRD entry
 * length field is 16 bits, so any DMA segment that would cross a
 * 64KB boundary is split into multiple ePRD entries.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		/* emit one ePRD per 64KB-bounded piece of the segment */
		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	/* flag the final entry as end-of-table for the hardware */
	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
1187
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001188static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001189{
Mark Lord559eeda2006-05-19 16:40:15 -04001190 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001191 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001192 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001193}
1194
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* non-DMA protocols are issued via the normal libata path */
	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this*/

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* point the CRQB at this command's shared SG table */
	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accomodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	/* remaining shadow registers, in the order the hardware expects */
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1286
/**
 * mv_qc_prep_iie - Host specific command preparation (Gen IIE chips).
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.  Unlike mv_qc_prep(), the Gen IIE CRQB
 * carries the full taskfile packed into four 32-bit words instead
 * of a sequence of shadow-register writes.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* non-DMA protocols are issued via the normal libata path */
	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
						   what we use as our tag */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	/* pack the taskfile into the CRQB's four command words */
	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1355
/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * Returns 0 for EDMA commands, or ata_qc_issue_prot()'s result
 * for everything else.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(port_mmio, hpriv, pp);

	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1403
Brett Russ05b308e2005-10-05 17:08:53 -04001404/**
Brett Russ05b308e2005-10-05 17:08:53 -04001405 * mv_err_intr - Handle error interrupts on the port
1406 * @ap: ATA channel to manipulate
 * @qc: affected queued command, if any (may be NULL)
Brett Russ05b308e2005-10-05 17:08:53 -04001408 *
1409 * In most cases, just clear the interrupt and move on. However,
1410 * some cases require an eDMA reset, which is done right before
1411 * the COMRESET in mv_phy_reset(). The SERR case requires a
1412 * clear of pending errors in the SATA SERROR register. Finally,
1413 * if the port disabled DMA, update our cached copy to match.
1414 *
1415 * LOCKING:
1416 * Inherited from caller.
1417 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001418static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
Brett Russ20f733e2005-09-01 18:26:17 -04001419{
Brett Russ31961942005-09-30 01:36:00 -04001420 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001421 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1422 struct mv_port_priv *pp = ap->private_data;
1423 struct mv_host_priv *hpriv = ap->host->private_data;
1424 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1425 unsigned int action = 0, err_mask = 0;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001426 struct ata_eh_info *ehi = &ap->link.eh_info;
Brett Russ20f733e2005-09-01 18:26:17 -04001427
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001428 ata_ehi_clear_desc(ehi);
Brett Russ20f733e2005-09-01 18:26:17 -04001429
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001430 if (!edma_enabled) {
1431 /* just a guess: do we need to do this? should we
1432 * expand this, and do it in all cases?
1433 */
Tejun Heo936fd732007-08-06 18:36:23 +09001434 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1435 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Brett Russ20f733e2005-09-01 18:26:17 -04001436 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001437
1438 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1439
1440 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1441
1442 /*
1443 * all generations share these EDMA error cause bits
1444 */
1445
1446 if (edma_err_cause & EDMA_ERR_DEV)
1447 err_mask |= AC_ERR_DEV;
1448 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
Jeff Garzik6c1153e2007-07-13 15:20:15 -04001449 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001450 EDMA_ERR_INTRL_PAR)) {
1451 err_mask |= AC_ERR_ATA_BUS;
1452 action |= ATA_EH_HARDRESET;
Tejun Heob64bbc32007-07-16 14:29:39 +09001453 ata_ehi_push_desc(ehi, "parity error");
Brett Russafb0edd2005-10-05 17:08:42 -04001454 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001455 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1456 ata_ehi_hotplugged(ehi);
1457 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
Tejun Heob64bbc32007-07-16 14:29:39 +09001458 "dev disconnect" : "dev connect");
Mark Lord3606a382008-01-26 18:28:23 -05001459 action |= ATA_EH_HARDRESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001460 }
1461
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04001462 if (IS_GEN_I(hpriv)) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001463 eh_freeze_mask = EDMA_EH_FREEZE_5;
1464
1465 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1466 struct mv_port_priv *pp = ap->private_data;
1467 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001468 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001469 }
1470 } else {
1471 eh_freeze_mask = EDMA_EH_FREEZE;
1472
1473 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1474 struct mv_port_priv *pp = ap->private_data;
1475 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001476 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001477 }
1478
1479 if (edma_err_cause & EDMA_ERR_SERR) {
Tejun Heo936fd732007-08-06 18:36:23 +09001480 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1481 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001482 err_mask = AC_ERR_ATA_BUS;
1483 action |= ATA_EH_HARDRESET;
1484 }
1485 }
Brett Russ20f733e2005-09-01 18:26:17 -04001486
1487 /* Clear EDMA now that SERR cleanup done */
Mark Lord3606a382008-01-26 18:28:23 -05001488 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001489
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001490 if (!err_mask) {
1491 err_mask = AC_ERR_OTHER;
1492 action |= ATA_EH_HARDRESET;
1493 }
1494
1495 ehi->serror |= serr;
1496 ehi->action |= action;
1497
1498 if (qc)
1499 qc->err_mask |= err_mask;
1500 else
1501 ehi->err_mask |= err_mask;
1502
1503 if (edma_err_cause & eh_freeze_mask)
1504 ata_port_freeze(ap);
1505 else
1506 ata_port_abort(ap);
1507}
1508
1509static void mv_intr_pio(struct ata_port *ap)
1510{
1511 struct ata_queued_cmd *qc;
1512 u8 ata_status;
1513
1514 /* ignore spurious intr if drive still BUSY */
1515 ata_status = readb(ap->ioaddr.status_addr);
1516 if (unlikely(ata_status & ATA_BUSY))
1517 return;
1518
1519 /* get active ATA command */
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001520 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001521 if (unlikely(!qc)) /* no active tag */
1522 return;
1523 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1524 return;
1525
1526 /* and finally, complete the ATA command */
1527 qc->err_mask |= ac_err_mask(ata_status);
1528 ata_qc_complete(qc);
1529}
1530
/* Drain the EDMA response queue: complete every finished command between
 * our cached consumer index and the hardware's producer index, then tell
 * the hardware how far we consumed.  On any per-response error, hand off
 * to mv_err_intr() and stop.
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else if (IS_GEN_II(hpriv))
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_6) & 0x3f;

		else /* IS_GEN_IIE */
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_7) & 0x3f;

		qc = ata_qc_from_tag(ap, tag);

		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
		 * bits (WARNING: might not necessarily be associated
		 * with this command), which -should- be clear
		 * if all is well
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if (unlikely(status & 0xff)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	/* single out-pointer write after the batch, not per entry */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1602
Brett Russ05b308e2005-10-05 17:08:53 -04001603/**
1604 * mv_host_intr - Handle all interrupts on the given host controller
Jeff Garzikcca39742006-08-24 03:19:22 -04001605 * @host: host specific structure
Brett Russ05b308e2005-10-05 17:08:53 -04001606 * @relevant: port error bits relevant to this host controller
1607 * @hc: which host controller we're to look at
1608 *
1609 * Read then write clear the HC interrupt status then walk each
1610 * port connected to the HC and see if it needs servicing. Port
1611 * success ints are reported in the HC interrupt status reg, the
1612 * port error ints are reported in the higher level main
1613 * interrupt status register and thus are passed in via the
1614 * 'relevant' argument.
1615 *
1616 * LOCKING:
1617 * Inherited from caller.
1618 */
Jeff Garzikcca39742006-08-24 03:19:22 -04001619static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
Brett Russ20f733e2005-09-01 18:26:17 -04001620{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001621 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
Brett Russ20f733e2005-09-01 18:26:17 -04001622 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
Brett Russ20f733e2005-09-01 18:26:17 -04001623 u32 hc_irq_cause;
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001624 int port, port0;
Brett Russ20f733e2005-09-01 18:26:17 -04001625
Jeff Garzik35177262007-02-24 21:26:42 -05001626 if (hc == 0)
Brett Russ20f733e2005-09-01 18:26:17 -04001627 port0 = 0;
Jeff Garzik35177262007-02-24 21:26:42 -05001628 else
Brett Russ20f733e2005-09-01 18:26:17 -04001629 port0 = MV_PORTS_PER_HC;
Brett Russ20f733e2005-09-01 18:26:17 -04001630
1631 /* we'll need the HC success int register in most cases */
1632 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001633 if (!hc_irq_cause)
1634 return;
1635
1636 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001637
1638 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001639 hc, relevant, hc_irq_cause);
Brett Russ20f733e2005-09-01 18:26:17 -04001640
1641 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
Jeff Garzikcca39742006-08-24 03:19:22 -04001642 struct ata_port *ap = host->ports[port];
Mark Lord63af2a52006-03-29 09:50:31 -05001643 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001644 int have_err_bits, hard_port, shift;
Jeff Garzik55d8ca42006-03-29 19:43:31 -05001645
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001646 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
Jeff Garzika2c91a82005-11-17 05:44:44 -05001647 continue;
1648
Brett Russ31961942005-09-30 01:36:00 -04001649 shift = port << 1; /* (port * 2) */
Brett Russ20f733e2005-09-01 18:26:17 -04001650 if (port >= MV_PORTS_PER_HC) {
1651 shift++; /* skip bit 8 in the HC Main IRQ reg */
1652 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001653 have_err_bits = ((PORT0_ERR << shift) & relevant);
1654
1655 if (unlikely(have_err_bits)) {
1656 struct ata_queued_cmd *qc;
1657
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001658 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001659 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1660 continue;
1661
1662 mv_err_intr(ap, qc);
1663 continue;
Brett Russ20f733e2005-09-01 18:26:17 -04001664 }
Jeff Garzik8b260242005-11-12 12:32:50 -05001665
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001666 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1667
1668 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1669 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1670 mv_intr_edma(ap);
1671 } else {
1672 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1673 mv_intr_pio(ap);
Brett Russ20f733e2005-09-01 18:26:17 -04001674 }
1675 }
1676 VPRINTK("EXIT\n");
1677}
1678
/* Handle a host-level PCI error interrupt: log and clear the PCI error
 * cause, then fail the active command (or record the error for EH) and
 * freeze every port with a device attached.
 */
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	/* ack/clear the PCI error cause */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			/* describe the cause only once, on the first port */
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
1718
Brett Russ05b308e2005-10-05 17:08:53 -04001719/**
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001720 * mv_interrupt - Main interrupt event handler
Brett Russ05b308e2005-10-05 17:08:53 -04001721 * @irq: unused
1722 * @dev_instance: private data; in this case the host structure
Brett Russ05b308e2005-10-05 17:08:53 -04001723 *
1724 * Read the read only register to determine if any host
1725 * controllers have pending interrupts. If so, call lower level
1726 * routine to handle. Also check for PCI errors which are only
1727 * reported here.
1728 *
Jeff Garzik8b260242005-11-12 12:32:50 -05001729 * LOCKING:
Jeff Garzikcca39742006-08-24 03:19:22 -04001730 * This routine holds the host lock while processing pending
Brett Russ05b308e2005-10-05 17:08:53 -04001731 * interrupts.
1732 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	u32 irq_stat, irq_mask;

	spin_lock(&host->lock);
	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
	irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	/* PCI errors supersede everything else; handle and bail */
	if (unlikely(irq_stat & PCI_ERR)) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	/* dispatch each host controller's slice of the main cause bits */
	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1771
Jeff Garzikc9d39132005-11-13 17:47:51 -05001772static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1773{
1774 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1775 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1776
1777 return hc_mmio + ofs;
1778}
1779
1780static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1781{
1782 unsigned int ofs;
1783
1784 switch (sc_reg_in) {
1785 case SCR_STATUS:
1786 case SCR_ERROR:
1787 case SCR_CONTROL:
1788 ofs = sc_reg_in * sizeof(u32);
1789 break;
1790 default:
1791 ofs = 0xffffffffU;
1792 break;
1793 }
1794 return ofs;
1795}
1796
Tejun Heoda3dbb12007-07-16 14:29:40 +09001797static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001798{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001799 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1800 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001801 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1802
Tejun Heoda3dbb12007-07-16 14:29:40 +09001803 if (ofs != 0xffffffffU) {
1804 *val = readl(addr + ofs);
1805 return 0;
1806 } else
1807 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001808}
1809
Tejun Heoda3dbb12007-07-16 14:29:40 +09001810static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001811{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001812 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1813 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001814 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1815
Tejun Heoda3dbb12007-07-16 14:29:40 +09001816 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001817 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001818 return 0;
1819 } else
1820 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001821}
1822
/* 50xx: set the EXP ROM BAR control bit (except on rev-0 5080 parts —
 * NOTE(review): reason for the rev-0 exclusion is not visible here;
 * presumably an erratum on the earliest silicon), then reset the PCI bus.
 */
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}
1837
/* 50xx: program the flash controller register with its reset value. */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1842
Jeff Garzik47c2b672005-11-12 21:13:17 -05001843static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001844 void __iomem *mmio)
1845{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001846 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1847 u32 tmp;
1848
1849 tmp = readl(phy_mmio + MV5_PHY_MODE);
1850
1851 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1852 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001853}
1854
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): "|= ~(1 << 0)" sets every bit EXCEPT bit 0, which
	 * looks suspicious next to mv5_reset_bus()'s "|= (1 << 0)".  It may
	 * be intended as "&= ~(1 << 0)" (clear bit 0), but the hardware-
	 * correct value cannot be confirmed from here — verify against the
	 * Marvell datasheet before changing.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1867
/* 50xx: apply PHY errata workarounds for @port and restore the cached
 * pre-emphasis/amplitude settings captured by mv5_read_preamp().
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		/* 50XXB0 erratum: tweak link-training and PHY control regs */
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* clear the pre/amps fields (mask covers bits 12:11 and 7:5),
	 * then write back the values cached at probe time
	 */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1893
Jeff Garzikc9d39132005-11-13 17:47:51 -05001894
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* 50xx: bring one port to a known state — disable EDMA, reset the
 * channel, then zero/initialize the port's EDMA registers.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1921
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* 50xx: zero one host controller's interrupt registers and program its
 * configuration register at offset 0x20.
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	/* read-modify-write the HC config: keep bits in 0x1c1c1c1c,
	 * set 0x03030303 (magic per vendor init sequence)
	 */
	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
1940
1941static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1942 unsigned int n_hc)
1943{
1944 unsigned int hc, port;
1945
1946 for (hc = 0; hc < n_hc; hc++) {
1947 for (port = 0; port < MV_PORTS_PER_HC; port++)
1948 mv5_reset_hc_port(hpriv, mmio,
1949 (hc * MV_PORTS_PER_HC) + port);
1950
1951 mv5_reset_one_hc(hpriv, mmio, hc);
1952 }
1953
1954 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05001955}
1956
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
/* Quiesce the PCI interface: clear the upper byte of the PCI mode
 * register and zero out all PCI timer/interrupt/error registers.
 */
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	/* chip-variant specific cause/mask offsets come from hpriv */
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
1982
/* 60xx: reset the flash controller (reusing the 50xx routine), then
 * adjust the GPIO port control register (keep low 2 bits, set bits 5-6).
 */
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
1994
/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @mmio: base address of the HBA
 *      @n_hc: unused (present for hp_ops signature compatibility)
 *
 *      This routine only applies to 6xxx parts.  Returns 0 on success,
 *      1 if any reset step times out.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* wait (up to ~1ms) for the PCI master to drain */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
2058
Jeff Garzik47c2b672005-11-12 21:13:17 -05002059static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002060 void __iomem *mmio)
2061{
2062 void __iomem *port_mmio;
2063 u32 tmp;
2064
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002065 tmp = readl(mmio + MV_RESET_CFG);
2066 if ((tmp & (1 << 0)) == 0) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002067 hpriv->signal[idx].amps = 0x7 << 8;
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002068 hpriv->signal[idx].pre = 0x1 << 5;
2069 return;
2070 }
2071
2072 port_mmio = mv_port_base(mmio, idx);
2073 tmp = readl(port_mmio + PHY_MODE2);
2074
2075 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2076 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2077}
2078
/* 60xx: program the GPIO port control register (value per vendor init). */
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
2083
/* 60xx: apply PHY_MODE2/3/4 errata workarounds for @port and restore
 * the cached amplitude/pre-emphasis values.  The exact register values
 * follow the Marvell vendor driver; do not reorder the accesses.
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		/* pulse bits 16 (clear) and 31 (set), then restore */
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* 60X1B2: save register 0x310 across the PHY_MODE4 write */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
2149
/* Hard-reset one channel's EDMA engine, apply per-generation PHY
 * errata, and (Gen II) force gen2i interface speed.
 */
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	/* Gen I parts need extra settle time after reset */
	if (IS_GEN_I(hpriv))
		mdelay(1);
}
2176
/**
 *      mv_phy_reset - Perform eDMA reset followed by COMRESET
 *      @ap: ATA channel to manipulate
 *      @class: written with the detected device class (ATA_DEV_NONE
 *              when the link is offline)
 *      @deadline: jiffies value this reset attempt must complete by
 *
 *      Part of this is taken from __sata_phy_reset and modified to
 *      not sleep since this routine gets called from interrupt level.
 *
 *      LOCKING:
 *      Inherited from caller.  This is coded to safe to call at
 *      interrupt level, i.e. it does not sleep.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			 unsigned long deadline)
{
	struct mv_port_priv *pp	= ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;		/* COMRESET re-issue budget (errata, below) */
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
	msleep(1);

	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
	msleep(20);

	/* poll SStatus until DET settles to "device + phy" (3) or
	 * "no device" (0), or the caller's deadline expires
	 */
	do {
		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata: Gen-II parts may report bogus SStatus
	 * values; re-issue COMRESET up to 'retry' times until a known
	 * good value (0x0/0x113/0x123) is seen
	 */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	if (ata_link_offline(&ap->link)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		msleep(500);
		if (retry-- <= 0)
			break;
		if (time_after(jiffies, deadline))
			break;
	}

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap->link.device, 1, NULL);

	/* acknowledge any EDMA errors raised during the reset */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* eDMA must have been stopped before resetting the channel */
	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}
2280
Tejun Heocc0680a2007-08-06 18:36:23 +09002281static int mv_prereset(struct ata_link *link, unsigned long deadline)
Jeff Garzik22374672005-11-17 10:59:48 -05002282{
Tejun Heocc0680a2007-08-06 18:36:23 +09002283 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002284 struct mv_port_priv *pp = ap->private_data;
Tejun Heocc0680a2007-08-06 18:36:23 +09002285 struct ata_eh_context *ehc = &link->eh_context;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002286 int rc;
Jeff Garzik0ea9e172007-07-13 17:06:45 -04002287
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002288 rc = mv_stop_dma(ap);
2289 if (rc)
2290 ehc->i.action |= ATA_EH_HARDRESET;
2291
2292 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2293 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2294 ehc->i.action |= ATA_EH_HARDRESET;
2295 }
2296
2297 /* if we're about to do hardreset, nothing more to do */
2298 if (ehc->i.action & ATA_EH_HARDRESET)
2299 return 0;
2300
Tejun Heocc0680a2007-08-06 18:36:23 +09002301 if (ata_link_online(link))
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002302 rc = ata_wait_ready(ap, deadline);
2303 else
2304 rc = -ENODEV;
2305
2306 return rc;
Jeff Garzik22374672005-11-17 10:59:48 -05002307}
2308
/**
 *      mv_hardreset - hard-reset one SATA channel
 *      @link: ATA link to reset
 *      @class: written with the detected device class by mv_phy_reset()
 *      @deadline: jiffies value the reset must complete by
 *
 *      Stops eDMA, performs the chip-level channel reset (ATA_RST plus
 *      per-chip PHY errata), then issues COMRESET and classifies the
 *      attached device.  Always reports success to the EH core.
 */
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	return 0;
}
2324
Tejun Heocc0680a2007-08-06 18:36:23 +09002325static void mv_postreset(struct ata_link *link, unsigned int *classes)
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002326{
Tejun Heocc0680a2007-08-06 18:36:23 +09002327 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002328 u32 serr;
2329
2330 /* print link status */
Tejun Heocc0680a2007-08-06 18:36:23 +09002331 sata_print_link_status(link);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002332
2333 /* clear SError */
Tejun Heocc0680a2007-08-06 18:36:23 +09002334 sata_scr_read(link, SCR_ERROR, &serr);
2335 sata_scr_write_flush(link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002336
2337 /* bail out if no device is present */
2338 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2339 DPRINTK("EXIT, no device\n");
2340 return;
2341 }
2342
2343 /* set up device control */
2344 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2345}
2346
/* libata error-handler entry point: run the standard EH state machine
 * with this driver's prereset/hardreset/postreset methods.
 */
static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}
2352
/* Called after an EH-issued internal command: ensure eDMA is stopped so
 * the port is back in PIO-capable state.
 */
static void mv_post_int_cmd(struct ata_queued_cmd *qc)
{
	mv_stop_dma(qc->ap);
}
2357
2358static void mv_eh_freeze(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04002359{
Tejun Heo0d5ff562007-02-01 15:06:36 +09002360 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002361 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2362 u32 tmp, mask;
2363 unsigned int shift;
Brett Russ31961942005-09-30 01:36:00 -04002364
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002365 /* FIXME: handle coalescing completion events properly */
Brett Russ31961942005-09-30 01:36:00 -04002366
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002367 shift = ap->port_no * 2;
2368 if (hc > 0)
2369 shift++;
Brett Russ31961942005-09-30 01:36:00 -04002370
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002371 mask = 0x3 << shift;
Brett Russ31961942005-09-30 01:36:00 -04002372
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002373 /* disable assertion of portN err, done events */
2374 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2375 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2376}
2377
/* EH thaw hook: clear any latched EDMA/HC interrupt causes for this
 * port, then re-enable its err/done bits in the chip-wide main mask.
 * Inverse of mv_eh_freeze().
 */
static void mv_eh_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;	/* HC 0: ports 0-3, HC 1: ports 4-7 */
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	/* each port owns two adjacent bits (err, done) in the main mask;
	 * HC 1 ports are offset one extra bit
	 */
	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;	/* port number relative to its HC */
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);		/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8));	/* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
2410
/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup: the taskfile shadow registers live in the
	 * port's shadow block, one u32 slot per ATA register
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	/* SError is write-1-to-clear: write back what we read */
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
2459
/**
 *      mv_chip_id - select per-chip ops and errata flags for this board
 *      @host: ATA host being initialized
 *      @board_idx: index into the chip_* board table
 *
 *      Chooses the hw-ops vector and MV_HP_* generation/errata flags
 *      based on board type and PCI revision, and records the IRQ
 *      cause/mask register offsets (PCI vs PCIe flavor).
 *
 *      Returns 0 on success, 1 on an invalid board index.
 */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* fall through: 7042 shares the GEN_IIE setup with 6042 */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		dev_printk(KERN_ERR, &pdev->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	/* PCIe parts (7042) expose the IRQ cause/mask registers at
	 * different offsets than the conventional-PCI parts
	 */
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs	= PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs	= PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
2595
/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @host: ATA host to initialize
 *      @board_idx: controller index
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask: silence everything while we reset */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* latch per-port PHY signal settings before the reset wipes them */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);
		unsigned int offset = port_mmio - mmio;

		mv_port_init(&ap->ioaddr, port_mmio);

		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	/* and unmask interrupt generation for host regs */
	writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);

	/* Gen-I chips use a different "masked IRQs" set than later gens */
	if (IS_GEN_I(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + hpriv->irq_cause_ofs),
		readl(mmio + hpriv->irq_mask_ofs));

done:
	return rc;
}
2691
Brett Russ05b308e2005-10-05 17:08:53 -04002692/**
2693 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002694 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002695 *
2696 * FIXME: complete this.
2697 *
2698 * LOCKING:
2699 * Inherited from caller.
2700 */
Tejun Heo4447d352007-04-17 23:44:08 +09002701static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002702{
Tejun Heo4447d352007-04-17 23:44:08 +09002703 struct pci_dev *pdev = to_pci_dev(host->dev);
2704 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07002705 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002706 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002707
2708 /* Use this to determine the HW stepping of the chip so we know
2709 * what errata to workaround
2710 */
Brett Russ31961942005-09-30 01:36:00 -04002711 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2712 if (scc == 0)
2713 scc_s = "SCSI";
2714 else if (scc == 0x01)
2715 scc_s = "RAID";
2716 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002717 scc_s = "?";
2718
2719 if (IS_GEN_I(hpriv))
2720 gen = "I";
2721 else if (IS_GEN_II(hpriv))
2722 gen = "II";
2723 else if (IS_GEN_IIE(hpriv))
2724 gen = "IIE";
2725 else
2726 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002727
Jeff Garzika9524a72005-10-30 14:39:11 -05002728 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002729 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2730 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002731 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2732}
2733
/**
 *      mv_init_one - handle a positive probe of a Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      Allocates the ata_host and driver-private state (both
 *      device-managed, so no explicit teardown path is needed),
 *      maps BARs, initializes the adapter, and activates the host.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;	/* log the version banner only once */
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources (pcim_* is device-managed: released on
	 * driver detach, so early returns below do not leak)
	 */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	/* set up 64-bit (or fall back to 32-bit) DMA masks */
	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts: fall back to legacy INTx if MSI is
	 * disabled by module param or setup fails
	 */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
2796
/* module entry point: register the PCI driver */
static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}
2801
/* module exit point: unregister the PCI driver */
static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}
2806
/* Module metadata and parameters */
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

/* "msi" is presumably declared earlier in the file (used by
 * mv_init_one); 0444 exposes it read-only in sysfs
 */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);