blob: 3c1b5c9027db5a4446a041b3316440c77c1b1468 [file] [log] [blame]
/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
23
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  2) Improve/fix IRQ and error handling sequences.

  3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).

  4) Think about TCQ support here, and for libata in general
  with controllers that support it via host-queuing hardware
  (a software-only implementation could be a nightmare).

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is quite often not
  worth the latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

*/
63
64
Brett Russ20f733e2005-09-01 18:26:17 -040065#include <linux/kernel.h>
66#include <linux/module.h>
67#include <linux/pci.h>
68#include <linux/init.h>
69#include <linux/blkdev.h>
70#include <linux/delay.h>
71#include <linux/interrupt.h>
Brett Russ20f733e2005-09-01 18:26:17 -040072#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050073#include <linux/device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040074#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050075#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040076#include <scsi/scsi_device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040077#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040078
79#define DRV_NAME "sata_mv"
Mark Lord1fd2e1c2008-01-26 18:33:59 -050080#define DRV_VERSION "1.20"
Brett Russ20f733e2005-09-01 18:26:17 -040081
82enum {
83 /* BAR's are enumerated in terms of pci_resource_start() terms */
84 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
85 MV_IO_BAR = 2, /* offset 0x18: IO space */
86 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
87
88 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
89 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
90
91 MV_PCI_REG_BASE = 0,
92 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
Mark Lord615ab952006-05-19 16:24:56 -040093 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
94 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
95 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
96 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
97 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
98
Brett Russ20f733e2005-09-01 18:26:17 -040099 MV_SATAHC0_REG_BASE = 0x20000,
Jeff Garzik522479f2005-11-12 22:14:02 -0500100 MV_FLASH_CTL = 0x1046c,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500101 MV_GPIO_PORT_CTL = 0x104f0,
102 MV_RESET_CFG = 0x180d8,
Brett Russ20f733e2005-09-01 18:26:17 -0400103
104 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
105 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
106 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
107 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
108
Brett Russ31961942005-09-30 01:36:00 -0400109 MV_MAX_Q_DEPTH = 32,
110 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
111
112 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
113 * CRPB needs alignment on a 256B boundary. Size == 256B
Brett Russ31961942005-09-30 01:36:00 -0400114 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
115 */
116 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
117 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
Mark Lordda2fa9b2008-01-26 18:32:45 -0500118 MV_MAX_SG_CT = 256,
Brett Russ31961942005-09-30 01:36:00 -0400119 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
Brett Russ31961942005-09-30 01:36:00 -0400120
Brett Russ20f733e2005-09-01 18:26:17 -0400121 MV_PORTS_PER_HC = 4,
122 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
123 MV_PORT_HC_SHIFT = 2,
Brett Russ31961942005-09-30 01:36:00 -0400124 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
Brett Russ20f733e2005-09-01 18:26:17 -0400125 MV_PORT_MASK = 3,
126
127 /* Host Flags */
128 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
129 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100130 /* SoC integrated controllers, no PCI interface */
131 MV_FLAG_SOC = (1 << 28),
132
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400133 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400134 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
135 ATA_FLAG_PIO_POLLING,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500136 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
Brett Russ20f733e2005-09-01 18:26:17 -0400137
Brett Russ31961942005-09-30 01:36:00 -0400138 CRQB_FLAG_READ = (1 << 0),
139 CRQB_TAG_SHIFT = 1,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400140 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
141 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
Brett Russ31961942005-09-30 01:36:00 -0400142 CRQB_CMD_ADDR_SHIFT = 8,
143 CRQB_CMD_CS = (0x2 << 11),
144 CRQB_CMD_LAST = (1 << 15),
145
146 CRPB_FLAG_STATUS_SHIFT = 8,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400147 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
148 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
Brett Russ31961942005-09-30 01:36:00 -0400149
150 EPRD_FLAG_END_OF_TBL = (1 << 31),
151
Brett Russ20f733e2005-09-01 18:26:17 -0400152 /* PCI interface registers */
153
Brett Russ31961942005-09-30 01:36:00 -0400154 PCI_COMMAND_OFS = 0xc00,
155
Brett Russ20f733e2005-09-01 18:26:17 -0400156 PCI_MAIN_CMD_STS_OFS = 0xd30,
157 STOP_PCI_MASTER = (1 << 2),
158 PCI_MASTER_EMPTY = (1 << 3),
159 GLOB_SFT_RST = (1 << 4),
160
Jeff Garzik522479f2005-11-12 22:14:02 -0500161 MV_PCI_MODE = 0xd00,
162 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
163 MV_PCI_DISC_TIMER = 0xd04,
164 MV_PCI_MSI_TRIGGER = 0xc38,
165 MV_PCI_SERR_MASK = 0xc28,
166 MV_PCI_XBAR_TMOUT = 0x1d04,
167 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
168 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
169 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
170 MV_PCI_ERR_COMMAND = 0x1d50,
171
Mark Lord02a121d2007-12-01 13:07:22 -0500172 PCI_IRQ_CAUSE_OFS = 0x1d58,
173 PCI_IRQ_MASK_OFS = 0x1d5c,
Brett Russ20f733e2005-09-01 18:26:17 -0400174 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
175
Mark Lord02a121d2007-12-01 13:07:22 -0500176 PCIE_IRQ_CAUSE_OFS = 0x1900,
177 PCIE_IRQ_MASK_OFS = 0x1910,
Mark Lord646a4da2008-01-26 18:30:37 -0500178 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
Mark Lord02a121d2007-12-01 13:07:22 -0500179
Brett Russ20f733e2005-09-01 18:26:17 -0400180 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
181 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
182 PORT0_ERR = (1 << 0), /* shift by port # */
183 PORT0_DONE = (1 << 1), /* shift by port # */
184 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
185 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
186 PCI_ERR = (1 << 18),
187 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
188 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500189 PORTS_0_3_COAL_DONE = (1 << 8),
190 PORTS_4_7_COAL_DONE = (1 << 17),
Brett Russ20f733e2005-09-01 18:26:17 -0400191 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
192 GPIO_INT = (1 << 22),
193 SELF_INT = (1 << 23),
194 TWSI_INT = (1 << 24),
195 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500196 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
Jeff Garzik8b260242005-11-12 12:32:50 -0500197 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
Brett Russ20f733e2005-09-01 18:26:17 -0400198 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
199 HC_MAIN_RSVD),
Jeff Garzikfb621e22007-02-25 04:19:45 -0500200 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
201 HC_MAIN_RSVD_5),
Brett Russ20f733e2005-09-01 18:26:17 -0400202
203 /* SATAHC registers */
204 HC_CFG_OFS = 0,
205
206 HC_IRQ_CAUSE_OFS = 0x14,
Brett Russ31961942005-09-30 01:36:00 -0400207 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
Brett Russ20f733e2005-09-01 18:26:17 -0400208 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
209 DEV_IRQ = (1 << 8), /* shift by port # */
210
211 /* Shadow block registers */
Brett Russ31961942005-09-30 01:36:00 -0400212 SHD_BLK_OFS = 0x100,
213 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
Brett Russ20f733e2005-09-01 18:26:17 -0400214
215 /* SATA registers */
216 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
217 SATA_ACTIVE_OFS = 0x350,
Mark Lord0c589122008-01-26 18:31:16 -0500218 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500219 PHY_MODE3 = 0x310,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500220 PHY_MODE4 = 0x314,
221 PHY_MODE2 = 0x330,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500222 MV5_PHY_MODE = 0x74,
223 MV5_LT_MODE = 0x30,
224 MV5_PHY_CTL = 0x0C,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500225 SATA_INTERFACE_CTL = 0x050,
226
227 MV_M2_PREAMP_MASK = 0x7e0,
Brett Russ20f733e2005-09-01 18:26:17 -0400228
229 /* Port registers */
230 EDMA_CFG_OFS = 0,
Mark Lord0c589122008-01-26 18:31:16 -0500231 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
232 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
233 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
234 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
235 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
Brett Russ20f733e2005-09-01 18:26:17 -0400236
237 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
238 EDMA_ERR_IRQ_MASK_OFS = 0xc,
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400239 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
240 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
241 EDMA_ERR_DEV = (1 << 2), /* device error */
242 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
243 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
244 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400245 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
246 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400247 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400248 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400249 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
250 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
251 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
252 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
Mark Lord646a4da2008-01-26 18:30:37 -0500253
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400254 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500255 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
256 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
257 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
258 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
259
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400260 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500261
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400262 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500263 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
264 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
265 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
266 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
267 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
268
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400269 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500270
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400271 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400272 EDMA_ERR_OVERRUN_5 = (1 << 5),
273 EDMA_ERR_UNDERRUN_5 = (1 << 6),
Mark Lord646a4da2008-01-26 18:30:37 -0500274
275 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
276 EDMA_ERR_LNK_CTRL_RX_1 |
277 EDMA_ERR_LNK_CTRL_RX_3 |
278 EDMA_ERR_LNK_CTRL_TX,
279
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400280 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
281 EDMA_ERR_PRD_PAR |
282 EDMA_ERR_DEV_DCON |
283 EDMA_ERR_DEV_CON |
284 EDMA_ERR_SERR |
285 EDMA_ERR_SELF_DIS |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400286 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400287 EDMA_ERR_CRPB_PAR |
288 EDMA_ERR_INTRL_PAR |
289 EDMA_ERR_IORDY |
290 EDMA_ERR_LNK_CTRL_RX_2 |
291 EDMA_ERR_LNK_DATA_RX |
292 EDMA_ERR_LNK_DATA_TX |
293 EDMA_ERR_TRANS_PROTO,
294 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
295 EDMA_ERR_PRD_PAR |
296 EDMA_ERR_DEV_DCON |
297 EDMA_ERR_DEV_CON |
298 EDMA_ERR_OVERRUN_5 |
299 EDMA_ERR_UNDERRUN_5 |
300 EDMA_ERR_SELF_DIS_5 |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400301 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400302 EDMA_ERR_CRPB_PAR |
303 EDMA_ERR_INTRL_PAR |
304 EDMA_ERR_IORDY,
Brett Russ20f733e2005-09-01 18:26:17 -0400305
Brett Russ31961942005-09-30 01:36:00 -0400306 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
307 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400308
309 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
310 EDMA_REQ_Q_PTR_SHIFT = 5,
311
312 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
313 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
314 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400315 EDMA_RSP_Q_PTR_SHIFT = 3,
316
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400317 EDMA_CMD_OFS = 0x28, /* EDMA command register */
318 EDMA_EN = (1 << 0), /* enable EDMA */
319 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
320 ATA_RST = (1 << 2), /* reset trans/link/phy */
Brett Russ20f733e2005-09-01 18:26:17 -0400321
Jeff Garzikc9d39132005-11-13 17:47:51 -0500322 EDMA_IORDY_TMOUT = 0x34,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500323 EDMA_ARB_CFG = 0x38,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500324
Brett Russ31961942005-09-30 01:36:00 -0400325 /* Host private flags (hp_flags) */
326 MV_HP_FLAG_MSI = (1 << 0),
Jeff Garzik47c2b672005-11-12 21:13:17 -0500327 MV_HP_ERRATA_50XXB0 = (1 << 1),
328 MV_HP_ERRATA_50XXB2 = (1 << 2),
329 MV_HP_ERRATA_60X1B2 = (1 << 3),
330 MV_HP_ERRATA_60X1C0 = (1 << 4),
Jeff Garzike4e7b892006-01-31 12:18:41 -0500331 MV_HP_ERRATA_XX42A0 = (1 << 5),
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400332 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
333 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
334 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
Mark Lord02a121d2007-12-01 13:07:22 -0500335 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
Brett Russ20f733e2005-09-01 18:26:17 -0400336
Brett Russ31961942005-09-30 01:36:00 -0400337 /* Port private flags (pp_flags) */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400338 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
Mark Lord72109162008-01-26 18:31:33 -0500339 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400340 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
Brett Russ31961942005-09-30 01:36:00 -0400341};
342
/* Chip-generation and bus-type tests on the host/port flag words above. */
#define IS_GEN_I(hpriv)		((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define HAS_PCI(host)		(!((host)->ports[0]->flags & MV_FLAG_SOC))
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500347
enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill-sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
362
/* Index into mv_port_info[] — one entry per supported chip family. */
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};
372
Brett Russ31961942005-09-30 01:36:00 -0400373/* Command ReQuest Block: 32B */
374struct mv_crqb {
Mark Lorde1469872006-05-22 19:02:03 -0400375 __le32 sg_addr;
376 __le32 sg_addr_hi;
377 __le16 ctrl_flags;
378 __le16 ata_cmd[11];
Brett Russ31961942005-09-30 01:36:00 -0400379};
380
Jeff Garzike4e7b892006-01-31 12:18:41 -0500381struct mv_crqb_iie {
Mark Lorde1469872006-05-22 19:02:03 -0400382 __le32 addr;
383 __le32 addr_hi;
384 __le32 flags;
385 __le32 len;
386 __le32 ata_cmd[4];
Jeff Garzike4e7b892006-01-31 12:18:41 -0500387};
388
Brett Russ31961942005-09-30 01:36:00 -0400389/* Command ResPonse Block: 8B */
390struct mv_crpb {
Mark Lorde1469872006-05-22 19:02:03 -0400391 __le16 id;
392 __le16 flags;
393 __le32 tmstmp;
Brett Russ31961942005-09-30 01:36:00 -0400394};
395
396/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
397struct mv_sg {
Mark Lorde1469872006-05-22 19:02:03 -0400398 __le32 addr;
399 __le32 flags_size;
400 __le32 addr_hi;
401 __le32 reserved;
Brett Russ20f733e2005-09-01 18:26:17 -0400402};
403
404struct mv_port_priv {
Brett Russ31961942005-09-30 01:36:00 -0400405 struct mv_crqb *crqb;
406 dma_addr_t crqb_dma;
407 struct mv_crpb *crpb;
408 dma_addr_t crpb_dma;
Mark Lordeb73d552008-01-29 13:24:00 -0500409 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
410 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400411
412 unsigned int req_idx;
413 unsigned int resp_idx;
414
Brett Russ31961942005-09-30 01:36:00 -0400415 u32 pp_flags;
Brett Russ20f733e2005-09-01 18:26:17 -0400416};
417
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500418struct mv_port_signal {
419 u32 amps;
420 u32 pre;
421};
422
Mark Lord02a121d2007-12-01 13:07:22 -0500423struct mv_host_priv {
424 u32 hp_flags;
425 struct mv_port_signal signal[8];
426 const struct mv_hw_ops *ops;
427 u32 irq_cause_ofs;
428 u32 irq_mask_ofs;
429 u32 unmask_all_irqs;
Mark Lordda2fa9b2008-01-26 18:32:45 -0500430 /*
431 * These consistent DMA memory pools give us guaranteed
432 * alignment for hardware-accessed data structures,
433 * and less memory waste in accomplishing the alignment.
434 */
435 struct dma_pool *crqb_pool;
436 struct dma_pool *crpb_pool;
437 struct dma_pool *sg_tbl_pool;
Mark Lord02a121d2007-12-01 13:07:22 -0500438};
439
Jeff Garzik47c2b672005-11-12 21:13:17 -0500440struct mv_hw_ops {
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500441 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
442 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500443 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
444 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
445 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500446 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
447 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500448 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100449 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500450};
451
Brett Russ20f733e2005-09-01 18:26:17 -0400452static void mv_irq_clear(struct ata_port *ap);
Tejun Heoda3dbb12007-07-16 14:29:40 +0900453static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
454static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
455static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
456static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Brett Russ31961942005-09-30 01:36:00 -0400457static int mv_port_start(struct ata_port *ap);
458static void mv_port_stop(struct ata_port *ap);
459static void mv_qc_prep(struct ata_queued_cmd *qc);
Jeff Garzike4e7b892006-01-31 12:18:41 -0500460static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
Tejun Heo9a3d9eb2006-01-23 13:09:36 +0900461static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400462static void mv_error_handler(struct ata_port *ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400463static void mv_eh_freeze(struct ata_port *ap);
464static void mv_eh_thaw(struct ata_port *ap);
Mark Lordf2738272008-01-26 18:32:29 -0500465static void mv6_dev_config(struct ata_device *dev);
Brett Russ20f733e2005-09-01 18:26:17 -0400466
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500467static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
468 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500469static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
470static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
471 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500472static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
473 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500474static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100475static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500476
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500477static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
478 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500479static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
480static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
481 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500482static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
483 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500484static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100485static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500486static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
487 unsigned int port_no);
Mark Lord72109162008-01-26 18:31:33 -0500488static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
489 void __iomem *port_mmio, int want_ncq);
490static int __mv_stop_dma(struct ata_port *ap);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500491
Mark Lordeb73d552008-01-29 13:24:00 -0500492/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
493 * because we have to allow room for worst case splitting of
494 * PRDs for 64K boundaries in mv_fill_sg().
495 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400496static struct scsi_host_template mv5_sht = {
Brett Russ20f733e2005-09-01 18:26:17 -0400497 .module = THIS_MODULE,
498 .name = DRV_NAME,
499 .ioctl = ata_scsi_ioctl,
500 .queuecommand = ata_scsi_queuecmd,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400501 .can_queue = ATA_DEF_QUEUE,
502 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400503 .sg_tablesize = MV_MAX_SG_CT / 2,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400504 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
505 .emulated = ATA_SHT_EMULATED,
506 .use_clustering = 1,
507 .proc_name = DRV_NAME,
508 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzik3be6cbd2007-10-18 16:21:18 -0400509 .slave_configure = ata_scsi_slave_config,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400510 .slave_destroy = ata_scsi_slave_destroy,
511 .bios_param = ata_std_bios_param,
512};
513
514static struct scsi_host_template mv6_sht = {
515 .module = THIS_MODULE,
516 .name = DRV_NAME,
517 .ioctl = ata_scsi_ioctl,
518 .queuecommand = ata_scsi_queuecmd,
Mark Lord138bfdd2008-01-26 18:33:18 -0500519 .change_queue_depth = ata_scsi_change_queue_depth,
520 .can_queue = MV_MAX_Q_DEPTH - 1,
Brett Russ20f733e2005-09-01 18:26:17 -0400521 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400522 .sg_tablesize = MV_MAX_SG_CT / 2,
Brett Russ20f733e2005-09-01 18:26:17 -0400523 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
524 .emulated = ATA_SHT_EMULATED,
Jeff Garzikd88184f2007-02-26 01:26:06 -0500525 .use_clustering = 1,
Brett Russ20f733e2005-09-01 18:26:17 -0400526 .proc_name = DRV_NAME,
527 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzik3be6cbd2007-10-18 16:21:18 -0400528 .slave_configure = ata_scsi_slave_config,
Tejun Heoccf68c32006-05-31 18:28:09 +0900529 .slave_destroy = ata_scsi_slave_destroy,
Brett Russ20f733e2005-09-01 18:26:17 -0400530 .bios_param = ata_std_bios_param,
Brett Russ20f733e2005-09-01 18:26:17 -0400531};
532
Jeff Garzikc9d39132005-11-13 17:47:51 -0500533static const struct ata_port_operations mv5_ops = {
Jeff Garzikc9d39132005-11-13 17:47:51 -0500534 .tf_load = ata_tf_load,
535 .tf_read = ata_tf_read,
536 .check_status = ata_check_status,
537 .exec_command = ata_exec_command,
538 .dev_select = ata_std_dev_select,
539
Jeff Garzikcffacd82007-03-09 09:46:47 -0500540 .cable_detect = ata_cable_sata,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500541
542 .qc_prep = mv_qc_prep,
543 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900544 .data_xfer = ata_data_xfer,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500545
Jeff Garzikc9d39132005-11-13 17:47:51 -0500546 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900547 .irq_on = ata_irq_on,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500548
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400549 .error_handler = mv_error_handler,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400550 .freeze = mv_eh_freeze,
551 .thaw = mv_eh_thaw,
552
Jeff Garzikc9d39132005-11-13 17:47:51 -0500553 .scr_read = mv5_scr_read,
554 .scr_write = mv5_scr_write,
555
556 .port_start = mv_port_start,
557 .port_stop = mv_port_stop,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500558};
559
560static const struct ata_port_operations mv6_ops = {
Mark Lordf2738272008-01-26 18:32:29 -0500561 .dev_config = mv6_dev_config,
Brett Russ20f733e2005-09-01 18:26:17 -0400562 .tf_load = ata_tf_load,
563 .tf_read = ata_tf_read,
564 .check_status = ata_check_status,
565 .exec_command = ata_exec_command,
566 .dev_select = ata_std_dev_select,
567
Jeff Garzikcffacd82007-03-09 09:46:47 -0500568 .cable_detect = ata_cable_sata,
Brett Russ20f733e2005-09-01 18:26:17 -0400569
Brett Russ31961942005-09-30 01:36:00 -0400570 .qc_prep = mv_qc_prep,
571 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900572 .data_xfer = ata_data_xfer,
Brett Russ20f733e2005-09-01 18:26:17 -0400573
Brett Russ20f733e2005-09-01 18:26:17 -0400574 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900575 .irq_on = ata_irq_on,
Brett Russ20f733e2005-09-01 18:26:17 -0400576
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400577 .error_handler = mv_error_handler,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400578 .freeze = mv_eh_freeze,
579 .thaw = mv_eh_thaw,
Mark Lord138bfdd2008-01-26 18:33:18 -0500580 .qc_defer = ata_std_qc_defer,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400581
Brett Russ20f733e2005-09-01 18:26:17 -0400582 .scr_read = mv_scr_read,
583 .scr_write = mv_scr_write,
584
Brett Russ31961942005-09-30 01:36:00 -0400585 .port_start = mv_port_start,
586 .port_stop = mv_port_stop,
Brett Russ20f733e2005-09-01 18:26:17 -0400587};
588
Jeff Garzike4e7b892006-01-31 12:18:41 -0500589static const struct ata_port_operations mv_iie_ops = {
Jeff Garzike4e7b892006-01-31 12:18:41 -0500590 .tf_load = ata_tf_load,
591 .tf_read = ata_tf_read,
592 .check_status = ata_check_status,
593 .exec_command = ata_exec_command,
594 .dev_select = ata_std_dev_select,
595
Jeff Garzikcffacd82007-03-09 09:46:47 -0500596 .cable_detect = ata_cable_sata,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500597
598 .qc_prep = mv_qc_prep_iie,
599 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900600 .data_xfer = ata_data_xfer,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500601
Jeff Garzike4e7b892006-01-31 12:18:41 -0500602 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900603 .irq_on = ata_irq_on,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500604
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400605 .error_handler = mv_error_handler,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400606 .freeze = mv_eh_freeze,
607 .thaw = mv_eh_thaw,
Mark Lord138bfdd2008-01-26 18:33:18 -0500608 .qc_defer = ata_std_qc_defer,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400609
Jeff Garzike4e7b892006-01-31 12:18:41 -0500610 .scr_read = mv_scr_read,
611 .scr_write = mv_scr_write,
612
613 .port_start = mv_port_start,
614 .port_stop = mv_port_stop,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500615};
616
/*
 * Per-board-id port configuration.  Entries are selected by the chip_*
 * identifiers stored in mv_pci_tbl[].driver_data, so the order here must
 * match that enumeration.
 */
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
665
/*
 * PCI IDs claimed by this driver.  The driver_data field of each entry is
 * the chip_* board id used to index mv_port_info[].
 */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
695
/* Low-level hardware hooks for the 5xxx chip family (mv5_* handlers). */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata	= mv5_phy_errata,
	.enable_leds	= mv5_enable_leds,
	.read_preamp	= mv5_read_preamp,
	.reset_hc	= mv5_reset_hc,
	.reset_flash	= mv5_reset_flash,
	.reset_bus	= mv5_reset_bus,
};
704
/*
 * Low-level hardware hooks for the 6xxx chip family.  Note that bus reset
 * uses the shared mv_reset_pci_bus() rather than an mv6-specific handler.
 */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata	= mv6_phy_errata,
	.enable_leds	= mv6_enable_leds,
	.read_preamp	= mv6_read_preamp,
	.reset_hc	= mv6_reset_hc,
	.reset_flash	= mv6_reset_flash,
	.reset_bus	= mv_reset_pci_bus,
};
713
Brett Russ20f733e2005-09-01 18:26:17 -0400714/*
715 * Functions
716 */
717
/*
 * writelfl - register write with flush ("fl" = flush).
 * The read-back forces the preceding write out of any PCI posted-write
 * buffer before the caller proceeds, so ordering-sensitive register
 * sequences actually reach the hardware in order.
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
723
Brett Russ20f733e2005-09-01 18:26:17 -0400724static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
725{
726 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
727}
728
/* Which host controller a chip-wide port number belongs to. */
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}
733
/* Port number relative to its host controller ("hard" port index). */
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
738
739static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
740 unsigned int port)
741{
742 return mv_hc_base(base, mv_hc_from_port(port));
743}
744
Brett Russ20f733e2005-09-01 18:26:17 -0400745static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
746{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500747 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500748 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500749 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400750}
751
752static inline void __iomem *mv_ap_base(struct ata_port *ap)
753{
Tejun Heo0d5ff562007-02-01 15:06:36 +0900754 return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
Brett Russ20f733e2005-09-01 18:26:17 -0400755}
756
Jeff Garzikcca39742006-08-24 03:19:22 -0400757static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400758{
Jeff Garzikcca39742006-08-24 03:19:22 -0400759 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400760}
761
/*
 * libata ->irq_clear hook: intentionally a no-op on this controller
 * (interrupt cause registers are handled in the driver's own paths).
 */
static void mv_irq_clear(struct ata_port *ap)
{
}
765
/**
 * mv_set_edma_ptrs - program EDMA queue bases and in/out pointers
 * @port_mmio: base address of the port's registers
 * @hpriv: host private data (consulted for errata flags)
 * @pp: port private data holding queue DMA addresses and soft indices
 *
 * Loads the hardware's CRQB (request) and CRPB (response) ring base
 * addresses and pointer registers from the driver's cached state.
 * Chips with the XX42A0 errata want the low 32 address bits written
 * into the pointer registers along with the index field.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* ring base must be 1KB-aligned for the request queue */
	WARN_ON(pp->crqb_dma & 0x3ff);
	/* double 16-bit shift extracts the high half even if dma_addr_t
	 * is only 32 bits wide (a full 32-bit shift would be undefined)
	 */
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	/* ring base must be 256-byte aligned for the response queue */
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
805
/**
 * mv_start_dma - Enable eDMA engine
 * @ap: ATA channel to manipulate
 * @port_mmio: port register base address
 * @pp: port private data
 * @protocol: taskfile protocol of the command about to be issued
 *
 * If the engine is already running but in the wrong NCQ mode for
 * @protocol, it is stopped first.  When (re)starting, stale EDMA
 * error/interrupt indicators are cleared, the engine is reconfigured
 * for the requested mode, and the queue pointers are reloaded before
 * EDMA_EN is set.  Verify the local cache of the eDMA state is
 * accurate with a WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		/* restart the engine if its NCQ mode must change */
		if (want_ncq != using_ncq)
			__mv_stop_dma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
				ap->host->iomap[MV_PRIMARY_BAR], hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			(CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}
858
/**
 * __mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Requests engine disable (the disable bit self-clears) and polls
 * until the engine reports stopped.  Verify the local cache of the
 * eDMA state is accurate with a WARN_ON.  Callers serialize via the
 * host lock; see mv_stop_dma() for the locked wrapper.
 *
 * Returns 0 on success, -EIO if the engine refuses to stop.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		/* cached state says stopped; hardware should agree */
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop: up to 1000 * 100us */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
901
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400902static int mv_stop_dma(struct ata_port *ap)
903{
904 unsigned long flags;
905 int rc;
906
907 spin_lock_irqsave(&ap->host->lock, flags);
908 rc = __mv_stop_dma(ap);
909 spin_unlock_irqrestore(&ap->host->lock, flags);
910
911 return rc;
912}
913
#ifdef ATA_DEBUG
/* Debug helper: hex-dump @bytes of MMIO space, four 32-bit words per line. */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	unsigned off = 0;

	while (off < bytes) {
		int col;

		DPRINTK("%p: ", start + off);
		for (col = 0; col < 4 && off < bytes; col++) {
			printk("%08x ", readl(start + off));
			off += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
928
/* Debug helper: hex-dump @bytes of PCI config space, four dwords per line. */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	unsigned off = 0;

	while (off < bytes) {
		int col;

		DPRINTK("%02x: ", off);
		for (col = 0; col < 4 && off < bytes; col++) {
			u32 dw;

			(void) pci_read_config_dword(pdev, off, &dw);
			printk("%08x ", dw);
			off += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
/*
 * Debug-only dump of PCI config space, PCI-bridge registers, and the
 * per-HC / per-port register blocks.  Pass a negative @port to dump
 * every host controller and port; otherwise only @port's HC and @port
 * itself are dumped.  Compiles to nothing without ATA_DEBUG.
 */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		/* dump everything */
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
989
Brett Russ20f733e2005-09-01 18:26:17 -0400990static unsigned int mv_scr_offset(unsigned int sc_reg_in)
991{
992 unsigned int ofs;
993
994 switch (sc_reg_in) {
995 case SCR_STATUS:
996 case SCR_CONTROL:
997 case SCR_ERROR:
998 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
999 break;
1000 case SCR_ACTIVE:
1001 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1002 break;
1003 default:
1004 ofs = 0xffffffffU;
1005 break;
1006 }
1007 return ofs;
1008}
1009
Tejun Heoda3dbb12007-07-16 14:29:40 +09001010static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -04001011{
1012 unsigned int ofs = mv_scr_offset(sc_reg_in);
1013
Tejun Heoda3dbb12007-07-16 14:29:40 +09001014 if (ofs != 0xffffffffU) {
1015 *val = readl(mv_ap_base(ap) + ofs);
1016 return 0;
1017 } else
1018 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001019}
1020
Tejun Heoda3dbb12007-07-16 14:29:40 +09001021static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -04001022{
1023 unsigned int ofs = mv_scr_offset(sc_reg_in);
1024
Tejun Heoda3dbb12007-07-16 14:29:40 +09001025 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -04001026 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001027 return 0;
1028 } else
1029 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001030}
1031
Mark Lordf2738272008-01-26 18:32:29 -05001032static void mv6_dev_config(struct ata_device *adev)
1033{
1034 /*
1035 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1036 * See mv_qc_prep() for more info.
1037 */
1038 if (adev->flags & ATA_DFLAG_NCQ)
1039 if (adev->max_sectors > ATA_MAX_SECTORS)
1040 adev->max_sectors = ATA_MAX_SECTORS;
1041}
1042
/*
 * Program the port's EDMA configuration register for the chip generation
 * and the requested mode, and keep the cached NCQ-enabled flag in
 * pp->pp_flags in sync with what was written.
 */
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq)
{
	u32 cfg;

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1072
/*
 * Release all per-port DMA-pool allocations (CRQB ring, CRPB ring, and
 * the SG tables), NULLing each pointer so the function is safe to call
 * on a partially-allocated port (see mv_port_start()'s error path).
 */
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			/* on Gen-I all tags alias sg_tbl[0]: free it once,
			 * but still clear every aliased pointer
			 */
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}
1101
/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * Returns 0 on success, -ENOMEM (or ata_pad_alloc()'s error) on failure;
 * partially-allocated DMA memory is released via mv_port_free_dma_mem().
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;
	int tag, rc;

	/* devm allocation: pp itself is freed automatically on detach */
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			/* Gen-I: every tag aliases the tag-0 table */
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	/* hold the host lock while touching port registers */
	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(pp, hpriv, port_mmio, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
1173
/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop
 * (taken inside mv_stop_dma()).
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
	mv_port_free_dma_mem(ap);
}
1188
/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.  Each DMA-mapped
 * segment is split so that no ePRD entry crosses a 64KB boundary
 * (the hardware length field is 16 bits wide).
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	/* each NCQ tag has its own ePRD table */
	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			/* clamp so this entry stops at the next 64KB line */
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	/* tell the hardware where the table ends */
	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
1232
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001233static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001234{
Mark Lord559eeda2006-05-19 16:40:15 -04001235 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001236 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001237 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001238}
1239
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* only DMA and NCQ commands go through the EDMA request queue */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* point this CRQB at the ePRD table for this command's tag */
	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accomodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1329
1330/**
1331 * mv_qc_prep_iie - Host specific command preparation.
1332 * @qc: queued command to prepare
1333 *
 * This routine returns early if the command is not DMA or NCQ;
 * non-EDMA commands are dispatched at issue time by mv_qc_issue()
 * instead.  Otherwise it preps the Gen-IIE CRQB (command request
 * block), does some sanity checking, and calls the SG load routine.
1338 *
1339 * LOCKING:
1340 * Inherited from caller.
1341 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* Only DMA and NCQ protocols go through the EDMA request queue;
	 * everything else is issued via the shadow registers by
	 * mv_qc_issue(), so there is nothing to prep here.
	 */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	/* the qc tag doubles as the CRQB slot tag and host-queue tag */
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	/* low/high halves of this tag's SG table DMA address; the double
	 * 16-bit shift (instead of ">> 32") stays well-defined when
	 * dma_addr_t is only 32 bits wide
	 */
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	/* pack the full taskfile into the four little-endian cmd words */
	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	/* no data phase: no SG table to build */
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1398
Brett Russ05b308e2005-10-05 17:08:53 -04001399/**
1400 * mv_qc_issue - Initiate a command to the host
1401 * @qc: queued command to start
1402 *
1403 * This routine simply redirects to the general purpose routine
1404 * if command is not DMA. Else, it sanity checks our local
1405 * caches of the request producer/consumer indices then enables
1406 * DMA and bumps the request producer index.
1407 *
1408 * LOCKING:
1409 * Inherited from caller.
1410 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	/* (re)start EDMA in whichever mode this command's protocol needs */
	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	/* advance the software producer index; the CRQB for this slot was
	 * already built by mv_qc_prep()/mv_qc_prep_iie()
	 */
	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1440
Brett Russ05b308e2005-10-05 17:08:53 -04001441/**
Brett Russ05b308e2005-10-05 17:08:53 -04001442 * mv_err_intr - Handle error interrupts on the port
1443 * @ap: ATA channel to manipulate
 * @qc: active queued command at the time of the error, or NULL
Brett Russ05b308e2005-10-05 17:08:53 -04001445 *
1446 * In most cases, just clear the interrupt and move on. However,
1447 * some cases require an eDMA reset, which is done right before
1448 * the COMRESET in mv_phy_reset(). The SERR case requires a
1449 * clear of pending errors in the SATA SERROR register. Finally,
1450 * if the port disabled DMA, update our cached copy to match.
1451 *
1452 * LOCKING:
1453 * Inherited from caller.
1454 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */

	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		/* device plug/unplug: report hotplug and force hardreset */
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_HARDRESET;
	}

	/* generation-specific bits: freeze mask and EDMA self-disable
	 * live at different positions on Gen I vs. Gen II/IIE
	 */
	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			/* NOTE(review): plain "=" discards any AC_ERR_DEV
			 * bit accumulated above; "|=" may have been the
			 * intent — confirm before changing.
			 */
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* something interrupted us but set no known cause bit */
	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	/* attribute the error to the active command if we have one */
	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
1545
1546static void mv_intr_pio(struct ata_port *ap)
1547{
1548 struct ata_queued_cmd *qc;
1549 u8 ata_status;
1550
1551 /* ignore spurious intr if drive still BUSY */
1552 ata_status = readb(ap->ioaddr.status_addr);
1553 if (unlikely(ata_status & ATA_BUSY))
1554 return;
1555
1556 /* get active ATA command */
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001557 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001558 if (unlikely(!qc)) /* no active tag */
1559 return;
1560 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1561 return;
1562
1563 /* and finally, complete the ATA command */
1564 qc->err_mask |= ac_err_mask(ata_status);
1565 ata_qc_complete(qc);
1566}
1567
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* drain CRPB entries until the software out pointer catches up
	 * with the hardware in pointer read above
	 */
	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			/* error path: hand off to the error handler and
			 * bail without updating the out pointer
			 */
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	/* write the final out pointer back so hardware can reuse the
	 * consumed CRPB slots
	 */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1633
Brett Russ05b308e2005-10-05 17:08:53 -04001634/**
1635 * mv_host_intr - Handle all interrupts on the given host controller
Jeff Garzikcca39742006-08-24 03:19:22 -04001636 * @host: host specific structure
Brett Russ05b308e2005-10-05 17:08:53 -04001637 * @relevant: port error bits relevant to this host controller
1638 * @hc: which host controller we're to look at
1639 *
1640 * Read then write clear the HC interrupt status then walk each
1641 * port connected to the HC and see if it needs servicing. Port
1642 * success ints are reported in the HC interrupt status reg, the
1643 * port error ints are reported in the higher level main
1644 * interrupt status register and thus are passed in via the
1645 * 'relevant' argument.
1646 *
1647 * LOCKING:
1648 * Inherited from caller.
1649 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0;

	/* each HC owns MV_PORTS_PER_HC consecutive port numbers */
	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	/* write-clear the bits we are about to service */
	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		struct ata_port *ap = host->ports[port];
		/* NOTE(review): ap->private_data is read here, before the
		 * !ap check below — safe only if ports[] is always
		 * populated for every port number we iterate; confirm.
		 */
		struct mv_port_priv *pp = ap->private_data;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		shift = port << 1;			/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		/* per-port error bits arrive via the main cause register */
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			/* polled commands are completed elsewhere */
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		/* dispatch on the port's current mode: EDMA completion
		 * queue vs. legacy device interrupt
		 */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}
1709
/* Handle a PCI-level error: dump state, acknowledge the cause, and
 * freeze every online port so EH can recover with a hard reset.
 */
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	/* cause register offset varies per chip family (see hpriv) */
	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	/* acknowledge the error before freezing the ports */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			/* describe the cause only once, on the first port */
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
1749
Brett Russ05b308e2005-10-05 17:08:53 -04001750/**
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001751 * mv_interrupt - Main interrupt event handler
Brett Russ05b308e2005-10-05 17:08:53 -04001752 * @irq: unused
1753 * @dev_instance: private data; in this case the host structure
Brett Russ05b308e2005-10-05 17:08:53 -04001754 *
1755 * Read the read only register to determine if any host
1756 * controllers have pending interrupts. If so, call lower level
1757 * routine to handle. Also check for PCI errors which are only
1758 * reported here.
1759 *
Jeff Garzik8b260242005-11-12 12:32:50 -05001760 * LOCKING:
Jeff Garzikcca39742006-08-24 03:19:22 -04001761 * This routine holds the host lock while processing pending
Brett Russ05b308e2005-10-05 17:08:53 -04001762 * interrupts.
1763 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	u32 irq_stat, irq_mask;

	/* serialize against per-port EH and qc activity */
	spin_lock(&host->lock);
	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
	irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	/* PCI errors pre-empt all per-HC handling */
	if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	/* hand each host controller its slice of the main cause word */
	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1802
Jeff Garzikc9d39132005-11-13 17:47:51 -05001803static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1804{
1805 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1806 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1807
1808 return hc_mmio + ofs;
1809}
1810
1811static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1812{
1813 unsigned int ofs;
1814
1815 switch (sc_reg_in) {
1816 case SCR_STATUS:
1817 case SCR_ERROR:
1818 case SCR_CONTROL:
1819 ofs = sc_reg_in * sizeof(u32);
1820 break;
1821 default:
1822 ofs = 0xffffffffU;
1823 break;
1824 }
1825 return ofs;
1826}
1827
Tejun Heoda3dbb12007-07-16 14:29:40 +09001828static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001829{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001830 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1831 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001832 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1833
Tejun Heoda3dbb12007-07-16 14:29:40 +09001834 if (ofs != 0xffffffffU) {
1835 *val = readl(addr + ofs);
1836 return 0;
1837 } else
1838 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001839}
1840
Tejun Heoda3dbb12007-07-16 14:29:40 +09001841static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001842{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001843 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1844 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001845 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1846
Tejun Heoda3dbb12007-07-16 14:29:40 +09001847 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001848 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001849 return 0;
1850 } else
1851 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001852}
1853
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	/* revision-0 5080 parts skip the expansion-ROM BAR tweak below;
	 * presumably a chip-revision errata — confirm against vendor docs
	 */
	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}
1869
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	/* NOTE(review): magic value — meaning of 0x0fcfffff is not
	 * documented in this file; confirm against the chip datasheet.
	 */
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1874
Jeff Garzik47c2b672005-11-12 21:13:17 -05001875static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001876 void __iomem *mmio)
1877{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001878 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1879 u32 tmp;
1880
1881 tmp = readl(phy_mmio + MV5_PHY_MODE);
1882
1883 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1884 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001885}
1886
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): "|= ~(1 << 0)" sets every bit EXCEPT bit 0,
	 * which looks suspicious next to mv5_reset_bus()'s "|= (1 << 0)".
	 * Possibly "&= ~(1 << 0)" was intended; code has shipped this
	 * way, so confirm against hardware docs before changing.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1899
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	/* bits 12:11 (pre-emphasis) and 7:5 (amplitude) — see
	 * mv5_read_preamp() — plus bit 6 cleared alongside them
	 */
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		/* 50XXB0 errata workaround; exact bit meanings are not
		 * documented here — confirm against the errata sheet
		 */
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* restore the pre-emphasis/amplitude values captured earlier
	 * by mv5_read_preamp()
	 */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1925
Jeff Garzikc9d39132005-11-13 17:47:51 -05001926
1927#undef ZERO
1928#define ZERO(reg) writel(0, port_mmio + (reg))
/* Quiesce and re-initialize one 50xx port's EDMA register set.
 * Relies on the ZERO() macro defined just above (writes 0 relative
 * to port_mmio).
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/* disable EDMA before resetting the channel */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
1952#undef ZERO
1953
1954#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Reset one 50xx host controller block: clear its low registers and
 * rewrite the control word at offset 0x20.  Uses the hc_mmio-relative
 * ZERO() macro defined just above.
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	/* NOTE(review): register 0x20's bit layout is undocumented here;
	 * the mask/value pair below is taken on faith — confirm against
	 * the chip datasheet.
	 */
	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
1971#undef ZERO
1972
1973static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1974 unsigned int n_hc)
1975{
1976 unsigned int hc, port;
1977
1978 for (hc = 0; hc < n_hc; hc++) {
1979 for (port = 0; port < MV_PORTS_PER_HC; port++)
1980 mv5_reset_hc_port(hpriv, mmio,
1981 (hc * MV_PORTS_PER_HC) + port);
1982
1983 mv5_reset_one_hc(hpriv, mmio, hc);
1984 }
1985
1986 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05001987}
1988
Jeff Garzik101ffae2005-11-12 22:17:49 -05001989#undef ZERO
1990#define ZERO(reg) writel(0, mmio + (reg))
/* Reset the PCI-facing side of the chip: trim the PCI mode register
 * and zero the timers, masks and error-capture registers.  Uses the
 * mmio-relative ZERO() macro defined just above.
 */
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	/* clear byte 2 (bits 23:16) of the PCI mode register */
	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	/* interrupt cause/mask offsets differ per chip family */
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
2012#undef ZERO
2013
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	/* 60xx reuses the 50xx flash reset, then adds a GPIO tweak */
	mv5_reset_flash(hpriv, mmio);

	/* keep bits 1:0, set bits 5+6 — the same two bits that
	 * mv6_enable_leds() writes (0x60)
	 */
	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
2025
2026/**
2027 * mv6_reset_hc - Perform the 6xxx global soft reset
2028 * @mmio: base address of the HBA
2029 *
2030 * This routine only applies to 6xxx parts.
2031 *
2032 * LOCKING:
2033 * Inherited from caller.
2034 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* wait up to ~1ms for the PCI master to drain */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	/* rc: 0 on success, 1 if any step timed out */
	return rc;
}
2089
Jeff Garzik47c2b672005-11-12 21:13:17 -05002090static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002091 void __iomem *mmio)
2092{
2093 void __iomem *port_mmio;
2094 u32 tmp;
2095
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002096 tmp = readl(mmio + MV_RESET_CFG);
2097 if ((tmp & (1 << 0)) == 0) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002098 hpriv->signal[idx].amps = 0x7 << 8;
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002099 hpriv->signal[idx].pre = 0x1 << 5;
2100 return;
2101 }
2102
2103 port_mmio = mv_port_base(mmio, idx);
2104 tmp = readl(port_mmio + PHY_MODE2);
2105
2106 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2107 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2108}
2109
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	/* set GPIO bits 5+6 (0x60) — the same bits mv6_reset_flash()
	 * preserves/sets; exact LED wiring is board-specific
	 */
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
2114
/* mv6_phy_errata - apply 60x1/IIE PHY errata workarounds to one port.
 * @hpriv: host private data; hp_flags select which errata apply
 * @mmio: host register base
 * @port: port whose PHY registers are adjusted
 *
 * Pokes PHY_MODE2/3/4 following the Marvell vendor driver, then
 * restores the pre-emphasis and amplitude values previously saved
 * by the read_preamp hook.  The register sequence and delays are
 * order-sensitive; do not reorder.
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	/* both PHY_MODE2 and PHY_MODE4 fixups apply to the same
	 * 60X1 B2 and C0 steppings */
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		/* pulse bit 31 with bit 16 cleared, then clear both,
		 * with 200us settling delays -- per vendor driver */
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* on B2 parts, register 0x310 is saved and restored
		 * around the PHY_MODE4 update (presumably the write
		 * disturbs it -- vendor workaround, TODO confirm) */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
2180
/* mv_channel_reset - pulse ATA_RST on one channel and reapply errata.
 * @hpriv: host private data (chip generation, phy_errata hook)
 * @mmio: host register base
 * @port_no: channel to reset
 *
 * Asserts the EDMA ATA_RST bit, programs the SATA interface control
 * register on GEN_II parts, waits for the reset to propagate, clears
 * the bit again, and reruns the per-port PHY errata fixups.
 */
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);	/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	/* GEN_I parts get an extra 1ms delay here (reason undocumented;
	 * presumably additional settle time -- TODO confirm) */
	if (IS_GEN_I(hpriv))
		mdelay(1);
}
2207
/**
 * mv_phy_reset - Perform eDMA reset followed by COMRESET
 * @ap: ATA channel to manipulate
 * @class: out: device class determined after reset (ATA_DEV_NONE
 *	when the link stays offline)
 * @deadline: jiffies value bounding the polling loops below
 *
 * Issues a COMRESET through SControl, polls SStatus until the link
 * settles, works around a GEN_II errata by retrying the COMRESET on
 * suspicious SStatus values, then classifies the attached device
 * from its taskfile signature.
 *
 * LOCKING:
 * Inherited from caller.  NOTE(review): an older comment here claimed
 * this routine does not sleep, but the body calls msleep(), so it must
 * only be invoked from a context that may sleep (e.g. the EH thread).
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			unsigned long deadline)
{
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* Issue COMRESET via SControl */
comreset_retry:
	/* DET=1: assert COMRESET ... */
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
	msleep(1);

	/* ... then DET=0: release it and let the link renegotiate */
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
	msleep(20);

	/* poll SStatus DET until the PHY reports device-present (3)
	 * or no-device (0), or the deadline expires */
	do {
		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	if (ata_link_offline(&ap->link)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		msleep(500);
		if (retry-- <= 0)
			break;
		if (time_after(jiffies, deadline))
			break;
	}

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap->link.device, 1, NULL);

	/* acknowledge any EDMA errors raised during the reset */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* EDMA must have been stopped before resetting the channel */
	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}
2311
Tejun Heocc0680a2007-08-06 18:36:23 +09002312static int mv_prereset(struct ata_link *link, unsigned long deadline)
Jeff Garzik22374672005-11-17 10:59:48 -05002313{
Tejun Heocc0680a2007-08-06 18:36:23 +09002314 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002315 struct mv_port_priv *pp = ap->private_data;
Tejun Heocc0680a2007-08-06 18:36:23 +09002316 struct ata_eh_context *ehc = &link->eh_context;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002317 int rc;
Jeff Garzik0ea9e172007-07-13 17:06:45 -04002318
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002319 rc = mv_stop_dma(ap);
2320 if (rc)
2321 ehc->i.action |= ATA_EH_HARDRESET;
2322
2323 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2324 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2325 ehc->i.action |= ATA_EH_HARDRESET;
2326 }
2327
2328 /* if we're about to do hardreset, nothing more to do */
2329 if (ehc->i.action & ATA_EH_HARDRESET)
2330 return 0;
2331
Tejun Heocc0680a2007-08-06 18:36:23 +09002332 if (ata_link_online(link))
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002333 rc = ata_wait_ready(ap, deadline);
2334 else
2335 rc = -ENODEV;
2336
2337 return rc;
Jeff Garzik22374672005-11-17 10:59:48 -05002338}
2339
/* mv_hardreset - libata hardreset hook.
 *
 * Quiesce EDMA, pulse the channel reset (ATA_RST + PHY errata),
 * then issue a COMRESET and classify the attached device.
 * Always reports success; classification result is in *class.
 */
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	return 0;
}
2355
Tejun Heocc0680a2007-08-06 18:36:23 +09002356static void mv_postreset(struct ata_link *link, unsigned int *classes)
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002357{
Tejun Heocc0680a2007-08-06 18:36:23 +09002358 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002359 u32 serr;
2360
2361 /* print link status */
Tejun Heocc0680a2007-08-06 18:36:23 +09002362 sata_print_link_status(link);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002363
2364 /* clear SError */
Tejun Heocc0680a2007-08-06 18:36:23 +09002365 sata_scr_read(link, SCR_ERROR, &serr);
2366 sata_scr_write_flush(link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002367
2368 /* bail out if no device is present */
2369 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2370 DPRINTK("EXIT, no device\n");
2371 return;
2372 }
2373
2374 /* set up device control */
2375 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2376}
2377
/* Error-handler entry point: run standard libata EH with the
 * standard softreset plus our prereset/hardreset/postreset hooks. */
static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}
2383
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002384static void mv_eh_freeze(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04002385{
Tejun Heo0d5ff562007-02-01 15:06:36 +09002386 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002387 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2388 u32 tmp, mask;
2389 unsigned int shift;
Brett Russ31961942005-09-30 01:36:00 -04002390
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002391 /* FIXME: handle coalescing completion events properly */
Brett Russ31961942005-09-30 01:36:00 -04002392
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002393 shift = ap->port_no * 2;
2394 if (hc > 0)
2395 shift++;
Brett Russ31961942005-09-30 01:36:00 -04002396
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002397 mask = 0x3 << shift;
Brett Russ31961942005-09-30 01:36:00 -04002398
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002399 /* disable assertion of portN err, done events */
2400 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2401 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2402}
2403
/* mv_eh_thaw - re-enable this port's interrupts after EH.
 *
 * Clears any stale EDMA error and host-controller interrupt causes
 * for the port, then re-enables its err/done bits in the main IRQ
 * mask.  Counterpart of mv_eh_freeze().
 */
static void mv_eh_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	/* ports 4-7 live on the second host controller: their bit
	 * pair sits one higher, and their HC-local index is port-4 */
	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
2436
Brett Russ05b308e2005-10-05 17:08:53 -04002437/**
2438 * mv_port_init - Perform some early initialization on a single port.
2439 * @port: libata data structure storing shadow register addresses
2440 * @port_mmio: base address of the port
2441 *
2442 * Initialize shadow register mmio addresses, clear outstanding
2443 * interrupts on the port, and unmask interrupts for the future
2444 * start of the port.
2445 *
2446 * LOCKING:
2447 * Inherited from caller.
2448 */
Brett Russ31961942005-09-30 01:36:00 -04002449static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2450{
Tejun Heo0d5ff562007-02-01 15:06:36 +09002451 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
Brett Russ31961942005-09-30 01:36:00 -04002452 unsigned serr_ofs;
2453
Jeff Garzik8b260242005-11-12 12:32:50 -05002454 /* PIO related setup
Brett Russ31961942005-09-30 01:36:00 -04002455 */
2456 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
Jeff Garzik8b260242005-11-12 12:32:50 -05002457 port->error_addr =
Brett Russ31961942005-09-30 01:36:00 -04002458 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2459 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2460 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2461 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2462 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2463 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
Jeff Garzik8b260242005-11-12 12:32:50 -05002464 port->status_addr =
Brett Russ31961942005-09-30 01:36:00 -04002465 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2466 /* special case: control/altstatus doesn't have ATA_REG_ address */
2467 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2468
2469 /* unused: */
Randy Dunlap8d9db2d2007-02-16 01:40:06 -08002470 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
Brett Russ20f733e2005-09-01 18:26:17 -04002471
Brett Russ31961942005-09-30 01:36:00 -04002472 /* Clear any currently outstanding port interrupt conditions */
2473 serr_ofs = mv_scr_offset(SCR_ERROR);
2474 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2475 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2476
Mark Lord646a4da2008-01-26 18:30:37 -05002477 /* unmask all non-transient EDMA error interrupts */
2478 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04002479
Jeff Garzik8b260242005-11-12 12:32:50 -05002480 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
Brett Russ31961942005-09-30 01:36:00 -04002481 readl(port_mmio + EDMA_CFG_OFS),
2482 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2483 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
Brett Russ20f733e2005-09-01 18:26:17 -04002484}
2485
/**
 * mv_chip_id - select chip-specific ops and errata flags.
 * @host: ATA host being initialized
 * @board_idx: board type index from the PCI device table
 *
 * Sets hpriv->ops, the chip-generation flag, and per-stepping errata
 * flags based on the PCI revision ID, then selects PCI vs PCIe
 * interrupt register offsets from the resulting hp_flags.
 *
 * Returns 0 on success, 1 on an invalid @board_idx.
 */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* FALLTHROUGH: chip_7042 shares all remaining GEN_IIE
		 * setup with chip_6042 below */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		dev_printk(KERN_ERR, &pdev->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	/* PCIe chips (7042) use a different set of interrupt
	 * cause/mask register offsets than plain-PCI parts */
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
2621
/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 * @board_idx: controller index
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * Returns 0 on success, nonzero if mv_chip_id() or the host
 * controller reset fails.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask: silence everything while we set up */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* snapshot PHY pre-emphasis/amplitude settings before reset */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	/* per-port PHY setup */
	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);	/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	/* per-port libata setup: shadow registers and resource descs */
	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);
		unsigned int offset = port_mmio - mmio;

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	/* and unmask interrupt generation for host regs */
	writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);

	/* GEN_I chips use a different "masked IRQs" constant */
	if (IS_GEN_I(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + hpriv->irq_cause_ofs),
		readl(mmio + hpriv->irq_mask_ofs));

done:
	return rc;
}
2718
#ifdef CONFIG_PCI
/* forward declaration: the probe routine is defined further below */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

/* PCI glue: bind mv_pci_tbl entries to our probe routine */
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};

/*
 * module options
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
2733
2734
2735/* move to PCI layer or libata core? */
2736static int pci_go_64(struct pci_dev *pdev)
2737{
2738 int rc;
2739
2740 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2741 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2742 if (rc) {
2743 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2744 if (rc) {
2745 dev_printk(KERN_ERR, &pdev->dev,
2746 "64-bit DMA enable failed\n");
2747 return rc;
2748 }
2749 }
2750 } else {
2751 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2752 if (rc) {
2753 dev_printk(KERN_ERR, &pdev->dev,
2754 "32-bit DMA enable failed\n");
2755 return rc;
2756 }
2757 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2758 if (rc) {
2759 dev_printk(KERN_ERR, &pdev->dev,
2760 "32-bit consistent DMA enable failed\n");
2761 return rc;
2762 }
2763 }
2764
2765 return rc;
2766}
2767
Brett Russ05b308e2005-10-05 17:08:53 -04002768/**
2769 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002770 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002771 *
2772 * FIXME: complete this.
2773 *
2774 * LOCKING:
2775 * Inherited from caller.
2776 */
Tejun Heo4447d352007-04-17 23:44:08 +09002777static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002778{
Tejun Heo4447d352007-04-17 23:44:08 +09002779 struct pci_dev *pdev = to_pci_dev(host->dev);
2780 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07002781 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002782 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002783
2784 /* Use this to determine the HW stepping of the chip so we know
2785 * what errata to workaround
2786 */
Brett Russ31961942005-09-30 01:36:00 -04002787 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2788 if (scc == 0)
2789 scc_s = "SCSI";
2790 else if (scc == 0x01)
2791 scc_s = "RAID";
2792 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002793 scc_s = "?";
2794
2795 if (IS_GEN_I(hpriv))
2796 gen = "I";
2797 else if (IS_GEN_II(hpriv))
2798 gen = "II";
2799 else if (IS_GEN_IIE(hpriv))
2800 gen = "IIE";
2801 else
2802 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002803
Jeff Garzika9524a72005-10-30 14:39:11 -05002804 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002805 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2806 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002807 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2808}
2809
Mark Lordda2fa9b2008-01-26 18:32:45 -05002810static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2811{
2812 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2813 MV_CRQB_Q_SZ, 0);
2814 if (!hpriv->crqb_pool)
2815 return -ENOMEM;
2816
2817 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2818 MV_CRPB_Q_SZ, 0);
2819 if (!hpriv->crpb_pool)
2820 return -ENOMEM;
2821
2822 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2823 MV_SG_TBL_SZ, 0);
2824 if (!hpriv->sg_tbl_pool)
2825 return -ENOMEM;
2826
2827 return 0;
2828}
2829
/**
 *      mv_init_one - handle a positive probe of a Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      Allocates the ata_host and driver-private data, maps BARs,
 *      sets up DMA pools, initializes the adapter, and activates the
 *      host.  All resources are devres-managed (devm_*/pcim_*), so
 *      every error path can simply return -- cleanup is automatic.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	/* print the driver banner once, on the first probe only */
	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		/* keep the device enabled even though the region is busy;
		 * presumably so a half-attached controller stays usable --
		 * see pcim_pin_device() docs to confirm */
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	/* configure 64-bit DMA if available, before creating DMA pools */
	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts: fall back to legacy INTx if MSI is
	 * disabled by the module parameter or fails to enable */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);	/* best-effort; failure is harmless */
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002896#endif
Brett Russ20f733e2005-09-01 18:26:17 -04002897
2898static int __init mv_init(void)
2899{
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002900 int rc = -ENODEV;
2901#ifdef CONFIG_PCI
2902 rc = pci_register_driver(&mv_pci_driver);
2903#endif
2904 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04002905}
2906
/*
 * mv_exit - module unload entry point
 *
 * Unregister the PCI driver.  The call is guarded because
 * mv_pci_driver only exists when CONFIG_PCI is set.
 */
static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
}
2913
/* Module metadata and parameters */
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
/* Export the PCI ID table so userspace tools can auto-load this module */
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

#ifdef CONFIG_PCI
/* 'msi' is defined earlier in this file; 0444 = world-readable, not writable */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);