blob: 6a583c19c6e5435a29739ab7b3b04d1b95767eb0 [file] [log] [blame]
Jeff Garzikb5762942007-10-25 20:58:22 -04001/*
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002 mv_sas.c - Marvell 88SE6440 SAS/SATA support
Jeff Garzikb5762942007-10-25 20:58:22 -04003
4 Copyright 2007 Red Hat, Inc.
Ke Wei8f261aa2008-02-23 21:15:27 +08005 Copyright 2008 Marvell. <kewei@marvell.com>
Jeff Garzikb5762942007-10-25 20:58:22 -04006
7 This program is free software; you can redistribute it and/or
8 modify it under the terms of the GNU General Public License as
9 published by the Free Software Foundation; either version 2,
10 or (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty
14 of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
15 See the GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public
18 License along with this program; see the file COPYING. If not,
19 write to the Free Software Foundation, 675 Mass Ave, Cambridge,
20 MA 02139, USA.
21
22 ---------------------------------------------------------------
23
24 Random notes:
25 * hardware supports controlling the endian-ness of data
26 structures. this permits elimination of all the le32_to_cpu()
27 and cpu_to_le32() conversions.
28
29 */
30
Jeff Garzikdd4969a2009-05-08 17:44:01 -040031#include "mv_sas.h"
32#include "mv_64xx.h"
33#include "mv_chips.h"
Ke Wei8f261aa2008-02-23 21:15:27 +080034
/* offset for D2H FIS in the Received FIS List Structure */
/* NOTE: these three macros expand to expressions that use a local
 * variable named 'mvi' (struct mvs_info *) at the expansion site. */
#define SATA_RECEIVED_D2H_FIS(reg_set)	\
	((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x40)
/* offset for the PIO Setup FIS of register set @reg_set */
#define SATA_RECEIVED_PIO_FIS(reg_set)	\
	((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x20)
/* D2H FIS slot for a FIS not associated with a register set */
#define UNASSOC_D2H_FIS(id)		\
	((void *) mvi->rx_fis + 0x100 * id)
/* Per-command bookkeeping handed from the exec entry point to the
 * protocol-specific prep routines (SMP/SSP/ATA). */
struct mvs_task_exec_info {
	struct sas_task *task;		/* libsas task being prepared */
	struct mvs_cmd_hdr *hdr;	/* command header slot for this tag */
	struct mvs_port *port;		/* target port */
	u32 tag;			/* hardware slot tag */
	int n_elem;			/* # of DMA-mapped s/g entries */
};
50
/* forward declarations for helpers defined later in this file */
static void mvs_release_task(struct mvs_info *mvi, int phy_no);
static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i);
static void mvs_update_phyinfo(struct mvs_info *mvi, int i,
				int get_st);
static int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task,
				u32 slot_idx);
Jeff Garzikdd4969a2009-05-08 17:44:01 -040059static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
60{
61 if (task->lldd_task) {
62 struct mvs_slot_info *slot;
63 slot = (struct mvs_slot_info *) task->lldd_task;
64 *tag = slot - mvi->slot_info;
65 return 1;
66 }
67 return 0;
68}
Ke Wei8f261aa2008-02-23 21:15:27 +080069
Jeff Garzikdd4969a2009-05-08 17:44:01 -040070static void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
71{
72 void *bitmap = (void *) &mvi->tags;
73 clear_bit(tag, bitmap);
74}
Jeff Garzikb5762942007-10-25 20:58:22 -040075
Jeff Garzikdd4969a2009-05-08 17:44:01 -040076static void mvs_tag_free(struct mvs_info *mvi, u32 tag)
77{
78 mvs_tag_clear(mvi, tag);
79}
Jeff Garzikb5762942007-10-25 20:58:22 -040080
Jeff Garzikdd4969a2009-05-08 17:44:01 -040081static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
82{
83 void *bitmap = (void *) &mvi->tags;
84 set_bit(tag, bitmap);
85}
86
87static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
88{
89 unsigned int index, tag;
90 void *bitmap = (void *) &mvi->tags;
91
92 index = find_first_zero_bit(bitmap, MVS_SLOTS);
93 tag = index;
94 if (tag >= MVS_SLOTS)
95 return -SAS_QUEUE_FULL;
96 mvs_tag_set(mvi, tag);
97 *tag_out = tag;
98 return 0;
99}
100
101void mvs_tag_init(struct mvs_info *mvi)
102{
103 int i;
104 for (i = 0; i < MVS_SLOTS; ++i)
105 mvs_tag_clear(mvi, i);
106}
Jeff Garzikb5762942007-10-25 20:58:22 -0400107
/*
 * mvs_hexdump - print @size bytes at @data as hex + ASCII, 16 per row.
 * @baseaddr: address printed in the left-hand column of each row
 *            (typically the DMA address corresponding to @data).
 */
static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
{
	u32 i;
	u32 run;	/* bytes printed in the current row (<= 16) */
	u32 offset;	/* bytes printed so far; advances baseaddr column */

	offset = 0;
	while (size) {
		printk("%08X : ", baseaddr + offset);
		if (size >= 16)
			run = 16;
		else
			run = size;
		size -= run;
		/* hex column: pad a short final row with blanks */
		for (i = 0; i < 16; i++) {
			if (i < run)
				printk("%02X ", (u32)data[i]);
			else
				printk(" ");
		}
		printk(": ");
		/* ASCII column: non-alphanumerics rendered as '.' */
		for (i = 0; i < run; i++)
			printk("%c", isalnum(data[i]) ? data[i] : '.');
		printk("\n");
		data = &data[16];
		offset += run;
	}
	printk("\n");
}
137
#if _MV_DUMP
/*
 * mvs_hba_sb_dump - dump the status buffer of command slot @tag.
 * The status buffer lives in the slot's DMA buffer after the command
 * table, the open address frame and the PRD entries.
 */
static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
			   enum sas_protocol proto)
{
	u32 offset;
	struct pci_dev *pdev = mvi->pdev;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];

	/* offset of the status buffer within slot->buf */
	offset = slot->cmd_size + MVS_OAF_SZ +
	    sizeof(struct mvs_prd) * slot->n_elem;
	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Status buffer[%d] :\n",
			tag);
	mvs_hexdump(32, (u8 *) slot->response,
	    (u32) slot->buf_dma + offset);
}
#endif
Ke Wei8f261aa2008-02-23 21:15:27 +0800154
/*
 * mvs_hba_memory_dump - debug dump of every hardware structure tied to
 * command slot @tag: delivery queue entry, command header, command
 * table, open address frame, status buffer and PRD table.
 * Compiled to a no-op unless _MV_DUMP is enabled.
 */
static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
				enum sas_protocol proto)
{
#if _MV_DUMP
	u32 sz, w_ptr;
	u64 addr;
	void __iomem *regs = mvi->regs;
	struct pci_dev *pdev = mvi->pdev;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];

	/*Delivery Queue */
	sz = mr32(TX_CFG) & TX_RING_SZ_MASK;
	w_ptr = slot->tx;
	/* 64-bit base assembled from two 32-bit registers; the double
	 * << 16 avoids a 64-bit shift on 32-bit hosts */
	addr = mr32(TX_HI) << 16 << 16 | mr32(TX_LO);
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Delivery Queue Base Address=0x%llX (PA)"
		"(tx_dma=0x%llX), Entry=%04d\n",
		addr, mvi->tx_dma, w_ptr);
	mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
			(u32) mvi->tx_dma + sizeof(u32) * w_ptr);
	/*Command List */
	addr = mvi->slot_dma;
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Command List Base Address=0x%llX (PA)"
		"(slot_dma=0x%llX), Header=%03d\n",
		addr, slot->buf_dma, tag);
	dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag);
	/*mvs_cmd_hdr */
	mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
		(u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
	/*1.command table area */
	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Command Table :\n");
	mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
	/*2.open address frame area */
	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Open Address Frame :\n");
	mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
		(u32) slot->buf_dma + slot->cmd_size);
	/*3.status buffer */
	mvs_hba_sb_dump(mvi, tag, proto);
	/*4.PRD table */
	dev_printk(KERN_DEBUG, &pdev->dev, "+---->PRD table :\n");
	mvs_hexdump(sizeof(struct mvs_prd) * slot->n_elem,
		(u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
		(u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
#endif
}
203
/*
 * mvs_hba_cq_dump - debug dump of the next completion queue entry and
 * the task it completes.  Only compiled in at dump level > 2.
 */
static void mvs_hba_cq_dump(struct mvs_info *mvi)
{
#if (_MV_DUMP > 2)
	u64 addr;
	void __iomem *regs = mvi->regs;
	struct pci_dev *pdev = mvi->pdev;
	/* rx[0] is the hardware write pointer; entries start at rx[1] */
	u32 entry = mvi->rx_cons + 1;
	u32 rx_desc = le32_to_cpu(mvi->rx[entry]);

	/*Completion Queue */
	addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
	dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%p\n",
		   mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Completion List Base Address=0x%llX (PA), "
		"CQ_Entry=%04d, CQ_WP=0x%08X\n",
		addr, entry - 1, mvi->rx[0]);
	mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
		    mvi->rx_dma + sizeof(u32) * entry);
#endif
}
225
Jeff Garzikdd4969a2009-05-08 17:44:01 -0400226/* FIXME: locking? */
227int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, void *funcdata)
Ke Wei8f261aa2008-02-23 21:15:27 +0800228{
Jeff Garzikdd4969a2009-05-08 17:44:01 -0400229 struct mvs_info *mvi = sas_phy->ha->lldd_ha;
230 int rc = 0, phy_id = sas_phy->id;
Ke Wei8f261aa2008-02-23 21:15:27 +0800231 u32 tmp;
232
Jeff Garzikdd4969a2009-05-08 17:44:01 -0400233 tmp = mvs_read_phy_ctl(mvi, phy_id);
Ke Wei8f261aa2008-02-23 21:15:27 +0800234
Jeff Garzikdd4969a2009-05-08 17:44:01 -0400235 switch (func) {
236 case PHY_FUNC_SET_LINK_RATE:{
237 struct sas_phy_linkrates *rates = funcdata;
238 u32 lrmin = 0, lrmax = 0;
Ke Wei8f261aa2008-02-23 21:15:27 +0800239
Jeff Garzikdd4969a2009-05-08 17:44:01 -0400240 lrmin = (rates->minimum_linkrate << 8);
241 lrmax = (rates->maximum_linkrate << 12);
Ke Wei8f261aa2008-02-23 21:15:27 +0800242
Jeff Garzikdd4969a2009-05-08 17:44:01 -0400243 if (lrmin) {
244 tmp &= ~(0xf << 8);
245 tmp |= lrmin;
Jeff Garzikb5762942007-10-25 20:58:22 -0400246 }
Jeff Garzikdd4969a2009-05-08 17:44:01 -0400247 if (lrmax) {
248 tmp &= ~(0xf << 12);
249 tmp |= lrmax;
250 }
251 mvs_write_phy_ctl(mvi, phy_id, tmp);
252 break;
Jeff Garzikb5762942007-10-25 20:58:22 -0400253 }
Jeff Garzikdd4969a2009-05-08 17:44:01 -0400254
255 case PHY_FUNC_HARD_RESET:
256 if (tmp & PHY_RST_HARD)
257 break;
258 mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD);
259 break;
260
261 case PHY_FUNC_LINK_RESET:
262 mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST);
263 break;
264
265 case PHY_FUNC_DISABLE:
266 case PHY_FUNC_RELEASE_SPINUP_HOLD:
267 default:
268 rc = -EOPNOTSUPP;
Jeff Garzikb5762942007-10-25 20:58:22 -0400269 }
270
271 return rc;
272}
273
Ke Wei8f261aa2008-02-23 21:15:27 +0800274static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
275{
276 struct mvs_phy *phy = &mvi->phy[i];
Ke Weiee1f1c22008-03-27 14:53:47 +0800277 struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];
Ke Wei8f261aa2008-02-23 21:15:27 +0800278
279 if (!phy->phy_attached)
280 return;
281
Ke Weiee1f1c22008-03-27 14:53:47 +0800282 if (sas_phy->phy) {
283 struct sas_phy *sphy = sas_phy->phy;
284
285 sphy->negotiated_linkrate = sas_phy->linkrate;
286 sphy->minimum_linkrate = phy->minimum_linkrate;
287 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
288 sphy->maximum_linkrate = phy->maximum_linkrate;
289 sphy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
290 }
291
Ke Wei8f261aa2008-02-23 21:15:27 +0800292 if (phy->phy_type & PORT_TYPE_SAS) {
293 struct sas_identify_frame *id;
294
295 id = (struct sas_identify_frame *)phy->frame_rcvd;
296 id->dev_type = phy->identify.device_type;
297 id->initiator_bits = SAS_PROTOCOL_ALL;
298 id->target_bits = phy->identify.target_port_protocols;
299 } else if (phy->phy_type & PORT_TYPE_SATA) {
300 /* TODO */
301 }
302 mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size;
303 mvi->sas.notify_port_event(mvi->sas.sas_phy[i],
304 PORTE_BYTES_DMAED);
305}
306
Jeff Garzikdd4969a2009-05-08 17:44:01 -0400307int mvs_slave_configure(struct scsi_device *sdev)
Ke Wei8f261aa2008-02-23 21:15:27 +0800308{
Ke Weiee1f1c22008-03-27 14:53:47 +0800309 struct domain_device *dev = sdev_to_domain_dev(sdev);
310 int ret = sas_slave_configure(sdev);
Ke Wei8f261aa2008-02-23 21:15:27 +0800311
Ke Weiee1f1c22008-03-27 14:53:47 +0800312 if (ret)
313 return ret;
Ke Wei8f261aa2008-02-23 21:15:27 +0800314
Ke Weiee1f1c22008-03-27 14:53:47 +0800315 if (dev_is_sata(dev)) {
316 /* struct ata_port *ap = dev->sata_dev.ap; */
317 /* struct ata_device *adev = ap->link.device; */
318
319 /* clamp at no NCQ for the time being */
320 /* adev->flags |= ATA_DFLAG_NCQ_OFF; */
321 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
322 }
323 return 0;
Jeff Garzikb5762942007-10-25 20:58:22 -0400324}
325
Jeff Garzikdd4969a2009-05-08 17:44:01 -0400326void mvs_scan_start(struct Scsi_Host *shost)
Jeff Garzikb5762942007-10-25 20:58:22 -0400327{
Jeff Garzikb5762942007-10-25 20:58:22 -0400328 int i;
Jeff Garzikdd4969a2009-05-08 17:44:01 -0400329 struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha;
Jeff Garzikb5762942007-10-25 20:58:22 -0400330
Jeff Garzikdd4969a2009-05-08 17:44:01 -0400331 for (i = 0; i < mvi->chip->n_phy; ++i) {
332 mvs_bytes_dmaed(mvi, i);
Jeff Garzikb5762942007-10-25 20:58:22 -0400333 }
Jeff Garzikb5762942007-10-25 20:58:22 -0400334}
335
Jeff Garzikdd4969a2009-05-08 17:44:01 -0400336int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
Jeff Garzikb5762942007-10-25 20:58:22 -0400337{
Jeff Garzikdd4969a2009-05-08 17:44:01 -0400338 /* give the phy enabling interrupt event time to come in (1s
339 * is empirically about all it takes) */
340 if (time < HZ)
Ke Wei8f261aa2008-02-23 21:15:27 +0800341 return 0;
Jeff Garzikdd4969a2009-05-08 17:44:01 -0400342 /* Wait for discovery to finish */
343 scsi_flush_work(shost);
344 return 1;
Jeff Garzikb5762942007-10-25 20:58:22 -0400345}
346
/*
 * mvs_task_prep_smp - prepare an SMP request for hardware submission.
 *
 * DMA-maps the SMP request/response scatterlists, carves the slot's
 * MVS_SLOT_BUF_SZ DMA buffer into its four regions (command table,
 * open address frame, PRD table, status buffer), then fills the
 * delivery-queue entry and the command header for slot @tei->tag.
 *
 * Returns 0 on success, -ENOMEM on mapping failure, -EINVAL when the
 * request/response lengths are not dword multiples.
 */
static int mvs_task_prep_smp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	int elem, rc, i;
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct scatterlist *sg_req, *sg_resp;
	u32 req_len, resp_len, tag = tei->tag;
	void *buf_tmp;
	u8 *buf_oaf;
	dma_addr_t buf_tmp_dma;
	struct mvs_prd *buf_prd;
	struct scatterlist *sg;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];
	struct asd_sas_port *sas_port = task->dev->port;
	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
#if _MV_DUMP
	u8 *buf_cmd;
	void *from;
#endif
	/*
	 * DMA-map SMP request, response buffers
	 */
	sg_req = &task->smp_task.smp_req;
	elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE);
	if (!elem)
		return -ENOMEM;
	req_len = sg_dma_len(sg_req);

	sg_resp = &task->smp_task.smp_resp;
	elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE);
	if (!elem) {
		rc = -ENOMEM;
		goto err_out;
	}
	resp_len = sg_dma_len(sg_resp);

	/* must be in dwords */
	if ((req_len & 0x3) || (resp_len & 0x3)) {
		rc = -EINVAL;
		goto err_out_2;
	}

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
	buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

#if _MV_DUMP
	/* when dumping, stage the request inside the slot buffer so it
	 * can be hexdumped later; otherwise point the hardware straight
	 * at the mapped request scatterlist */
	buf_cmd = buf_tmp;
	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
	buf_tmp += req_len;
	buf_tmp_dma += req_len;
	slot->cmd_size = req_len;
#else
	hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
#endif

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = sizeof(struct mvs_prd) * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);

	/*
	 * Fill in TX ring and command slot header
	 */
	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
					TXQ_MODE_I | tag |
					(sas_port->phy_mask << TXQ_PHY_SHIFT));

	hdr->flags |= flags;
	/* SMP request length excludes the trailing CRC dword */
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = 0;

	/* generate open address frame hdr (first 12 bytes) */
	buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */
	buf_oaf[1] = task->dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = 0xFFFF;		/* SAS SPEC */
	memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	for_each_sg(task->scatter, sg, tei->n_elem, i) {
		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
		buf_prd++;
	}

#if _MV_DUMP
	/* copy cmd table */
	from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
	memcpy(buf_cmd, from + sg_req->offset, req_len);
	kunmap_atomic(from, KM_IRQ0);
#endif
	return 0;

err_out_2:
	pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1,
		     PCI_DMA_FROMDEVICE);
err_out:
	pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1,
		     PCI_DMA_TODEVICE);
	return rc;
}
472
Ke Wei4e52fc02008-03-27 14:54:50 +0800473static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
Ke Wei8f261aa2008-02-23 21:15:27 +0800474{
Ke Wei8f261aa2008-02-23 21:15:27 +0800475 struct ata_queued_cmd *qc = task->uldd_task;
476
Ke Wei4e52fc02008-03-27 14:54:50 +0800477 if (qc) {
478 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
479 qc->tf.command == ATA_CMD_FPDMA_READ) {
480 *tag = qc->tag;
481 return 1;
482 }
483 }
Ke Wei8f261aa2008-02-23 21:15:27 +0800484
Ke Wei4e52fc02008-03-27 14:54:50 +0800485 return 0;
Ke Wei8f261aa2008-02-23 21:15:27 +0800486}
487
/*
 * mvs_task_prep_ata - prepare a SATA/STP command for hardware submission.
 *
 * Claims a SATA register set for the port, lays out the slot's DMA
 * buffer (command table with the H2D FIS, open address frame, PRD
 * table, status buffer) and fills the delivery-queue entry and
 * command header for slot @tei->tag.
 *
 * Returns 0 on success, -EBUSY when no register set is available.
 */
static int mvs_task_prep_ata(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	struct sas_task *task = tei->task;
	struct domain_device *dev = task->dev;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct asd_sas_port *sas_port = dev->port;
	struct mvs_slot_info *slot;
	struct scatterlist *sg;
	struct mvs_prd *buf_prd;
	struct mvs_port *port = tei->port;
	u32 tag = tei->tag;
	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf;
	dma_addr_t buf_tmp_dma;
	u32 i, req_len, resp_len;
	const u32 max_resp_len = SB_RFB_MAX;

	/* a SATA register set is required per-port for STP commands */
	if (mvs_assign_reg_set(mvi, port) == MVS_ID_NOT_MAPPED)
		return -EBUSY;

	slot = &mvi->slot_info[tag];
	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
					(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
					(sas_port->phy_mask << TXQ_PHY_SHIFT) |
					(port->taskfileset << TXQ_SRS_SHIFT));

	if (task->ata_task.use_ncq)
		flags |= MCH_FPDMA;
	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
		if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
			flags |= MCH_ATAPI;
	}

	/* FIXME: fill in port multiplier number */

	hdr->flags = cpu_to_le32(flags);

	/* FIXME: the low order order 5 bits for the TAG if enable NCQ */
	/* NCQ commands carry the queue tag in the FIS sector count field */
	if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr->tags))
		task->ata_task.fis.sector_count |= hdr->tags << 3;
	else
		hdr->tags = cpu_to_le32(tag);
	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_ATA_CMD_SZ;
	buf_tmp_dma += MVS_ATA_CMD_SZ;
#if _MV_DUMP
	slot->cmd_size = MVS_ATA_CMD_SZ;
#endif

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	/* used for STP. unused for SATA? */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = sizeof(struct mvs_prd) * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	/* FIXME: probably unused, for SATA. kept here just in case
	 * we get a STP/SATA error information record
	 */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);

	req_len = sizeof(struct host_to_dev_fis);
	resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
	    sizeof(struct mvs_err_info) - i;

	/* request, response lengths */
	resp_len = min(resp_len, max_resp_len);
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
	/* fill in command FIS and ATAPI CDB */
	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
		memcpy(buf_cmd + STP_ATAPI_CMD,
			task->ata_task.atapi_packet, 16);

	/* generate open address frame hdr (first 12 bytes) */
	buf_oaf[0] = (1 << 7) | (2 << 4) | 0x1;	/* initiator, STP, ftype 1h */
	buf_oaf[1] = task->dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
	memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	for_each_sg(task->scatter, sg, tei->n_elem, i) {
		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
		buf_prd++;
	}

	return 0;
}
607
/*
 * mvs_task_prep_ssp - prepare an SSP command for hardware submission.
 *
 * Lays out the slot's DMA buffer (SSP command table, open address
 * frame, PRD table, status buffer), builds the SSP COMMAND frame and
 * command IU, and fills the delivery-queue entry and command header
 * for slot @tei->tag.  Always returns 0.
 */
static int mvs_task_prep_ssp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct mvs_port *port = tei->port;
	struct mvs_slot_info *slot;
	struct scatterlist *sg;
	struct mvs_prd *buf_prd;
	struct ssp_frame_hdr *ssp_hdr;
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf, fburst = 0;
	dma_addr_t buf_tmp_dma;
	u32 flags;
	u32 resp_len, req_len, i, tag = tei->tag;
	const u32 max_resp_len = SB_RFB_MAX;
	u8 phy_mask;

	slot = &mvi->slot_info[tag];

	/* prefer the wide-port phy map when the port is a wide port */
	phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap :
		task->dev->port->phy_mask;
	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
				(TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
				(phy_mask << TXQ_PHY_SHIFT));

	flags = MCH_RETRY;
	if (task->ssp_task.enable_first_burst) {
		flags |= MCH_FBURST;
		fburst = (1 << 7);
	}
	hdr->flags = cpu_to_le32(flags |
				 (tei->n_elem << MCH_PRD_LEN_SHIFT) |
				 (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT));

	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_SSP_CMD_SZ;
	buf_tmp_dma += MVS_SSP_CMD_SZ;
#if _MV_DUMP
	slot->cmd_size = MVS_SSP_CMD_SZ;
#endif

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = sizeof(struct mvs_prd) * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);

	resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
	    sizeof(struct mvs_err_info) - i;
	resp_len = min(resp_len, max_resp_len);

	/* SSP frame header + 28-byte COMMAND information unit */
	req_len = sizeof(struct ssp_frame_hdr) + 28;

	/* request, response lengths */
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	/* generate open address frame hdr (first 12 bytes) */
	buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1;	/* initiator, SSP, ftype 1h */
	buf_oaf[1] = task->dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
	memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in SSP frame header (Command Table.SSP frame header) */
	ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
	ssp_hdr->frame_type = SSP_COMMAND;
	memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr,
	       HASHED_SAS_ADDR_SIZE);
	memcpy(ssp_hdr->hashed_src_addr,
	       task->dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	ssp_hdr->tag = cpu_to_be16(tag);

	/* fill in command frame IU */
	buf_cmd += sizeof(*ssp_hdr);
	memcpy(buf_cmd, &task->ssp_task.LUN, 8);
	buf_cmd[9] = fburst | task->ssp_task.task_attr |
		(task->ssp_task.task_prio << 3);
	memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);

	/* fill in PRD (scatter/gather) table, if any */
	for_each_sg(task->scatter, sg, tei->n_elem, i) {
		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
		buf_prd++;
	}

	return 0;
}
725
Jeff Garzikdd4969a2009-05-08 17:44:01 -0400726int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags)
Jeff Garzikb5762942007-10-25 20:58:22 -0400727{
Ke Wei8f261aa2008-02-23 21:15:27 +0800728 struct domain_device *dev = task->dev;
729 struct mvs_info *mvi = dev->port->ha->lldd_ha;
730 struct pci_dev *pdev = mvi->pdev;
Jeff Garzikb5762942007-10-25 20:58:22 -0400731 void __iomem *regs = mvi->regs;
Jeff Garzikb5762942007-10-25 20:58:22 -0400732 struct mvs_task_exec_info tei;
Ke Wei8f261aa2008-02-23 21:15:27 +0800733 struct sas_task *t = task;
Ke Wei4e52fc02008-03-27 14:54:50 +0800734 struct mvs_slot_info *slot;
Ke Wei8f261aa2008-02-23 21:15:27 +0800735 u32 tag = 0xdeadbeef, rc, n_elem = 0;
736 unsigned long flags;
737 u32 n = num, pass = 0;
Jeff Garzikb5762942007-10-25 20:58:22 -0400738
739 spin_lock_irqsave(&mvi->lock, flags);
Ke Wei8f261aa2008-02-23 21:15:27 +0800740 do {
Ke Wei4e52fc02008-03-27 14:54:50 +0800741 dev = t->dev;
Ke Wei8f261aa2008-02-23 21:15:27 +0800742 tei.port = &mvi->port[dev->port->id];
Jeff Garzikb5762942007-10-25 20:58:22 -0400743
Ke Wei8f261aa2008-02-23 21:15:27 +0800744 if (!tei.port->port_attached) {
Ke Wei4e52fc02008-03-27 14:54:50 +0800745 if (sas_protocol_ata(t->task_proto)) {
746 rc = SAS_PHY_DOWN;
747 goto out_done;
748 } else {
749 struct task_status_struct *ts = &t->task_status;
750 ts->resp = SAS_TASK_UNDELIVERED;
751 ts->stat = SAS_PHY_DOWN;
752 t->task_done(t);
753 if (n > 1)
754 t = list_entry(t->list.next,
755 struct sas_task, list);
756 continue;
757 }
Ke Wei8f261aa2008-02-23 21:15:27 +0800758 }
Ke Wei4e52fc02008-03-27 14:54:50 +0800759
Ke Wei8f261aa2008-02-23 21:15:27 +0800760 if (!sas_protocol_ata(t->task_proto)) {
761 if (t->num_scatter) {
762 n_elem = pci_map_sg(mvi->pdev, t->scatter,
763 t->num_scatter,
764 t->data_dir);
765 if (!n_elem) {
766 rc = -ENOMEM;
767 goto err_out;
768 }
769 }
770 } else {
771 n_elem = t->num_scatter;
772 }
Jeff Garzikb5762942007-10-25 20:58:22 -0400773
Ke Wei8f261aa2008-02-23 21:15:27 +0800774 rc = mvs_tag_alloc(mvi, &tag);
775 if (rc)
776 goto err_out;
Jeff Garzikb5762942007-10-25 20:58:22 -0400777
Ke Wei4e52fc02008-03-27 14:54:50 +0800778 slot = &mvi->slot_info[tag];
779 t->lldd_task = NULL;
780 slot->n_elem = n_elem;
781 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
Ke Wei8f261aa2008-02-23 21:15:27 +0800782 tei.task = t;
783 tei.hdr = &mvi->slot[tag];
784 tei.tag = tag;
785 tei.n_elem = n_elem;
Jeff Garzikb5762942007-10-25 20:58:22 -0400786
Ke Wei8f261aa2008-02-23 21:15:27 +0800787 switch (t->task_proto) {
788 case SAS_PROTOCOL_SMP:
789 rc = mvs_task_prep_smp(mvi, &tei);
790 break;
791 case SAS_PROTOCOL_SSP:
792 rc = mvs_task_prep_ssp(mvi, &tei);
793 break;
794 case SAS_PROTOCOL_SATA:
795 case SAS_PROTOCOL_STP:
796 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
797 rc = mvs_task_prep_ata(mvi, &tei);
798 break;
799 default:
800 dev_printk(KERN_ERR, &pdev->dev,
801 "unknown sas_task proto: 0x%x\n",
802 t->task_proto);
803 rc = -EINVAL;
804 break;
805 }
Jeff Garzikb5762942007-10-25 20:58:22 -0400806
Ke Wei8f261aa2008-02-23 21:15:27 +0800807 if (rc)
808 goto err_out_tag;
Jeff Garzikb5762942007-10-25 20:58:22 -0400809
Ke Wei4e52fc02008-03-27 14:54:50 +0800810 slot->task = t;
811 slot->port = tei.port;
812 t->lldd_task = (void *) slot;
813 list_add_tail(&slot->list, &slot->port->list);
Ke Wei8f261aa2008-02-23 21:15:27 +0800814 /* TODO: select normal or high priority */
Jeff Garzikb5762942007-10-25 20:58:22 -0400815
Ke Wei8f261aa2008-02-23 21:15:27 +0800816 spin_lock(&t->task_state_lock);
817 t->task_state_flags |= SAS_TASK_AT_INITIATOR;
818 spin_unlock(&t->task_state_lock);
Jeff Garzikb5762942007-10-25 20:58:22 -0400819
Ke Wei8f261aa2008-02-23 21:15:27 +0800820 mvs_hba_memory_dump(mvi, tag, t->task_proto);
821
822 ++pass;
823 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
Ke Wei4e52fc02008-03-27 14:54:50 +0800824 if (n > 1)
825 t = list_entry(t->list.next, struct sas_task, list);
Ke Wei8f261aa2008-02-23 21:15:27 +0800826 } while (--n);
827
Ke Wei4e52fc02008-03-27 14:54:50 +0800828 rc = 0;
829 goto out_done;
Jeff Garzikb5762942007-10-25 20:58:22 -0400830
831err_out_tag:
Ke Wei8f261aa2008-02-23 21:15:27 +0800832 mvs_tag_free(mvi, tag);
Jeff Garzikb5762942007-10-25 20:58:22 -0400833err_out:
Ke Wei8f261aa2008-02-23 21:15:27 +0800834 dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc);
835 if (!sas_protocol_ata(t->task_proto))
836 if (n_elem)
837 pci_unmap_sg(mvi->pdev, t->scatter, n_elem,
838 t->data_dir);
Ke Wei4e52fc02008-03-27 14:54:50 +0800839out_done:
Ke Wei8f261aa2008-02-23 21:15:27 +0800840 if (pass)
841 mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
Jeff Garzikb5762942007-10-25 20:58:22 -0400842 spin_unlock_irqrestore(&mvi->lock, flags);
843 return rc;
844}
845
Jeff Garzikdd4969a2009-05-08 17:44:01 -0400846static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
Ke Wei8f261aa2008-02-23 21:15:27 +0800847{
Jeff Garzikdd4969a2009-05-08 17:44:01 -0400848 u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
849 mvs_tag_clear(mvi, slot_idx);
850}
Ke Wei8f261aa2008-02-23 21:15:27 +0800851
Jeff Garzikdd4969a2009-05-08 17:44:01 -0400852static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
853 struct mvs_slot_info *slot, u32 slot_idx)
854{
855 if (!sas_protocol_ata(task->task_proto))
856 if (slot->n_elem)
857 pci_unmap_sg(mvi->pdev, task->scatter,
858 slot->n_elem, task->data_dir);
Ke Wei8f261aa2008-02-23 21:15:27 +0800859
Ke Wei8f261aa2008-02-23 21:15:27 +0800860 switch (task->task_proto) {
861 case SAS_PROTOCOL_SMP:
Jeff Garzikdd4969a2009-05-08 17:44:01 -0400862 pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1,
863 PCI_DMA_FROMDEVICE);
864 pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1,
865 PCI_DMA_TODEVICE);
Ke Wei8f261aa2008-02-23 21:15:27 +0800866 break;
Jeff Garzikdd4969a2009-05-08 17:44:01 -0400867
Ke Wei8f261aa2008-02-23 21:15:27 +0800868 case SAS_PROTOCOL_SATA:
869 case SAS_PROTOCOL_STP:
Jeff Garzikdd4969a2009-05-08 17:44:01 -0400870 case SAS_PROTOCOL_SSP:
Ke Wei8f261aa2008-02-23 21:15:27 +0800871 default:
Jeff Garzikdd4969a2009-05-08 17:44:01 -0400872 /* do nothing */
Ke Wei8f261aa2008-02-23 21:15:27 +0800873 break;
874 }
Jeff Garzikdd4969a2009-05-08 17:44:01 -0400875 list_del(&slot->list);
876 task->lldd_task = NULL;
877 slot->task = NULL;
878 slot->port = NULL;
Ke Wei8f261aa2008-02-23 21:15:27 +0800879}
880
/*
 * Push the wide-port phy membership map of the port that phy @i belongs
 * to into the PHYR_WIDE_PORT config register of each phy: members get
 * the full map, non-members get 0.
 *
 * NOTE(review): for_each_phy() is given (mask, no, j, n_phy) and the
 * body tests "no & 1" while also using "no" as the phy number in the
 * register accessors -- confirm the macro's argument order (which
 * variable iterates the mask vs. the index) before relying on this.
 */
static void mvs_update_wideport(struct mvs_info *mvi, int i)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct mvs_port *port = phy->port;
	int j, no;

	for_each_phy(port->wide_port_phymap, no, j, mvi->chip->n_phy)
		if (no & 1) {
			mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
			mvs_write_port_cfg_data(mvi, no,
						port->wide_port_phymap);
		} else {
			mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
			mvs_write_port_cfg_data(mvi, no, 0);
		}
}
897
898static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
899{
900 u32 tmp;
901 struct mvs_phy *phy = &mvi->phy[i];
Ke Wei963829e2008-03-27 14:55:23 +0800902 struct mvs_port *port = phy->port;;
Ke Wei8f261aa2008-02-23 21:15:27 +0800903
904 tmp = mvs_read_phy_ctl(mvi, i);
905
906 if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
Ke Wei963829e2008-03-27 14:55:23 +0800907 if (!port)
Ke Wei8f261aa2008-02-23 21:15:27 +0800908 phy->phy_attached = 1;
909 return tmp;
910 }
911
Ke Wei8f261aa2008-02-23 21:15:27 +0800912 if (port) {
913 if (phy->phy_type & PORT_TYPE_SAS) {
914 port->wide_port_phymap &= ~(1U << i);
915 if (!port->wide_port_phymap)
916 port->port_attached = 0;
917 mvs_update_wideport(mvi, i);
918 } else if (phy->phy_type & PORT_TYPE_SATA)
919 port->port_attached = 0;
920 mvs_free_reg_set(mvi, phy->port);
921 phy->port = NULL;
922 phy->phy_attached = 0;
923 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
924 }
925 return 0;
926}
927
Jeff Garzikdd4969a2009-05-08 17:44:01 -0400928static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
929{
930 u32 *s = (u32 *) buf;
931
932 if (!s)
933 return NULL;
934
935 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
936 s[3] = mvs_read_port_cfg_data(mvi, i);
937
938 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
939 s[2] = mvs_read_port_cfg_data(mvi, i);
940
941 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
942 s[1] = mvs_read_port_cfg_data(mvi, i);
943
944 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
945 s[0] = mvs_read_port_cfg_data(mvi, i);
946
947 return (void *)s;
948}
949
950static u32 mvs_is_sig_fis_received(u32 irq_status)
951{
952 return irq_status & PHYEV_SIG_FIS;
953}
954
/*
 * Refresh the cached information for phy @i from the port config
 * registers: local device info and SAS address, and -- when the phy is
 * ready -- negotiated/min/max link rates plus the attached device's
 * identity (SAS) or signature FIS (SATA).  When @get_st is set, the phy
 * IRQ status is latched first and acknowledged on the way out.
 */
static void mvs_update_phyinfo(struct mvs_info *mvi, int i,
					int get_st)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct pci_dev *pdev = mvi->pdev;
	u32 tmp;
	u64 tmp64;

	/* Local phy identity: device info and our own SAS address. */
	mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
	phy->dev_info = mvs_read_port_cfg_data(mvi, i);

	mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
	phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32;

	mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
	phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);

	if (get_st) {
		phy->irq_status = mvs_read_port_irq_stat(mvi, i);
		phy->phy_status = mvs_is_phy_ready(mvi, i);
	}

	if (phy->phy_status) {
		u32 phy_st;
		struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];

		mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
		phy_st = mvs_read_port_cfg_data(mvi, i);

		/* Link rates are bit-fields of the cached phy_status. */
		sas_phy->linkrate =
			(phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
				PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
		phy->minimum_linkrate =
			(phy->phy_status &
				PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8;
		phy->maximum_linkrate =
			(phy->phy_status &
				PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12;

		if (phy->phy_type & PORT_TYPE_SAS) {
			/* Update attached_sas_addr and device identity. */
			mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
			phy->att_dev_sas_addr =
				(u64) mvs_read_port_cfg_data(mvi, i) << 32;
			mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
			phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
			mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
			phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
			phy->identify.device_type =
			    phy->att_dev_info & PORT_DEV_TYPE_MASK;

			if (phy->identify.device_type == SAS_END_DEV)
				phy->identify.target_port_protocols =
							SAS_PROTOCOL_SSP;
			else if (phy->identify.device_type != NO_DEVICE)
				phy->identify.target_port_protocols =
							SAS_PROTOCOL_SMP;
			if (phy_st & PHY_OOB_DTCTD)
				sas_phy->oob_mode = SAS_OOB_MODE;
			phy->frame_rcvd_size =
			    sizeof(struct sas_identify_frame);
		} else if (phy->phy_type & PORT_TYPE_SATA) {
			phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
			if (mvs_is_sig_fis_received(phy->irq_status)) {
				phy->att_dev_sas_addr = i;	/* temp */
				if (phy_st & PHY_OOB_DTCTD)
					sas_phy->oob_mode = SATA_OOB_MODE;
				phy->frame_rcvd_size =
					sizeof(struct dev_to_host_fis);
				mvs_get_d2h_reg(mvi, i,
						(void *)sas_phy->frame_rcvd);
			} else {
				/* No signature FIS yet: not a usable SATA phy. */
				dev_printk(KERN_DEBUG, &pdev->dev,
					"No sig fis\n");
				phy->phy_type &= ~(PORT_TYPE_SATA);
				goto out_done;
			}
		}
		tmp64 = cpu_to_be64(phy->att_dev_sas_addr);
		memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE);

		dev_printk(KERN_DEBUG, &pdev->dev,
			"phy[%d] Get Attached Address 0x%llX ,"
			" SAS Address 0x%llX\n",
			i,
			(unsigned long long)phy->att_dev_sas_addr,
			(unsigned long long)phy->dev_sas_addr);
		dev_printk(KERN_DEBUG, &pdev->dev,
			"Rate = %x , type = %d\n",
			sas_phy->linkrate, phy->phy_type);

		/* workaround for HW phy decoding error on 1.5g disk drive */
		mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
		tmp = mvs_read_port_vsr_data(mvi, i);
		if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
		     PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
			SAS_LINK_RATE_1_5_GBPS)
			tmp &= ~PHY_MODE6_LATECLK;
		else
			tmp |= PHY_MODE6_LATECLK;
		mvs_write_port_vsr_data(mvi, i, tmp);

	}
out_done:
	/* Acknowledge the phy interrupts consumed above. */
	if (get_st)
		mvs_write_port_irq_stat(mvi, i, phy->irq_status);
}
1062
Jeff Garzikdd4969a2009-05-08 17:44:01 -04001063void mvs_port_formed(struct asd_sas_phy *sas_phy)
Ke Wei8f261aa2008-02-23 21:15:27 +08001064{
1065 struct sas_ha_struct *sas_ha = sas_phy->ha;
1066 struct mvs_info *mvi = sas_ha->lldd_ha;
1067 struct asd_sas_port *sas_port = sas_phy->port;
1068 struct mvs_phy *phy = sas_phy->lldd_phy;
1069 struct mvs_port *port = &mvi->port[sas_port->id];
1070 unsigned long flags;
1071
1072 spin_lock_irqsave(&mvi->lock, flags);
1073 port->port_attached = 1;
1074 phy->port = port;
1075 port->taskfileset = MVS_ID_NOT_MAPPED;
1076 if (phy->phy_type & PORT_TYPE_SAS) {
1077 port->wide_port_phymap = sas_port->phy_mask;
1078 mvs_update_wideport(mvi, sas_phy->id);
1079 }
1080 spin_unlock_irqrestore(&mvi->lock, flags);
Jeff Garzikb5762942007-10-25 20:58:22 -04001081}
1082
/*
 * I_T nexus reset TMF: not implemented for this hardware, so always
 * report failure to libsas.
 */
int mvs_I_T_nexus_reset(struct domain_device *dev)
{
	return TMF_RESP_FUNC_FAILED;
}
1087
Jeff Garzikdd4969a2009-05-08 17:44:01 -04001088static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
1089 u32 slot_idx, int err)
1090{
1091 struct mvs_port *port = mvi->slot_info[slot_idx].port;
1092 struct task_status_struct *tstat = &task->task_status;
1093 struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
1094 int stat = SAM_GOOD;
1095
1096 resp->frame_len = sizeof(struct dev_to_host_fis);
1097 memcpy(&resp->ending_fis[0],
1098 SATA_RECEIVED_D2H_FIS(port->taskfileset),
1099 sizeof(struct dev_to_host_fis));
1100 tstat->buf_valid_size = sizeof(*resp);
1101 if (unlikely(err))
1102 stat = SAS_PROTO_RESPONSE;
1103 return stat;
1104}
1105
1106static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
1107 u32 slot_idx)
1108{
1109 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1110 u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
1111 u32 err_dw1 = le32_to_cpu(*(u32 *) (slot->response + 4));
1112 int stat = SAM_CHECK_COND;
1113
1114 if (err_dw1 & SLOT_BSY_ERR) {
1115 stat = SAS_QUEUE_FULL;
1116 mvs_slot_reset(mvi, task, slot_idx);
1117 }
1118 switch (task->task_proto) {
1119 case SAS_PROTOCOL_SSP:
1120 break;
1121 case SAS_PROTOCOL_SMP:
1122 break;
1123 case SAS_PROTOCOL_SATA:
1124 case SAS_PROTOCOL_STP:
1125 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
1126 if (err_dw0 & TFILE_ERR)
1127 stat = mvs_sata_done(mvi, task, slot_idx, 1);
1128 break;
1129 default:
1130 break;
1131 }
1132
1133 mvs_hexdump(16, (u8 *) slot->response, 0);
1134 return stat;
1135}
1136
/*
 * Complete the command identified by RX descriptor @rx_desc: decode
 * per-protocol completion status into the task's task_status, free the
 * slot resources, and invoke task->task_done() with mvi->lock dropped
 * around the callback.  @flags nonzero forces the error path (used by
 * mvs_release_task()).  Returns the resulting status, or -1 when there
 * was no live task for the slot.  Called with mvi->lock held.
 */
static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
{
	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
	struct sas_task *task = slot->task;
	struct task_status_struct *tstat;
	struct mvs_port *port;
	bool aborted;
	void *to;

	if (unlikely(!task || !task->lldd_task))
		return -1;

	mvs_hba_cq_dump(mvi);

	/* Mark the task done unless it was already aborted. */
	spin_lock(&task->task_state_lock);
	aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
	if (!aborted) {
		task->task_state_flags &=
		    ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
		task->task_state_flags |= SAS_TASK_STATE_DONE;
	}
	spin_unlock(&task->task_state_lock);

	/* Aborted tasks are cleaned up without calling task_done(). */
	if (aborted) {
		mvs_slot_task_free(mvi, task, slot, slot_idx);
		mvs_slot_free(mvi, rx_desc);
		return -1;
	}

	port = slot->port;
	tstat = &task->task_status;
	memset(tstat, 0, sizeof(*tstat));
	tstat->resp = SAS_TASK_COMPLETE;

	/* Forced error (flags) or the port vanished underneath us. */
	if (unlikely(!port->port_attached || flags)) {
		mvs_slot_err(mvi, task, slot_idx);
		if (!sas_protocol_ata(task->task_proto))
			tstat->stat = SAS_PHY_DOWN;
		goto out;
	}

	/* error info record present */
	if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
		tstat->stat = mvs_slot_err(mvi, task, slot_idx);
		goto out;
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
		/* hw says status == 0, datapres == 0 */
		if (rx_desc & RXQ_GOOD) {
			tstat->stat = SAM_GOOD;
			tstat->resp = SAS_TASK_COMPLETE;
		}
		/* response frame present */
		else if (rx_desc & RXQ_RSP) {
			struct ssp_response_iu *iu =
			    slot->response + sizeof(struct mvs_err_info);
			sas_ssp_task_response(&mvi->pdev->dev, task, iu);
		}

		/* should never happen? */
		else
			tstat->stat = SAM_CHECK_COND;
		break;

	case SAS_PROTOCOL_SMP: {
			/* Copy the SMP response out of the slot buffer. */
			struct scatterlist *sg_resp = &task->smp_task.smp_resp;
			tstat->stat = SAM_GOOD;
			to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
			memcpy(to + sg_resp->offset,
				slot->response + sizeof(struct mvs_err_info),
				sg_dma_len(sg_resp));
			kunmap_atomic(to, KM_IRQ0);
			break;
		}

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
			tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
			break;
		}

	default:
		tstat->stat = SAM_CHECK_COND;
		break;
	}

out:
	mvs_slot_task_free(mvi, task, slot, slot_idx);
	/* SAS_QUEUE_FULL keeps the slot: a slot reset was queued for it. */
	if (unlikely(tstat->stat != SAS_QUEUE_FULL))
		mvs_slot_free(mvi, rx_desc);

	/* mvi->lock is released across the completion callback. */
	spin_unlock(&mvi->lock);
	task->task_done(task);
	spin_lock(&mvi->lock);
	return tstat->stat;
}
1237
1238static void mvs_release_task(struct mvs_info *mvi, int phy_no)
1239{
1240 struct list_head *pos, *n;
1241 struct mvs_slot_info *slot;
1242 struct mvs_phy *phy = &mvi->phy[phy_no];
1243 struct mvs_port *port = phy->port;
1244 u32 rx_desc;
1245
1246 if (!port)
1247 return;
1248
1249 list_for_each_safe(pos, n, &port->list) {
1250 slot = container_of(pos, struct mvs_slot_info, list);
1251 rx_desc = (u32) (slot - mvi->slot_info);
1252 mvs_slot_complete(mvi, rx_desc, 1);
1253 }
1254}
1255
/*
 * Per-port interrupt service.  @events carries the global status bits
 * for this port; the detailed cause is read from the per-port IRQ
 * status register.  Handles unplug (POOF/decode error), COMWAKE,
 * signature-FIS / identify-done plug-in, and broadcast-change events,
 * then acknowledges everything observed.
 */
static void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
{
	struct pci_dev *pdev = mvi->pdev;
	struct sas_ha_struct *sas_ha = &mvi->sas;
	struct mvs_phy *phy = &mvi->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	phy->irq_status = mvs_read_port_irq_stat(mvi, phy_no);
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Port %d Event = %X\n",
		phy_no, phy->irq_status);

	/* Phy went away (or decode error): flush its queued commands. */
	if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) {
		mvs_release_task(mvi, phy_no);
		if (!mvs_is_phy_ready(mvi, phy_no)) {
			sas_phy_disconnected(sas_phy);
			sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
			dev_printk(KERN_INFO, &pdev->dev,
				"Port %d Unplug Notice\n", phy_no);

		} else
			mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL);
	}
	if (!(phy->irq_status & PHYEV_DEC_ERR)) {
		/* COMWAKE seen: start watching for the signature FIS. */
		if (phy->irq_status & PHYEV_COMWAKE) {
			u32 tmp = mvs_read_port_irq_mask(mvi, phy_no);
			mvs_write_port_irq_mask(mvi, phy_no,
						tmp | PHYEV_SIG_FIS);
		}
		if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
			phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
			if (phy->phy_status) {
				mvs_detect_porttype(mvi, phy_no);

				/* SATA: stop listening for further sig FIS. */
				if (phy->phy_type & PORT_TYPE_SATA) {
					u32 tmp = mvs_read_port_irq_mask(mvi,
								phy_no);
					tmp &= ~PHYEV_SIG_FIS;
					mvs_write_port_irq_mask(mvi,
								phy_no, tmp);
				}

				mvs_update_phyinfo(mvi, phy_no, 0);
				sas_ha->notify_phy_event(sas_phy,
							PHYE_OOB_DONE);
				mvs_bytes_dmaed(mvi, phy_no);
			} else {
				dev_printk(KERN_DEBUG, &pdev->dev,
					"plugin interrupt but phy is gone\n");
				mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET,
							NULL);
			}
		} else if (phy->irq_status & PHYEV_BROAD_CH) {
			/* Topology changed behind an expander. */
			mvs_release_task(mvi, phy_no);
			sas_ha->notify_port_event(sas_phy,
						PORTE_BROADCAST_RCVD);
		}
	}
	mvs_write_port_irq_stat(mvi, phy_no, phy->irq_status);
}
1320
/*
 * Drain the RX completion ring: complete finished commands, log
 * attention/error descriptors, free slots released by slot resets, and
 * (when @self_clear is set) run the full interrupt handler if an
 * attention bit was seen.  Always returns 0.
 */
static int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
{
	void __iomem *regs = mvi->regs;
	u32 rx_prod_idx, rx_desc;
	bool attn = false;
	struct pci_dev *pdev = mvi->pdev;

	/* the first dword in the RX ring is special: it contains
	 * a mirror of the hardware's RX producer index, so that
	 * we don't have to stall the CPU reading that register.
	 * The actual RX ring is offset by one dword, due to this.
	 */
	rx_prod_idx = mvi->rx_cons;
	mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
	if (mvi->rx_cons == 0xfff)	/* h/w hasn't touched RX ring yet */
		return 0;

	/* The CMPL_Q may come late, read from register and try again
	 * note: if coalescing is enabled,
	 * it will need to read from register every time for sure
	 */
	if (mvi->rx_cons == rx_prod_idx)
		mvi->rx_cons = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK;

	if (mvi->rx_cons == rx_prod_idx)
		return 0;

	while (mvi->rx_cons != rx_prod_idx) {

		/* increment our internal RX consumer pointer */
		rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);

		/* +1 skips the producer-index mirror dword at rx[0] */
		rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);

		if (likely(rx_desc & RXQ_DONE))
			mvs_slot_complete(mvi, rx_desc, 0);
		if (rx_desc & RXQ_ATTN) {
			attn = true;
			dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n",
				rx_desc);
		} else if (rx_desc & RXQ_ERR) {
			if (!(rx_desc & RXQ_DONE))
				mvs_slot_complete(mvi, rx_desc, 0);
			dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n",
				rx_desc);
		} else if (rx_desc & RXQ_SLOT_RESET) {
			dev_printk(KERN_DEBUG, &pdev->dev, "Slot reset[%X]\n",
				rx_desc);
			mvs_slot_free(mvi, rx_desc);
		}
	}

	if (attn && self_clear)
		mvs_int_full(mvi);

	return 0;
}
1378
1379#ifndef MVS_DISABLE_NVRAM
1380static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data)
1381{
1382 int timeout = 1000;
1383
1384 if (addr & ~SPI_ADDR_MASK)
1385 return -EINVAL;
1386
1387 writel(addr, regs + SPI_CMD);
1388 writel(TWSI_RD, regs + SPI_CTL);
1389
1390 while (timeout-- > 0) {
1391 if (readl(regs + SPI_CTL) & TWSI_RDY) {
1392 *data = readl(regs + SPI_DATA);
1393 return 0;
1394 }
1395
1396 udelay(10);
1397 }
1398
1399 return -EBUSY;
1400}
1401
/*
 * Read @buflen bytes from the EEPROM starting at byte offset @addr into
 * @buf using 32-bit reads, with separate handling for an unaligned head
 * and tail.  Returns 0 or the first mvs_eep_read() error.
 *
 * NOTE(review): for an unaligned @addr the head path reads the dword at
 * ALIGN(addr, 4), which rounds *up*; it looks like the rounded-down
 * address (addr & ~3) was intended.  Similarly the middle loop bound
 * ALIGN(addr_end, 4) rounds up, which can memcpy up to 3 bytes past the
 * requested length and makes the tail branch unreachable.  Confirm the
 * SPI address semantics (and callers' alignment) before changing.
 */
static int mvs_eep_read_buf(void __iomem *regs, u32 addr,
			    void *buf, u32 buflen)
{
	u32 addr_end, tmp_addr, i, j;
	u32 tmp = 0;
	int rc;
	u8 *tmp8, *buf8 = buf;

	addr_end = addr + buflen;
	tmp_addr = ALIGN(addr, 4);
	if (addr > 0xff)
		return -EINVAL;

	/* Unaligned head: take the trailing bytes of the first dword. */
	j = addr & 0x3;
	if (j) {
		rc = mvs_eep_read(regs, tmp_addr, &tmp);
		if (rc)
			return rc;

		tmp8 = (u8 *)&tmp;
		for (i = j; i < 4; i++)
			*buf8++ = tmp8[i];

		tmp_addr += 4;
	}

	/* Aligned middle: copy whole dwords. */
	for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) {
		rc = mvs_eep_read(regs, tmp_addr, &tmp);
		if (rc)
			return rc;

		memcpy(buf8, &tmp, 4);
		buf8 += 4;
	}

	/* Unaligned tail: take the leading bytes of the final dword. */
	if (tmp_addr < addr_end) {
		rc = mvs_eep_read(regs, tmp_addr, &tmp);
		if (rc)
			return rc;

		tmp8 = (u8 *)&tmp;
		j = addr_end - tmp_addr;
		for (i = 0; i < j; i++)
			*buf8++ = tmp8[i];

		tmp_addr += 4;
	}

	return 0;
}
1452#endif
1453
1454int mvs_nvram_read(struct mvs_info *mvi, u32 addr, void *buf, u32 buflen)
1455{
1456#ifndef MVS_DISABLE_NVRAM
1457 void __iomem *regs = mvi->regs;
1458 int rc, i;
1459 u32 sum;
1460 u8 hdr[2], *tmp;
1461 const char *msg;
1462
1463 rc = mvs_eep_read_buf(regs, addr, &hdr, 2);
1464 if (rc) {
1465 msg = "nvram hdr read failed";
1466 goto err_out;
1467 }
1468 rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen);
1469 if (rc) {
1470 msg = "nvram read failed";
1471 goto err_out;
1472 }
1473
1474 if (hdr[0] != 0x5A) {
1475 /* entry id */
1476 msg = "invalid nvram entry id";
1477 rc = -ENOENT;
1478 goto err_out;
1479 }
1480
1481 tmp = buf;
1482 sum = ((u32)hdr[0]) + ((u32)hdr[1]);
1483 for (i = 0; i < buflen; i++)
1484 sum += ((u32)tmp[i]);
1485
1486 if (sum) {
1487 msg = "nvram checksum failure";
1488 rc = -EILSEQ;
1489 goto err_out;
1490 }
1491
1492 return 0;
1493
1494err_out:
1495 dev_printk(KERN_ERR, &mvi->pdev->dev, "%s", msg);
1496 return rc;
1497#else
1498 /* FIXME , For SAS target mode */
1499 memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8);
1500 return 0;
1501#endif
1502}
1503
1504static void mvs_int_sata(struct mvs_info *mvi)
1505{
1506 u32 tmp;
1507 void __iomem *regs = mvi->regs;
1508 tmp = mr32(INT_STAT_SRS);
1509 mw32(INT_STAT_SRS, tmp & 0xFFFF);
1510}
1511
/*
 * Queue a SLOT_RESET command for @slot_idx on the TX ring (used when a
 * slot reports SLOT_BSY_ERR).  Non-ATA commands carry the wide-port phy
 * map (falling back to the libsas port mask) and no SATA register set;
 * ATA commands use the port's taskfileset.
 */
static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task,
			u32 slot_idx)
{
	void __iomem *regs = mvi->regs;
	struct domain_device *dev = task->dev;
	struct asd_sas_port *sas_port = dev->port;
	struct mvs_port *port = mvi->slot_info[slot_idx].port;
	u32 reg_set, phy_mask;

	if (!sas_protocol_ata(task->task_proto)) {
		reg_set = 0;
		phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap :
			sas_port->phy_mask;
	} else {
		reg_set = port->taskfileset;
		phy_mask = sas_port->phy_mask;
	}
	/* Build the immediate-mode TX descriptor, then ring the doorbell. */
	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | slot_idx |
					(TXQ_CMD_SLOT_RESET << TXQ_CMD_SHIFT) |
					(phy_mask << TXQ_PHY_SHIFT) |
					(reg_set << TXQ_SRS_SHIFT));

	mw32(TX_PROD_IDX, mvi->tx_prod);
	mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
}
1537
1538void mvs_int_full(struct mvs_info *mvi)
1539{
1540 void __iomem *regs = mvi->regs;
1541 u32 tmp, stat;
1542 int i;
1543
1544 stat = mr32(INT_STAT);
1545
1546 mvs_int_rx(mvi, false);
1547
1548 for (i = 0; i < MVS_MAX_PORTS; i++) {
1549 tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
1550 if (tmp)
1551 mvs_int_port(mvi, i, tmp);
1552 }
1553
1554 if (stat & CINT_SRS)
1555 mvs_int_sata(mvi);
1556
1557 mw32(INT_STAT, stat);
1558}
1559
1560#ifndef MVS_DISABLE_MSI
/*
 * MSI interrupt handler: either drain the RX ring directly under
 * mvi->lock, or defer all work to the tasklet when MVS_USE_TASKLET is
 * configured.  Always claims the interrupt.
 */
static irqreturn_t mvs_msi_interrupt(int irq, void *opaque)
{
	struct mvs_info *mvi = opaque;

#ifndef MVS_USE_TASKLET
	spin_lock(&mvi->lock);

	/* self_clear=true: run the full handler on attention bits. */
	mvs_int_rx(mvi, true);

	spin_unlock(&mvi->lock);
#else
	tasklet_schedule(&mvi->tasklet);
#endif
	return IRQ_HANDLED;
}
1576#endif
1577
/*
 * libsas abort entry point.  If the task already completed, report
 * TMF_RESP_FUNC_COMPLETE immediately.  Otherwise free any slot still
 * bound to the task and push it back through mvs_task_exec(), mapping
 * requeue success/failure to COMPLETE/FAILED.
 */
int mvs_task_abort(struct sas_task *task)
{
	int rc;
	unsigned long flags;
	struct mvs_info *mvi = task->dev->port->ha->lldd_ha;
	struct pci_dev *pdev = mvi->pdev;
	int tag;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		rc = TMF_RESP_FUNC_COMPLETE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		goto out_done;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	/* Protocol-specific logging only; no HW abort is issued here. */
	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! \n");
		break;
	case SAS_PROTOCOL_SSP:
		dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! \n");
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{
		dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! \n");
#if _MV_DUMP
		dev_printk(KERN_DEBUG, &pdev->dev, "Dump D2H FIS: \n");
		mvs_hexdump(sizeof(struct host_to_dev_fis),
				(void *)&task->ata_task.fis, 0);
		dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd : \n");
		mvs_hexdump(16, task->ata_task.atapi_packet, 0);
#endif
		spin_lock_irqsave(&task->task_state_lock, flags);
		if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) {
			/* TODO */
			;
		}
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		break;
	}
	default:
		break;
	}

	/* Drop the slot still holding this task, if any, then requeue. */
	if (mvs_find_tag(mvi, task, &tag)) {
		spin_lock_irqsave(&mvi->lock, flags);
		mvs_slot_task_free(mvi, task, &mvi->slot_info[tag], tag);
		spin_unlock_irqrestore(&mvi->lock, flags);
	}
	if (!mvs_task_exec(task, 1, GFP_ATOMIC))
		rc = TMF_RESP_FUNC_COMPLETE;
	else
		rc = TMF_RESP_FUNC_FAILED;
out_done:
	return rc;
}
1636
/*
 * One-time controller bring-up: global reset (with optional phy power
 * fix via PCI config space), chip reset, DMA ring base programming,
 * per-phy SAS address setup and phy reset, interrupt masking, and
 * finally enabling the TX/RX engines.  Returns 0 on success or -EBUSY
 * if the global reset never completes.
 */
int __devinit mvs_hw_init(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	int i;
	u32 tmp, cctl;

	/* make sure interrupts are masked immediately (paranoia) */
	mw32(GBL_CTL, 0);
	tmp = mr32(GBL_CTL);

	/* Reset Controller */
	if (!(tmp & HBA_RST)) {
		if (mvi->flags & MVF_PHY_PWR_FIX) {
			/* Power-cycle the phys through PCI config space. */
			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
			tmp &= ~PCTL_PWR_ON;
			tmp |= PCTL_OFF;
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);

			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
			tmp &= ~PCTL_PWR_ON;
			tmp |= PCTL_OFF;
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
		}

		/* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
		mw32_f(GBL_CTL, HBA_RST);
	}

	/* wait for reset to finish; timeout is just a guess */
	i = 1000;
	while (i-- > 0) {
		msleep(10);

		if (!(mr32(GBL_CTL) & HBA_RST))
			break;
	}
	if (mr32(GBL_CTL) & HBA_RST) {
		dev_printk(KERN_ERR, &mvi->pdev->dev, "HBA reset failed\n");
		return -EBUSY;
	}

	/* Init Chip */
	/* make sure RST is set; HBA_RST /should/ have done that for us */
	cctl = mr32(CTL);
	if (cctl & CCTL_RST)
		cctl &= ~CCTL_RST;
	else
		mw32_f(CTL, cctl | CCTL_RST);

	/* write to device control _AND_ device status register? - A.C. */
	pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
	tmp &= ~PRD_REQ_MASK;
	tmp |= PRD_REQ_SIZE;
	pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);

	/* Power the phys back on. */
	pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
	tmp |= PCTL_PWR_ON;
	tmp &= ~PCTL_OFF;
	pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);

	pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
	tmp |= PCTL_PWR_ON;
	tmp &= ~PCTL_OFF;
	pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);

	mw32_f(CTL, cctl);

	/* reset control */
	mw32(PCS, 0);		/*MVS_PCS */

	mvs_phy_hacks(mvi);

	/* Program the 64-bit DMA ring bases as two 32-bit halves. */
	mw32(CMD_LIST_LO, mvi->slot_dma);
	mw32(CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);

	mw32(RX_FIS_LO, mvi->rx_fis_dma);
	mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);

	mw32(TX_CFG, MVS_CHIP_SLOT_SZ);
	mw32(TX_LO, mvi->tx_dma);
	mw32(TX_HI, (mvi->tx_dma >> 16) >> 16);

	mw32(RX_CFG, MVS_RX_RING_SZ);
	mw32(RX_LO, mvi->rx_dma);
	mw32(RX_HI, (mvi->rx_dma >> 16) >> 16);

	/* enable auto port detection */
	mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN);
	msleep(1100);
	/* init and reset phys */
	for (i = 0; i < mvi->chip->n_phy; i++) {
		u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]);
		u32 hi = be32_to_cpu(*(u32 *)&mvi->sas_addr[0]);

		mvs_detect_porttype(mvi, i);

		/* set phy local SAS address */
		mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
		mvs_write_port_cfg_data(mvi, i, lo);
		mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
		mvs_write_port_cfg_data(mvi, i, hi);

		/* reset phy */
		tmp = mvs_read_phy_ctl(mvi, i);
		tmp |= PHY_RST;
		mvs_write_phy_ctl(mvi, i, tmp);
	}

	msleep(100);

	for (i = 0; i < mvi->chip->n_phy; i++) {
		/* clear phy int status */
		tmp = mvs_read_port_irq_stat(mvi, i);
		tmp &= ~PHYEV_SIG_FIS;
		mvs_write_port_irq_stat(mvi, i, tmp);

		/* set phy int mask */
		tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
			PHYEV_ID_DONE | PHYEV_DEC_ERR;
		mvs_write_port_irq_mask(mvi, i, tmp);

		msleep(100);
		mvs_update_phyinfo(mvi, i, 1);
		mvs_enable_xmt(mvi, i);
	}

	/* FIXME: update wide port bitmaps */

	/* little endian for open address and command table, etc. */
	/* A.C.
	 * it seems that ( from the spec ) turning on big-endian won't
	 * do us any good on big-endian machines, need further confirmation
	 */
	cctl = mr32(CTL);
	cctl |= CCTL_ENDIAN_CMD;
	cctl |= CCTL_ENDIAN_DATA;
	cctl &= ~CCTL_ENDIAN_OPEN;
	cctl |= CCTL_ENDIAN_RSP;
	mw32_f(CTL, cctl);

	/* reset CMD queue */
	tmp = mr32(PCS);
	tmp |= PCS_CMD_RST;
	mw32(PCS, tmp);
	/* interrupt coalescing may cause missing HW interrupt in some case,
	 * and the max count is 0x1ff, while our max slot is 0x200,
	 * it will make count 0.
	 */
	tmp = 0;
	mw32(INT_COAL, tmp);

	tmp = 0x100;
	mw32(INT_COAL_TMOUT, tmp);

	/* ladies and gentlemen, start your engines */
	mw32(TX_CFG, 0);
	mw32(TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
	mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN);
	/* enable CMD/CMPL_Q/RESP mode */
	mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN);

	/* enable completion queue interrupt */
	tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS);
	mw32(INT_MASK, tmp);

	/* Enable SRS interrupt */
	mw32(INT_MASK_SRS, 0xFF);
	return 0;
}
1806
Jeff Garzikdd4969a2009-05-08 17:44:01 -04001807void __devinit mvs_print_info(struct mvs_info *mvi)
Jeff Garzikb5762942007-10-25 20:58:22 -04001808{
1809 struct pci_dev *pdev = mvi->pdev;
1810 static int printed_version;
1811
1812 if (!printed_version++)
1813 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
1814
1815 dev_printk(KERN_INFO, &pdev->dev, "%u phys, addr %llx\n",
1816 mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr));
1817}
1818