/*
	mv_init.c - Marvell 88SE6440 SAS/SATA init support

	Copyright 2007 Red Hat, Inc.
	Copyright 2008 Marvell. <kewei@marvell.com>

	This program is free software; you can redistribute it and/or
	modify it under the terms of the GNU General Public License as
	published by the Free Software Foundation; either version 2,
	or (at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty
	of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
	See the GNU General Public License for more details.

	You should have received a copy of the GNU General Public
	License along with this program; see the file COPYING.  If not,
	write to the Free Software Foundation, 675 Mass Ave, Cambridge,
	MA 02139, USA.

 */
24#include "mv_sas.h"
25#include "mv_64xx.h"
26#include "mv_chips.h"
27
/* SAS transport template shared by every HBA instance; created in
 * mvs_init() and released in mvs_exit(). */
static struct scsi_transport_template *mvs_stt;

/* Per-chip parameters, indexed by the chip_* value carried in
 * mvs_pci_table's driver_data.  The first field is the phy count
 * (read as chip->n_phy below); the remaining two are defined by
 * struct mvs_chip_info in mv_sas.h -- presumably slot/ring sizing,
 * confirm against that header. */
static const struct mvs_chip_info mvs_chips[] = {
	[chip_6320] =		{ 2, 16, 9  },
	[chip_6440] =		{ 4, 16, 9  },
	[chip_6480] =		{ 8, 32, 10 },
};
35
/*
 * SCSI host template.  Most handlers are the generic libsas (sas_*)
 * implementations; the driver supplies its own slave_configure and the
 * scan_start/scan_finished pair.  can_queue and cmd_per_lun set here
 * are placeholders -- mvs_alloc() overrides them per HBA.
 */
static struct scsi_host_template mvs_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.queuecommand		= sas_queuecommand,
	.target_alloc		= sas_target_alloc,
	.slave_configure	= mvs_slave_configure,
	.slave_destroy		= sas_slave_destroy,
	.scan_finished		= mvs_scan_finished,
	.scan_start		= mvs_scan_start,
	.change_queue_depth	= sas_change_queue_depth,
	.change_queue_type	= sas_change_queue_type,
	.bios_param		= sas_bios_param,
	.can_queue		= 1,
	.cmd_per_lun		= 1,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_device_reset_handler = sas_eh_device_reset_handler,
	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
	.slave_alloc		= sas_slave_alloc,
	.target_destroy		= sas_target_destroy,
	.ioctl			= sas_ioctl,
};
60
/* Low-level driver entry points that libsas calls into (task execution,
 * phy control, abort and reset handling, port formation notification). */
static struct sas_domain_function_template mvs_transport_ops = {
	.lldd_execute_task	= mvs_task_exec,
	.lldd_control_phy	= mvs_phy_control,
	.lldd_abort_task	= mvs_task_abort,
	.lldd_port_formed	= mvs_port_formed,
	.lldd_I_T_nexus_reset	= mvs_I_T_nexus_reset,
};
68
69static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
70{
71 struct mvs_phy *phy = &mvi->phy[phy_id];
72 struct asd_sas_phy *sas_phy = &phy->sas_phy;
73
74 sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
75 sas_phy->class = SAS;
76 sas_phy->iproto = SAS_PROTOCOL_ALL;
77 sas_phy->tproto = 0;
78 sas_phy->type = PHY_TYPE_PHYSICAL;
79 sas_phy->role = PHY_ROLE_INITIATOR;
80 sas_phy->oob_mode = OOB_NOT_CONNECTED;
81 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
82
83 sas_phy->id = phy_id;
84 sas_phy->sas_addr = &mvi->sas_addr[0];
85 sas_phy->frame_rcvd = &phy->frame_rcvd[0];
86 sas_phy->ha = &mvi->sas;
87 sas_phy->lldd_phy = phy;
88}
89
90static void mvs_free(struct mvs_info *mvi)
91{
92 int i;
93
94 if (!mvi)
95 return;
96
97 for (i = 0; i < MVS_SLOTS; i++) {
98 struct mvs_slot_info *slot = &mvi->slot_info[i];
99
100 if (slot->buf)
101 dma_free_coherent(&mvi->pdev->dev, MVS_SLOT_BUF_SZ,
102 slot->buf, slot->buf_dma);
103 }
104
105 if (mvi->tx)
106 dma_free_coherent(&mvi->pdev->dev,
107 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
108 mvi->tx, mvi->tx_dma);
109 if (mvi->rx_fis)
110 dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ,
111 mvi->rx_fis, mvi->rx_fis_dma);
112 if (mvi->rx)
113 dma_free_coherent(&mvi->pdev->dev,
114 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
115 mvi->rx, mvi->rx_dma);
116 if (mvi->slot)
117 dma_free_coherent(&mvi->pdev->dev,
118 sizeof(*mvi->slot) * MVS_SLOTS,
119 mvi->slot, mvi->slot_dma);
120#ifdef MVS_ENABLE_PERI
121 if (mvi->peri_regs)
122 iounmap(mvi->peri_regs);
123#endif
124 if (mvi->regs)
125 iounmap(mvi->regs);
126 if (mvi->shost)
127 scsi_host_put(mvi->shost);
128 kfree(mvi->sas.sas_port);
129 kfree(mvi->sas.sas_phy);
130 kfree(mvi);
131}
132
#ifdef MVS_USE_TASKLET
/*
 * Deferred interrupt work, scheduled from mvs_interrupt().  Runs the
 * interrupt service routines in softirq context under mvi->lock.
 * data is the mvs_info pointer passed to tasklet_init() in mvs_alloc().
 */
static void mvs_tasklet(unsigned long data)
{
	struct mvs_info *mvi = (struct mvs_info *) data;
	unsigned long flags;

	spin_lock_irqsave(&mvi->lock, flags);

#ifdef MVS_DISABLE_MSI
	/* No MSI: service every interrupt source. */
	mvs_int_full(mvi);
#else
	/* MSI build: only the RX completion path is serviced here. */
	mvs_int_rx(mvi, true);
#endif
	spin_unlock_irqrestore(&mvi->lock, flags);
}
#endif
149
/*
 * Hard IRQ handler (registered IRQF_SHARED in mvs_pci_init()).
 * Reads the global interrupt status to decide whether this HBA raised
 * the interrupt; 0 means not ours, and all-ones is treated the same
 * (all-ones is what a read from an absent device returns).
 */
static irqreturn_t mvs_interrupt(int irq, void *opaque)
{
	struct mvs_info *mvi = opaque;
	void __iomem *regs = mvi->regs;
	u32 stat;

	stat = mr32(GBL_INT_STAT);

	if (stat == 0 || stat == 0xffffffff)
		return IRQ_NONE;

	/* clear CMD_CMPLT ASAP */
	mw32_f(INT_STAT, CINT_DONE);

#ifndef MVS_USE_TASKLET
	/* Service everything right here, under the HBA lock. */
	spin_lock(&mvi->lock);

	mvs_int_full(mvi);

	spin_unlock(&mvi->lock);
#else
	/* Defer the actual servicing to mvs_tasklet(). */
	tasklet_schedule(&mvi->tasklet);
#endif
	return IRQ_HANDLED;
}
175
176static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev,
177 const struct pci_device_id *ent)
178{
179 struct mvs_info *mvi;
180 unsigned long res_start, res_len, res_flag;
181 struct asd_sas_phy **arr_phy;
182 struct asd_sas_port **arr_port;
183 const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data];
184 int i;
185
186 /*
187 * alloc and init our per-HBA mvs_info struct
188 */
189
190 mvi = kzalloc(sizeof(*mvi), GFP_KERNEL);
191 if (!mvi)
192 return NULL;
193
194 spin_lock_init(&mvi->lock);
195#ifdef MVS_USE_TASKLET
196 tasklet_init(&mvi->tasklet, mvs_tasklet, (unsigned long)mvi);
197#endif
198 mvi->pdev = pdev;
199 mvi->chip = chip;
200
201 if (pdev->device == 0x6440 && pdev->revision == 0)
202 mvi->flags |= MVF_PHY_PWR_FIX;
203
204 /*
205 * alloc and init SCSI, SAS glue
206 */
207
208 mvi->shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
209 if (!mvi->shost)
210 goto err_out;
211
212 arr_phy = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
213 arr_port = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
214 if (!arr_phy || !arr_port)
215 goto err_out;
216
217 for (i = 0; i < MVS_MAX_PHYS; i++) {
218 mvs_phy_init(mvi, i);
219 arr_phy[i] = &mvi->phy[i].sas_phy;
220 arr_port[i] = &mvi->port[i].sas_port;
221 mvi->port[i].taskfileset = MVS_ID_NOT_MAPPED;
222 mvi->port[i].wide_port_phymap = 0;
223 mvi->port[i].port_attached = 0;
224 INIT_LIST_HEAD(&mvi->port[i].list);
225 }
226
227 SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas;
228 mvi->shost->transportt = mvs_stt;
229 mvi->shost->max_id = 21;
230 mvi->shost->max_lun = ~0;
231 mvi->shost->max_channel = 0;
232 mvi->shost->max_cmd_len = 16;
233
234 mvi->sas.sas_ha_name = DRV_NAME;
235 mvi->sas.dev = &pdev->dev;
236 mvi->sas.lldd_module = THIS_MODULE;
237 mvi->sas.sas_addr = &mvi->sas_addr[0];
238 mvi->sas.sas_phy = arr_phy;
239 mvi->sas.sas_port = arr_port;
240 mvi->sas.num_phys = chip->n_phy;
241 mvi->sas.lldd_max_execute_num = 1;
242 mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE;
243 mvi->shost->can_queue = MVS_CAN_QUEUE;
244 mvi->shost->cmd_per_lun = MVS_SLOTS / mvi->sas.num_phys;
245 mvi->sas.lldd_ha = mvi;
246 mvi->sas.core.shost = mvi->shost;
247
248 mvs_tag_init(mvi);
249
250 /*
251 * ioremap main and peripheral registers
252 */
253
254#ifdef MVS_ENABLE_PERI
255 res_start = pci_resource_start(pdev, 2);
256 res_len = pci_resource_len(pdev, 2);
257 if (!res_start || !res_len)
258 goto err_out;
259
260 mvi->peri_regs = ioremap_nocache(res_start, res_len);
261 if (!mvi->peri_regs)
262 goto err_out;
263#endif
264
265 res_start = pci_resource_start(pdev, 4);
266 res_len = pci_resource_len(pdev, 4);
267 if (!res_start || !res_len)
268 goto err_out;
269
270 res_flag = pci_resource_flags(pdev, 4);
271 if (res_flag & IORESOURCE_CACHEABLE)
272 mvi->regs = ioremap(res_start, res_len);
273 else
274 mvi->regs = ioremap_nocache(res_start, res_len);
275
276 if (!mvi->regs)
277 goto err_out;
278
279 /*
280 * alloc and init our DMA areas
281 */
282
283 mvi->tx = dma_alloc_coherent(&pdev->dev,
284 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
285 &mvi->tx_dma, GFP_KERNEL);
286 if (!mvi->tx)
287 goto err_out;
288 memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
289
290 mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ,
291 &mvi->rx_fis_dma, GFP_KERNEL);
292 if (!mvi->rx_fis)
293 goto err_out;
294 memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
295
296 mvi->rx = dma_alloc_coherent(&pdev->dev,
297 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
298 &mvi->rx_dma, GFP_KERNEL);
299 if (!mvi->rx)
300 goto err_out;
301 memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
302
303 mvi->rx[0] = cpu_to_le32(0xfff);
304 mvi->rx_cons = 0xfff;
305
306 mvi->slot = dma_alloc_coherent(&pdev->dev,
307 sizeof(*mvi->slot) * MVS_SLOTS,
308 &mvi->slot_dma, GFP_KERNEL);
309 if (!mvi->slot)
310 goto err_out;
311 memset(mvi->slot, 0, sizeof(*mvi->slot) * MVS_SLOTS);
312
313 for (i = 0; i < MVS_SLOTS; i++) {
314 struct mvs_slot_info *slot = &mvi->slot_info[i];
315
316 slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ,
317 &slot->buf_dma, GFP_KERNEL);
318 if (!slot->buf)
319 goto err_out;
320 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
321 }
322
323 /* finally, read NVRAM to get our SAS address */
324 if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8))
325 goto err_out;
326 return mvi;
327
328err_out:
329 mvs_free(mvi);
330 return NULL;
331}
332
333/* move to PCI layer or libata core? */
334static int pci_go_64(struct pci_dev *pdev)
335{
336 int rc;
337
338 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
339 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
340 if (rc) {
341 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
342 if (rc) {
343 dev_printk(KERN_ERR, &pdev->dev,
344 "64-bit DMA enable failed\n");
345 return rc;
346 }
347 }
348 } else {
349 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
350 if (rc) {
351 dev_printk(KERN_ERR, &pdev->dev,
352 "32-bit DMA enable failed\n");
353 return rc;
354 }
355 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
356 if (rc) {
357 dev_printk(KERN_ERR, &pdev->dev,
358 "32-bit consistent DMA enable failed\n");
359 return rc;
360 }
361 }
362
363 return rc;
364}
365
366static int __devinit mvs_pci_init(struct pci_dev *pdev,
367 const struct pci_device_id *ent)
368{
369 int rc;
370 struct mvs_info *mvi;
371 irq_handler_t irq_handler = mvs_interrupt;
372
373 rc = pci_enable_device(pdev);
374 if (rc)
375 return rc;
376
377 pci_set_master(pdev);
378
379 rc = pci_request_regions(pdev, DRV_NAME);
380 if (rc)
381 goto err_out_disable;
382
383 rc = pci_go_64(pdev);
384 if (rc)
385 goto err_out_regions;
386
387 mvi = mvs_alloc(pdev, ent);
388 if (!mvi) {
389 rc = -ENOMEM;
390 goto err_out_regions;
391 }
392
393 rc = mvs_hw_init(mvi);
394 if (rc)
395 goto err_out_mvi;
396
397#ifndef MVS_DISABLE_MSI
398 if (!pci_enable_msi(pdev)) {
399 u32 tmp;
400 void __iomem *regs = mvi->regs;
401 mvi->flags |= MVF_MSI;
402 irq_handler = mvs_msi_interrupt;
403 tmp = mr32(PCS);
404 mw32(PCS, tmp | PCS_SELF_CLEAR);
405 }
406#endif
407
408 rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi);
409 if (rc)
410 goto err_out_msi;
411
412 rc = scsi_add_host(mvi->shost, &pdev->dev);
413 if (rc)
414 goto err_out_irq;
415
416 rc = sas_register_ha(&mvi->sas);
417 if (rc)
418 goto err_out_shost;
419
420 pci_set_drvdata(pdev, mvi);
421
422 mvs_print_info(mvi);
423
424 mvs_hba_interrupt_enable(mvi);
425
426 scsi_scan_host(mvi->shost);
427
428 return 0;
429
430err_out_shost:
431 scsi_remove_host(mvi->shost);
432err_out_irq:
433 free_irq(pdev->irq, mvi);
434err_out_msi:
435 if (mvi->flags |= MVF_MSI)
436 pci_disable_msi(pdev);
437err_out_mvi:
438 mvs_free(mvi);
439err_out_regions:
440 pci_release_regions(pdev);
441err_out_disable:
442 pci_disable_device(pdev);
443 return rc;
444}
445
446static void __devexit mvs_pci_remove(struct pci_dev *pdev)
447{
448 struct mvs_info *mvi = pci_get_drvdata(pdev);
449
450 pci_set_drvdata(pdev, NULL);
451
452 if (mvi) {
453 sas_unregister_ha(&mvi->sas);
454 mvs_hba_interrupt_disable(mvi);
455 sas_remove_host(mvi->shost);
456 scsi_remove_host(mvi->shost);
457
458 free_irq(pdev->irq, mvi);
459 if (mvi->flags & MVF_MSI)
460 pci_disable_msi(pdev);
461 mvs_free(mvi);
462 pci_release_regions(pdev);
463 }
464 pci_disable_device(pdev);
465}
466
/*
 * PCI IDs handled by this driver.  PCI matching is first-match, so the
 * quirk entry for device 0x6440 with subdevice 0x6480 must stay ahead
 * of the generic 0x6440 entry: those boards are driven as chip_6480.
 */
static struct pci_device_id __devinitdata mvs_pci_table[] = {
	{ PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
	{ PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
	{
		.vendor 	= PCI_VENDOR_ID_MARVELL,
		.device 	= 0x6440,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= 0x6480,
		.class		= 0,
		.class_mask	= 0,
		.driver_data	= chip_6480,
	},
	{ PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
	{ PCI_VDEVICE(MARVELL, 0x6480), chip_6480 },

	{ }	/* terminate list */
};
484
/* PCI driver glue: probe/remove entry points and the ID table above. */
static struct pci_driver mvs_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= mvs_pci_table,
	.probe		= mvs_pci_init,
	.remove		= __devexit_p(mvs_pci_remove),
};
491
492static int __init mvs_init(void)
493{
494 int rc;
495
496 mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
497 if (!mvs_stt)
498 return -ENOMEM;
499
500 rc = pci_register_driver(&mvs_pci_driver);
501 if (rc)
502 goto err_out;
503
504 return 0;
505
506err_out:
507 sas_release_transport(mvs_stt);
508 return rc;
509}
510
/* Module exit: unregister the PCI driver (tearing down any bound HBAs
 * via mvs_pci_remove) before releasing the transport template. */
static void __exit mvs_exit(void)
{
	pci_unregister_driver(&mvs_pci_driver);
	sas_release_transport(mvs_stt);
}
516
module_init(mvs_init);
module_exit(mvs_exit);

/* Module metadata; the device table lets udev/modprobe autoload us. */
MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mvs_pci_table);