/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/string.h>
#include <linux/export.h>

#include "csio_init.h"
#include "csio_defs.h"

#define CSIO_MIN_MEMPOOL_SZ	64

static struct dentry *csio_debugfs_root;

static struct scsi_transport_template *csio_fcoe_transport;
static struct scsi_transport_template *csio_fcoe_transport_vport;

/*
 * debugfs support
 */
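/*
 * csio_mem_read - debugfs read handler for the adapter memory files.
 *
 * Reads adapter memory (EDC0, EDC1 or MC, encoded in the low bits of
 * file->private_data) in 64-byte chunks and copies it to user space.
 * The file size set at creation time bounds how far a read can go.
 */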
static ssize_t
csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	loff_t pos = *ppos;
	loff_t avail = file->f_path.dentry->d_inode->i_size;
	unsigned int mem = (uintptr_t)file->private_data & 3;
	struct csio_hw *hw = file->private_data - mem;

	if (pos < 0)
		return -EINVAL;
	if (pos >= avail)
		return 0;
	if (count > avail - pos)
		count = avail - pos;

	while (count) {
		size_t len;
		int ret, ofst;
		__be32 data[16];

		if (mem == MEM_MC)
			ret = csio_hw_mc_read(hw, pos, data, NULL);
		else
			ret = csio_hw_edc_read(hw, mem, pos, data, NULL);
		if (ret)
			return ret;

		ofst = pos % sizeof(data);
		len = min(count, sizeof(data) - ofst);
		if (copy_to_user(buf, (u8 *)data + ofst, len))
			return -EFAULT;

		buf += len;
		pos += len;
		count -= len;
	}
	count = pos - *ppos;
	*ppos = pos;
	return count;
}

static const struct file_operations csio_mem_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = simple_open,
	.read    = csio_mem_read,
	.llseek  = default_llseek,
};

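/*
 * csio_add_debugfs_mem - Add a debugfs file for one adapter memory region.
 *
 * The memory index is encoded in the low bits of the file's private data,
 * and the inode size is set to the region size so reads are bounded.
 */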
static void csio_add_debugfs_mem(struct csio_hw *hw, const char *name,
				 unsigned int idx, unsigned int size_mb)
{
	struct dentry *de;

	de = debugfs_create_file(name, S_IRUSR, hw->debugfs_root,
				 (void *)hw + idx, &csio_mem_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = size_mb << 20;
}

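/*
 * csio_setup_debugfs - Expose the enabled adapter memories via debugfs.
 *
 * Checks MA_TARGET_MEM_ENABLE and creates an "edc0", "edc1" and/or "mc"
 * file for each memory that is present on this adapter.
 */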
static int csio_setup_debugfs(struct csio_hw *hw)
{
	int i;

	if (IS_ERR_OR_NULL(hw->debugfs_root))
		return -1;

	i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE);
	if (i & EDRAM0_ENABLE)
		csio_add_debugfs_mem(hw, "edc0", MEM_EDC0, 5);
	if (i & EDRAM1_ENABLE)
		csio_add_debugfs_mem(hw, "edc1", MEM_EDC1, 5);
	if (i & EXT_MEM_ENABLE)
		csio_add_debugfs_mem(hw, "mc", MEM_MC,
		    EXT_MEM_SIZE_GET(csio_rd_reg32(hw, MA_EXT_MEMORY_BAR)));
	return 0;
}

/*
 * csio_dfs_create - Creates and sets up per-hw debugfs.
 *
 */
static int
csio_dfs_create(struct csio_hw *hw)
{
	if (csio_debugfs_root) {
		hw->debugfs_root = debugfs_create_dir(pci_name(hw->pdev),
						      csio_debugfs_root);
		csio_setup_debugfs(hw);
	}

	return 0;
}

/*
 * csio_dfs_destroy - Destroys per-hw debugfs.
 */
static int
csio_dfs_destroy(struct csio_hw *hw)
{
	if (hw->debugfs_root)
		debugfs_remove_recursive(hw->debugfs_root);

	return 0;
}

/*
 * csio_dfs_init - Debug filesystem initialization for the module.
 *
 */
static int
csio_dfs_init(void)
{
	csio_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!csio_debugfs_root)
		pr_warn("Could not create debugfs entry, continuing\n");

	return 0;
}

/*
 * csio_dfs_exit - debugfs cleanup for the module.
 */
static void
csio_dfs_exit(void)
{
	debugfs_remove(csio_debugfs_root);
}

/*
 * csio_pci_init - PCI initialization.
 * @pdev: PCI device.
 * @bars: Bitmask of bars to be requested.
 *
 * Initializes the PCI function by enabling MMIO, setting bus
 * mastership and setting DMA mask.
 */
static int
csio_pci_init(struct pci_dev *pdev, int *bars)
{
	int rv = -ENODEV;

	*bars = pci_select_bars(pdev, IORESOURCE_MEM);

	if (pci_enable_device_mem(pdev))
		goto err;

	if (pci_request_selected_regions(pdev, *bars, KBUILD_MODNAME))
		goto err_disable_device;

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	} else {
		dev_err(&pdev->dev, "No suitable DMA available.\n");
		goto err_release_regions;
	}

	return 0;

err_release_regions:
	pci_release_selected_regions(pdev, *bars);
err_disable_device:
	pci_disable_device(pdev);
err:
	return rv;
}

/*
 * csio_pci_exit - PCI uninitialization.
 * @pdev: PCI device.
 * @bars: Bars to be released.
 *
 */
static void
csio_pci_exit(struct pci_dev *pdev, int *bars)
{
	pci_release_selected_regions(pdev, *bars);
	pci_disable_device(pdev);
}

/*
 * csio_hw_init_workers - Initialize the HW module's worker threads.
 * @hw: HW module.
 *
 */
static void
csio_hw_init_workers(struct csio_hw *hw)
{
	INIT_WORK(&hw->evtq_work, csio_evtq_worker);
}

static void
csio_hw_exit_workers(struct csio_hw *hw)
{
	cancel_work_sync(&hw->evtq_work);
	flush_scheduled_work();
}

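/*
 * csio_create_queues - Register the previously allocated queues with FW.
 *
 * Creates the forward-interrupt IQ (when not in MSI-X mode), the FW event
 * IQ, the management EQ and the per-port, per-CPU SCSI IQ/EQ pairs. On any
 * failure the already-created queues are torn down.
 */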
static int
csio_create_queues(struct csio_hw *hw)
{
	int i, j;
	struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
	int rv;
	struct csio_scsi_cpu_info *info;

	if (hw->flags & CSIO_HWF_Q_FW_ALLOCED)
		return 0;

	if (hw->intr_mode != CSIO_IM_MSIX) {
		rv = csio_wr_iq_create(hw, NULL, hw->intr_iq_idx,
				       0, hw->pport[0].portid, false, NULL);
		if (rv != 0) {
			csio_err(hw, " Forward Interrupt IQ failed!: %d\n", rv);
			return rv;
		}
	}

	/* FW event queue */
	rv = csio_wr_iq_create(hw, NULL, hw->fwevt_iq_idx,
			       csio_get_fwevt_intr_idx(hw),
			       hw->pport[0].portid, true, NULL);
	if (rv != 0) {
		csio_err(hw, "FW event IQ config failed!: %d\n", rv);
		return rv;
	}

	/* Create mgmt queue */
	rv = csio_wr_eq_create(hw, NULL, mgmtm->eq_idx,
			       mgmtm->iq_idx, hw->pport[0].portid, NULL);

	if (rv != 0) {
		csio_err(hw, "Mgmt EQ create failed!: %d\n", rv);
		goto err;
	}

	/* Create SCSI queues */
	for (i = 0; i < hw->num_pports; i++) {
		info = &hw->scsi_cpu_info[i];

		for (j = 0; j < info->max_cpus; j++) {
			struct csio_scsi_qset *sqset = &hw->sqset[i][j];

			rv = csio_wr_iq_create(hw, NULL, sqset->iq_idx,
					       sqset->intr_idx, i, false, NULL);
			if (rv != 0) {
				csio_err(hw,
				    "SCSI module IQ config failed [%d][%d]:%d\n",
				    i, j, rv);
				goto err;
			}
			rv = csio_wr_eq_create(hw, NULL, sqset->eq_idx,
					       sqset->iq_idx, i, NULL);
			if (rv != 0) {
				csio_err(hw,
				    "SCSI module EQ config failed [%d][%d]:%d\n",
				    i, j, rv);
				goto err;
			}
		} /* for all CPUs */
	} /* For all ports */

	hw->flags |= CSIO_HWF_Q_FW_ALLOCED;
	return 0;
err:
	csio_wr_destroy_queues(hw, true);
	return -EINVAL;
}

/*
 * csio_config_queues - Configure the DMA queues.
 * @hw: HW module.
 *
 * Allocates memory for the queues and registers them with FW.
 */
int
csio_config_queues(struct csio_hw *hw)
{
	int i, j, idx, k = 0;
	int rv;
	struct csio_scsi_qset *sqset;
	struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
	struct csio_scsi_qset *orig;
	struct csio_scsi_cpu_info *info;

	if (hw->flags & CSIO_HWF_Q_MEM_ALLOCED)
		return csio_create_queues(hw);

	/* Calculate number of SCSI queues for MSIX we would like */
	hw->num_scsi_msix_cpus = num_online_cpus();
	hw->num_sqsets = num_online_cpus() * hw->num_pports;

	if (hw->num_sqsets > CSIO_MAX_SCSI_QSETS) {
		hw->num_sqsets = CSIO_MAX_SCSI_QSETS;
		hw->num_scsi_msix_cpus = CSIO_MAX_SCSI_CPU;
	}

	/* Initialize max_cpus, may get reduced during msix allocations */
	for (i = 0; i < hw->num_pports; i++)
		hw->scsi_cpu_info[i].max_cpus = hw->num_scsi_msix_cpus;

	csio_dbg(hw, "nsqsets:%d scpus:%d\n",
		 hw->num_sqsets, hw->num_scsi_msix_cpus);

	csio_intr_enable(hw);

	if (hw->intr_mode != CSIO_IM_MSIX) {

		/* Allocate Forward interrupt iq. */
		hw->intr_iq_idx = csio_wr_alloc_q(hw, CSIO_INTR_IQSIZE,
						  CSIO_INTR_WRSIZE, CSIO_INGRESS,
						  (void *)hw, 0, 0, NULL);
		if (hw->intr_iq_idx == -1) {
			csio_err(hw,
				 "Forward interrupt queue creation failed\n");
			goto intr_disable;
		}
	}

	/* Allocate the FW evt queue */
	hw->fwevt_iq_idx = csio_wr_alloc_q(hw, CSIO_FWEVT_IQSIZE,
					   CSIO_FWEVT_WRSIZE,
					   CSIO_INGRESS, (void *)hw,
					   CSIO_FWEVT_FLBUFS, 0,
					   csio_fwevt_intx_handler);
	if (hw->fwevt_iq_idx == -1) {
		csio_err(hw, "FW evt queue creation failed\n");
		goto intr_disable;
	}

	/* Allocate the mgmt queue */
	mgmtm->eq_idx = csio_wr_alloc_q(hw, CSIO_MGMT_EQSIZE,
					CSIO_MGMT_EQ_WRSIZE,
					CSIO_EGRESS, (void *)hw, 0, 0, NULL);
	if (mgmtm->eq_idx == -1) {
		csio_err(hw, "Failed to alloc egress queue for mgmt module\n");
		goto intr_disable;
	}

	/* Use FW IQ for MGMT req completion */
	mgmtm->iq_idx = hw->fwevt_iq_idx;

	/* Allocate SCSI queues */
	for (i = 0; i < hw->num_pports; i++) {
		info = &hw->scsi_cpu_info[i];

		for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
			sqset = &hw->sqset[i][j];

			if (j >= info->max_cpus) {
				k = j % info->max_cpus;
				orig = &hw->sqset[i][k];
				sqset->eq_idx = orig->eq_idx;
				sqset->iq_idx = orig->iq_idx;
				continue;
			}

			idx = csio_wr_alloc_q(hw, csio_scsi_eqsize, 0,
					      CSIO_EGRESS, (void *)hw, 0, 0,
					      NULL);
			if (idx == -1) {
				csio_err(hw, "EQ creation failed for idx:%d\n",
					 idx);
				goto intr_disable;
			}

			sqset->eq_idx = idx;

			idx = csio_wr_alloc_q(hw, CSIO_SCSI_IQSIZE,
					      CSIO_SCSI_IQ_WRSZ, CSIO_INGRESS,
					      (void *)hw, 0, 0,
					      csio_scsi_intx_handler);
			if (idx == -1) {
				csio_err(hw, "IQ creation failed for idx:%d\n",
					 idx);
				goto intr_disable;
			}
			sqset->iq_idx = idx;
		} /* for all CPUs */
	} /* For all ports */

	hw->flags |= CSIO_HWF_Q_MEM_ALLOCED;

	rv = csio_create_queues(hw);
	if (rv != 0)
		goto intr_disable;

	/*
	 * Now request IRQs for the vectors. In the event of a failure,
	 * cleanup is handled internally by this function.
	 */
	rv = csio_request_irqs(hw);
	if (rv != 0)
		return -EINVAL;

	return 0;

intr_disable:
	csio_intr_disable(hw, false);

	return -EINVAL;
}

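/*
 * csio_resource_alloc - Allocate memory and DMA resources for the HW module.
 *
 * Creates the mailbox and rnode mempools and the DMA pool used for SCSI
 * response buffers; csio_resource_free() releases them again.
 */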
static int
csio_resource_alloc(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	int rv = -ENOMEM;

	wrm->num_q = ((CSIO_MAX_SCSI_QSETS * 2) + CSIO_HW_NIQ +
		      CSIO_HW_NEQ + CSIO_HW_NFLQ + CSIO_HW_NINTXQ);

	hw->mb_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ,
						     sizeof(struct csio_mb));
	if (!hw->mb_mempool)
		goto err;

	hw->rnode_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ,
						    sizeof(struct csio_rnode));
	if (!hw->rnode_mempool)
		goto err_free_mb_mempool;

	hw->scsi_pci_pool = pci_pool_create("csio_scsi_pci_pool", hw->pdev,
					    CSIO_SCSI_RSP_LEN, 8, 0);
	if (!hw->scsi_pci_pool)
		goto err_free_rn_pool;

	return 0;

err_free_rn_pool:
	mempool_destroy(hw->rnode_mempool);
	hw->rnode_mempool = NULL;
err_free_mb_mempool:
	mempool_destroy(hw->mb_mempool);
	hw->mb_mempool = NULL;
err:
	return rv;
}

static void
csio_resource_free(struct csio_hw *hw)
{
	pci_pool_destroy(hw->scsi_pci_pool);
	hw->scsi_pci_pool = NULL;
	mempool_destroy(hw->rnode_mempool);
	hw->rnode_mempool = NULL;
	mempool_destroy(hw->mb_mempool);
	hw->mb_mempool = NULL;
}

/*
 * csio_hw_alloc - Allocate and initialize the HW module.
 * @pdev: PCI device.
 *
 * Allocates HW structure, DMA, memory resources, maps BARS to
 * host memory and initializes HW module.
 */
static struct csio_hw *csio_hw_alloc(struct pci_dev *pdev)
{
	struct csio_hw *hw;

	hw = kzalloc(sizeof(struct csio_hw), GFP_KERNEL);
	if (!hw)
		goto err;

	hw->pdev = pdev;
	strncpy(hw->drv_version, CSIO_DRV_VERSION, 32);

	/* memory pool/DMA pool allocation */
	if (csio_resource_alloc(hw))
		goto err_free_hw;

	/* Get the start address of registers from BAR 0 */
	hw->regstart = ioremap_nocache(pci_resource_start(pdev, 0),
				       pci_resource_len(pdev, 0));
	if (!hw->regstart) {
		csio_err(hw, "Could not map BAR 0, regstart = %p\n",
			 hw->regstart);
		goto err_resource_free;
	}

	csio_hw_init_workers(hw);

	if (csio_hw_init(hw))
		goto err_unmap_bar;

	csio_dfs_create(hw);

	csio_dbg(hw, "hw:%p\n", hw);

	return hw;

err_unmap_bar:
	csio_hw_exit_workers(hw);
	iounmap(hw->regstart);
err_resource_free:
	csio_resource_free(hw);
err_free_hw:
	kfree(hw);
err:
	return NULL;
}

/*
 * csio_hw_free - Uninitialize and free the HW module.
 * @hw: The HW module
 *
 * Disable interrupts, uninit the HW module, free resources, free hw.
 */
static void
csio_hw_free(struct csio_hw *hw)
{
	csio_intr_disable(hw, true);
	csio_hw_exit_workers(hw);
	csio_hw_exit(hw);
	iounmap(hw->regstart);
	csio_dfs_destroy(hw);
	csio_resource_free(hw);
	kfree(hw);
}

/**
 * csio_shost_init - Create and initialize the lnode module.
 * @hw: The HW module.
 * @dev: The device associated with this invocation.
 * @probe: Called from probe context or not?
 * @pln: Parent lnode if any.
 *
 * Allocates lnode structure via scsi_host_alloc, initializes
 * shost, initializes lnode module and registers with SCSI ML
 * via scsi_host_add. This function is shared between physical and
 * virtual node ports.
 */
struct csio_lnode *
csio_shost_init(struct csio_hw *hw, struct device *dev,
		bool probe, struct csio_lnode *pln)
{
	struct Scsi_Host *shost = NULL;
	struct csio_lnode *ln;

	csio_fcoe_shost_template.cmd_per_lun = csio_lun_qdepth;
	csio_fcoe_shost_vport_template.cmd_per_lun = csio_lun_qdepth;

	/*
	 * hw->pdev is the physical port's PCI dev structure,
	 * which will be different from the NPIV dev structure.
	 */
	if (dev == &hw->pdev->dev)
		shost = scsi_host_alloc(
				&csio_fcoe_shost_template,
				sizeof(struct csio_lnode));
	else
		shost = scsi_host_alloc(
				&csio_fcoe_shost_vport_template,
				sizeof(struct csio_lnode));

	if (!shost)
		goto err;

	ln = shost_priv(shost);
	memset(ln, 0, sizeof(struct csio_lnode));

	/* Link common lnode to this lnode */
	ln->dev_num = (shost->host_no << 16);

	shost->can_queue = CSIO_MAX_QUEUE;
	shost->this_id = -1;
	shost->unique_id = shost->host_no;
	shost->max_cmd_len = 16; /* Max CDB length supported */
	shost->max_id = min_t(uint32_t, csio_fcoe_rnodes,
			      hw->fres_info.max_ssns);
	shost->max_lun = CSIO_MAX_LUN;
	if (dev == &hw->pdev->dev)
		shost->transportt = csio_fcoe_transport;
	else
		shost->transportt = csio_fcoe_transport_vport;

	/* root lnode */
	if (!hw->rln)
		hw->rln = ln;

	/* Other initialization here: Common, Transport specific */
	if (csio_lnode_init(ln, hw, pln))
		goto err_shost_put;

	if (scsi_add_host(shost, dev))
		goto err_lnode_exit;

	return ln;

err_lnode_exit:
	csio_lnode_exit(ln);
err_shost_put:
	scsi_host_put(shost);
err:
	return NULL;
}

/**
 * csio_shost_exit - De-instantiate the shost.
 * @ln: The lnode module corresponding to the shost.
 *
 */
void
csio_shost_exit(struct csio_lnode *ln)
{
	struct Scsi_Host *shost = csio_ln_to_shost(ln);
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	/* Inform transport */
	fc_remove_host(shost);

	/* Inform SCSI ML */
	scsi_remove_host(shost);

	/* Flush all the events, so that any rnode removal events
	 * already queued are all handled, before we remove the lnode.
	 */
	spin_lock_irq(&hw->lock);
	csio_evtq_flush(hw);
	spin_unlock_irq(&hw->lock);

	csio_lnode_exit(ln);
	scsi_host_put(shost);
}

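/*
 * csio_lnode_alloc - Allocate a new lnode on the physical port's device.
 *
 * Thin wrapper around csio_shost_init() for callers outside probe context.
 */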
struct csio_lnode *
csio_lnode_alloc(struct csio_hw *hw)
{
	return csio_shost_init(hw, &hw->pdev->dev, false, NULL);
}

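/*
 * csio_lnodes_block_request - Block SCSI requests on all lnodes of this HW.
 *
 * Snapshots the sibling and child (NPIV) lnodes under the HW lock, then
 * calls scsi_block_requests() on each shost outside the lock.
 */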
void
csio_lnodes_block_request(struct csio_hw *hw)
{
	struct Scsi_Host *shost;
	struct csio_lnode *sln;
	struct csio_lnode *ln;
	struct list_head *cur_ln, *cur_cln;
	struct csio_lnode **lnode_list;
	int cur_cnt = 0, ii;

	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
			     GFP_KERNEL);
	if (!lnode_list) {
		csio_err(hw, "Failed to allocate lnodes_list");
		return;
	}

	spin_lock_irq(&hw->lock);
	/* Traverse sibling lnodes */
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;
		lnode_list[cur_cnt++] = sln;

		/* Traverse children lnodes */
		list_for_each(cur_cln, &sln->cln_head)
			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
	}
	spin_unlock_irq(&hw->lock);

	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]);
		ln = lnode_list[ii];
		shost = csio_ln_to_shost(ln);
		scsi_block_requests(shost);
	}
	kfree(lnode_list);
}

void
csio_lnodes_unblock_request(struct csio_hw *hw)
{
	struct csio_lnode *ln;
	struct Scsi_Host *shost;
	struct csio_lnode *sln;
	struct list_head *cur_ln, *cur_cln;
	struct csio_lnode **lnode_list;
	int cur_cnt = 0, ii;

	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
			     GFP_KERNEL);
	if (!lnode_list) {
		csio_err(hw, "Failed to allocate lnodes_list");
		return;
	}

	spin_lock_irq(&hw->lock);
	/* Traverse sibling lnodes */
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;
		lnode_list[cur_cnt++] = sln;

		/* Traverse children lnodes */
		list_for_each(cur_cln, &sln->cln_head)
			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
	}
	spin_unlock_irq(&hw->lock);

	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]);
		ln = lnode_list[ii];
		shost = csio_ln_to_shost(ln);
		scsi_unblock_requests(shost);
	}
	kfree(lnode_list);
}

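/*
 * csio_lnodes_block_by_port - Block SCSI requests on the lnodes of one port.
 *
 * Same as csio_lnodes_block_request(), but only lnodes whose portid matches
 * the given port (and their NPIV children) are blocked.
 */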
void
csio_lnodes_block_by_port(struct csio_hw *hw, uint8_t portid)
{
	struct csio_lnode *ln;
	struct Scsi_Host *shost;
	struct csio_lnode *sln;
	struct list_head *cur_ln, *cur_cln;
	struct csio_lnode **lnode_list;
	int cur_cnt = 0, ii;

	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
			     GFP_KERNEL);
	if (!lnode_list) {
		csio_err(hw, "Failed to allocate lnodes_list");
		return;
	}

	spin_lock_irq(&hw->lock);
	/* Traverse sibling lnodes */
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;
		if (sln->portid != portid)
			continue;

		lnode_list[cur_cnt++] = sln;

		/* Traverse children lnodes */
		list_for_each(cur_cln, &sln->cln_head)
			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
	}
	spin_unlock_irq(&hw->lock);

	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]);
		ln = lnode_list[ii];
		shost = csio_ln_to_shost(ln);
		scsi_block_requests(shost);
	}
	kfree(lnode_list);
}

void
csio_lnodes_unblock_by_port(struct csio_hw *hw, uint8_t portid)
{
	struct csio_lnode *ln;
	struct Scsi_Host *shost;
	struct csio_lnode *sln;
	struct list_head *cur_ln, *cur_cln;
	struct csio_lnode **lnode_list;
	int cur_cnt = 0, ii;

	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
			     GFP_KERNEL);
	if (!lnode_list) {
		csio_err(hw, "Failed to allocate lnodes_list");
		return;
	}

	spin_lock_irq(&hw->lock);
	/* Traverse sibling lnodes */
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;
		if (sln->portid != portid)
			continue;
		lnode_list[cur_cnt++] = sln;

		/* Traverse children lnodes */
		list_for_each(cur_cln, &sln->cln_head)
			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
	}
	spin_unlock_irq(&hw->lock);

	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]);
		ln = lnode_list[ii];
		shost = csio_ln_to_shost(ln);
		scsi_unblock_requests(shost);
	}
	kfree(lnode_list);
}

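/*
 * csio_lnodes_exit - Tear down the lnodes of this HW.
 * @npiv: When true, only the NPIV (child) lnodes are removed.
 *
 * NPIV children are terminated first via fc_vport_terminate(); unless
 * @npiv is set, the physical lnodes are then removed via csio_shost_exit().
 */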
void
csio_lnodes_exit(struct csio_hw *hw, bool npiv)
{
	struct csio_lnode *sln;
	struct csio_lnode *ln;
	struct list_head *cur_ln, *cur_cln;
	struct csio_lnode **lnode_list;
	int cur_cnt = 0, ii;

	lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
			     GFP_KERNEL);
	if (!lnode_list) {
		csio_err(hw, "lnodes_exit: Failed to allocate lnodes_list.\n");
		return;
	}

	/* Get all child lnodes(NPIV ports) */
	spin_lock_irq(&hw->lock);
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;

		/* Traverse children lnodes */
		list_for_each(cur_cln, &sln->cln_head)
			lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
	}
	spin_unlock_irq(&hw->lock);

	/* Delete NPIV lnodes */
	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "Deleting child lnode: %p\n", lnode_list[ii]);
		ln = lnode_list[ii];
		fc_vport_terminate(ln->fc_vport);
	}

	/* Delete only npiv lnodes */
	if (npiv)
		goto free_lnodes;

	cur_cnt = 0;
	/* Get all physical lnodes */
	spin_lock_irq(&hw->lock);
	/* Traverse sibling lnodes */
	list_for_each(cur_ln, &hw->sln_head) {
		sln = (struct csio_lnode *) cur_ln;
		lnode_list[cur_cnt++] = sln;
	}
	spin_unlock_irq(&hw->lock);

	/* Delete physical lnodes */
	for (ii = 0; ii < cur_cnt; ii++) {
		csio_dbg(hw, "Deleting parent lnode: %p\n", lnode_list[ii]);
		csio_shost_exit(lnode_list[ii]);
	}

free_lnodes:
	kfree(lnode_list);
}

/*
 * csio_lnode_init_post: Set lnode attributes after starting HW.
 * @ln: lnode.
 *
 */
static void
csio_lnode_init_post(struct csio_lnode *ln)
{
	struct Scsi_Host *shost = csio_ln_to_shost(ln);

	csio_fchost_attr_init(ln);

	scsi_scan_host(shost);
}

/*
 * csio_probe_one - Instantiate the driver on this PCI function.
 * @pdev: PCI device
 * @id: Device ID
 *
 * This is the .probe() callback of the driver. This function:
 * - Initializes the PCI function by enabling MMIO, setting bus
 *   mastership and setting DMA mask.
 * - Allocates HW structure, DMA, memory resources, maps BARS to
 *   host memory and initializes HW module.
 * - Allocates lnode structure via scsi_host_alloc, initializes
 *   shost, initializes lnode module and registers with SCSI ML
 *   via scsi_host_add.
 * - Enables interrupts, and starts the chip by kicking off the
 *   HW state machine.
 * - Once hardware is ready, initiates a scan of the host via
 *   scsi_scan_host.
 */
static int csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rv;
	int bars;
	int i;
	struct csio_hw *hw;
	struct csio_lnode *ln;

	rv = csio_pci_init(pdev, &bars);
	if (rv)
		goto err;

	hw = csio_hw_alloc(pdev);
	if (!hw) {
		rv = -ENODEV;
		goto err_pci_exit;
	}

	pci_set_drvdata(pdev, hw);

	if (csio_hw_start(hw) != 0) {
		dev_err(&pdev->dev,
			"Failed to start FW, continuing in debug mode.\n");
		return 0;
	}

	sprintf(hw->fwrev_str, "%u.%u.%u.%u\n",
		FW_HDR_FW_VER_MAJOR_GET(hw->fwrev),
		FW_HDR_FW_VER_MINOR_GET(hw->fwrev),
		FW_HDR_FW_VER_MICRO_GET(hw->fwrev),
		FW_HDR_FW_VER_BUILD_GET(hw->fwrev));

	for (i = 0; i < hw->num_pports; i++) {
		ln = csio_shost_init(hw, &pdev->dev, true, NULL);
		if (!ln) {
			rv = -ENODEV;
			break;
		}
		/* Initialize portid */
		ln->portid = hw->pport[i].portid;

		spin_lock_irq(&hw->lock);
		if (csio_lnode_start(ln) != 0)
			rv = -ENODEV;
		spin_unlock_irq(&hw->lock);

		if (rv)
			break;

		csio_lnode_init_post(ln);
	}

	if (rv)
		goto err_lnode_exit;

	return 0;

err_lnode_exit:
	csio_lnodes_block_request(hw);
	spin_lock_irq(&hw->lock);
	csio_hw_stop(hw);
	spin_unlock_irq(&hw->lock);
	csio_lnodes_unblock_request(hw);
	pci_set_drvdata(hw->pdev, NULL);
	csio_lnodes_exit(hw, 0);
	csio_hw_free(hw);
err_pci_exit:
	csio_pci_exit(pdev, &bars);
err:
	dev_err(&pdev->dev, "probe of device failed: %d\n", rv);
	return rv;
}

/*
 * csio_remove_one - Remove one instance of the driver at this PCI function.
 * @pdev: PCI device
 *
 * Used during hotplug operation.
 */
static void csio_remove_one(struct pci_dev *pdev)
{
	struct csio_hw *hw = pci_get_drvdata(pdev);
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	csio_lnodes_block_request(hw);
	spin_lock_irq(&hw->lock);

	/* Stops lnode, Rnode s/m
	 * Quiesce IOs.
	 * All sessions with remote ports are unregistered.
	 */
	csio_hw_stop(hw);
	spin_unlock_irq(&hw->lock);
	csio_lnodes_unblock_request(hw);

	csio_lnodes_exit(hw, 0);
	csio_hw_free(hw);
	pci_set_drvdata(pdev, NULL);
	csio_pci_exit(pdev, &bars);
}

/*
 * csio_pci_error_detected - PCI error was detected
 * @pdev: PCI device
 *
 */
static pci_ers_result_t
csio_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct csio_hw *hw = pci_get_drvdata(pdev);

	csio_lnodes_block_request(hw);
	spin_lock_irq(&hw->lock);

	/* Post PCI error detected evt to HW s/m
	 * HW s/m handles this evt by quiescing IOs, unregisters rports
	 * and finally takes the device to offline.
	 */
	csio_post_event(&hw->sm, CSIO_HWE_PCIERR_DETECTED);
	spin_unlock_irq(&hw->lock);
	csio_lnodes_unblock_request(hw);
	csio_lnodes_exit(hw, 0);
	csio_intr_disable(hw, true);
	pci_disable_device(pdev);
	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

/*
 * csio_pci_slot_reset - PCI slot has been reset.
 * @pdev: PCI device
 *
 */
static pci_ers_result_t
csio_pci_slot_reset(struct pci_dev *pdev)
{
	struct csio_hw *hw = pci_get_drvdata(pdev);
	int ready;

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "cannot re-enable device in slot reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);

	/* Bring HW s/m to ready state.
	 * but don't resume IOs.
	 */
	spin_lock_irq(&hw->lock);
	csio_post_event(&hw->sm, CSIO_HWE_PCIERR_SLOT_RESET);
	ready = csio_is_hw_ready(hw);
	spin_unlock_irq(&hw->lock);

	if (ready) {
		return PCI_ERS_RESULT_RECOVERED;
	} else {
		dev_err(&pdev->dev, "Can't initialize HW when in slot reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
}

/*
 * csio_pci_resume - Resume normal operations
 * @pdev: PCI device
 *
 */
static void
csio_pci_resume(struct pci_dev *pdev)
{
	struct csio_hw *hw = pci_get_drvdata(pdev);
	struct csio_lnode *ln;
	int rv = 0;
	int i;

	/* Bring the LINK UP and Resume IO */

	for (i = 0; i < hw->num_pports; i++) {
		ln = csio_shost_init(hw, &pdev->dev, true, NULL);
		if (!ln) {
			rv = -ENODEV;
			break;
		}
		/* Initialize portid */
		ln->portid = hw->pport[i].portid;

		spin_lock_irq(&hw->lock);
		if (csio_lnode_start(ln) != 0)
			rv = -ENODEV;
		spin_unlock_irq(&hw->lock);

		if (rv)
			break;

		csio_lnode_init_post(ln);
	}

	if (rv)
		goto err_resume_exit;

	return;

err_resume_exit:
	csio_lnodes_block_request(hw);
	spin_lock_irq(&hw->lock);
	csio_hw_stop(hw);
	spin_unlock_irq(&hw->lock);
	csio_lnodes_unblock_request(hw);
	csio_lnodes_exit(hw, 0);
	csio_hw_free(hw);
	dev_err(&pdev->dev, "resume of device failed: %d\n", rv);
}

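/*
 * PCI error recovery callbacks: on an AER event the PCI core calls
 * .error_detected first, then .slot_reset after the link is reset, and
 * finally .resume to restart normal operation.
 */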
static struct pci_error_handlers csio_err_handler = {
	.error_detected	= csio_pci_error_detected,
	.slot_reset	= csio_pci_slot_reset,
	.resume		= csio_pci_resume,
};

static DEFINE_PCI_DEVICE_TABLE(csio_pci_tbl) = {
	CSIO_DEVICE(CSIO_DEVID_T440DBG_FCOE, 0),	/* T440DBG FCOE */
	CSIO_DEVICE(CSIO_DEVID_T420CR_FCOE, 0),		/* T420CR FCOE */
	CSIO_DEVICE(CSIO_DEVID_T422CR_FCOE, 0),		/* T422CR FCOE */
	CSIO_DEVICE(CSIO_DEVID_T440CR_FCOE, 0),		/* T440CR FCOE */
	CSIO_DEVICE(CSIO_DEVID_T420BCH_FCOE, 0),	/* T420BCH FCOE */
	CSIO_DEVICE(CSIO_DEVID_T440BCH_FCOE, 0),	/* T440BCH FCOE */
	CSIO_DEVICE(CSIO_DEVID_T440CH_FCOE, 0),		/* T440CH FCOE */
	CSIO_DEVICE(CSIO_DEVID_T420SO_FCOE, 0),		/* T420SO FCOE */
	CSIO_DEVICE(CSIO_DEVID_T420CX_FCOE, 0),		/* T420CX FCOE */
	CSIO_DEVICE(CSIO_DEVID_T420BT_FCOE, 0),		/* T420BT FCOE */
	CSIO_DEVICE(CSIO_DEVID_T404BT_FCOE, 0),		/* T404BT FCOE */
	CSIO_DEVICE(CSIO_DEVID_B420_FCOE, 0),		/* B420 FCOE */
	CSIO_DEVICE(CSIO_DEVID_B404_FCOE, 0),		/* B404 FCOE */
	CSIO_DEVICE(CSIO_DEVID_T480CR_FCOE, 0),		/* T480 CR FCOE */
	CSIO_DEVICE(CSIO_DEVID_T440LPCR_FCOE, 0),	/* T440 LP-CR FCOE */
	CSIO_DEVICE(CSIO_DEVID_PE10K, 0),		/* PE10K FCOE */
	CSIO_DEVICE(CSIO_DEVID_PE10K_PF1, 0),		/* PE10K FCOE on PF1 */
	{ 0, 0, 0, 0, 0, 0, 0 }
};

static struct pci_driver csio_pci_driver = {
	.name		= KBUILD_MODNAME,
	.driver		= {
		.owner	= THIS_MODULE,
	},
	.id_table	= csio_pci_tbl,
	.probe		= csio_probe_one,
	.remove		= csio_remove_one,
	.err_handler	= &csio_err_handler,
};

/*
 * csio_init - Chelsio storage driver initialization function.
 *
 */
static int __init
csio_init(void)
{
	int rv = -ENOMEM;

	pr_info("%s %s\n", CSIO_DRV_DESC, CSIO_DRV_VERSION);

	csio_dfs_init();

	csio_fcoe_transport = fc_attach_transport(&csio_fc_transport_funcs);
	if (!csio_fcoe_transport)
		goto err;

	csio_fcoe_transport_vport =
			fc_attach_transport(&csio_fc_transport_vport_funcs);
	if (!csio_fcoe_transport_vport)
		goto err_vport;

	rv = pci_register_driver(&csio_pci_driver);
	if (rv)
		goto err_pci;

	return 0;

err_pci:
	fc_release_transport(csio_fcoe_transport_vport);
err_vport:
	fc_release_transport(csio_fcoe_transport);
err:
	csio_dfs_exit();
	return rv;
}

/*
 * csio_exit - Chelsio storage driver uninitialization.
 *
 * Function that gets called in the unload path.
 */
static void __exit
csio_exit(void)
{
	pci_unregister_driver(&csio_pci_driver);
	csio_dfs_exit();
	fc_release_transport(csio_fcoe_transport_vport);
	fc_release_transport(csio_fcoe_transport);
}

module_init(csio_init);
module_exit(csio_exit);
MODULE_AUTHOR(CSIO_DRV_AUTHOR);
MODULE_DESCRIPTION(CSIO_DRV_DESC);
MODULE_LICENSE(CSIO_DRV_LICENSE);
MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
MODULE_VERSION(CSIO_DRV_VERSION);
MODULE_FIRMWARE(CSIO_FW_FNAME);