blob: 6c512b6416c2cb99475c8d4f119159f08682ea8c [file] [log] [blame]
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301/**
2 * Copyright (C) 2005 - 2009 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
11 *
12 * Contact Information:
13 * linux-drivers@serverengines.com
14 *
15 * ServerEngines
16 * 209 N. Fair Oaks Ave
17 * Sunnyvale, CA 94085
18 *
19 */
20#include <linux/reboot.h>
21#include <linux/delay.h>
22#include <linux/interrupt.h>
23#include <linux/blkdev.h>
24#include <linux/pci.h>
25#include <linux/string.h>
26#include <linux/kernel.h>
27#include <linux/semaphore.h>
28
29#include <scsi/libiscsi.h>
30#include <scsi/scsi_transport_iscsi.h>
31#include <scsi/scsi_transport.h>
32#include <scsi/scsi_cmnd.h>
33#include <scsi/scsi_device.h>
34#include <scsi/scsi_host.h>
35#include <scsi/scsi.h>
36#include "be_main.h"
37#include "be_iscsi.h"
38#include "be_mgmt.h"
39
40static unsigned int be_iopoll_budget = 10;
41static unsigned int be_max_phys_size = 64;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +053042static unsigned int enable_msix = 1;
Jayamohan Kallickal35e66012009-10-23 11:53:49 +053043static unsigned int ring_mode;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +053044
45MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
46MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
47MODULE_AUTHOR("ServerEngines Corporation");
48MODULE_LICENSE("GPL");
49module_param(be_iopoll_budget, int, 0);
50module_param(enable_msix, int, 0);
51module_param(be_max_phys_size, uint, S_IRUGO);
52MODULE_PARM_DESC(be_max_phys_size, "Maximum Size (In Kilobytes) of physically"
53 "contiguous memory that can be allocated."
54 "Range is 16 - 128");
55
56static int beiscsi_slave_configure(struct scsi_device *sdev)
57{
58 blk_queue_max_segment_size(sdev->request_queue, 65536);
59 return 0;
60}
61
/*------------------- PCI Driver operations and data ----------------- */
/* PCI ids this driver binds to: one BladeEngine and four OneConnect ids. */
static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID4) },
	{ 0 }		/* required zero terminator */
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
72
/*
 * SCSI host template.  The generic iSCSI entry points come from
 * libiscsi; only the queue/SG limits and slave_configure are
 * driver-specific.
 */
static struct scsi_host_template beiscsi_sht = {
	.module = THIS_MODULE,
	.name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
	.proc_name = DRV_NAME,
	.queuecommand = iscsi_queuecommand,
	.eh_abort_handler = iscsi_eh_abort,
	.change_queue_depth = iscsi_change_queue_depth,
	.slave_configure = beiscsi_slave_configure,
	.target_alloc = iscsi_target_alloc,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_target_reset,
	.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
	.can_queue = BE2_IO_DEPTH,
	.this_id = -1,
	.max_sectors = BEISCSI_MAX_SECTORS,
	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
};
Jayamohan Kallickal6733b392009-09-05 07:36:35 +053091
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +053092static struct scsi_transport_template *beiscsi_scsi_transport;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +053093
/*
 * beiscsi_hba_alloc - allocate a Scsi_Host with embedded beiscsi_hba
 * @pcidev: PCI function the host will be attached to
 *
 * Allocates the SCSI host through libiscsi, fills in the host limits,
 * and registers it with the midlayer.  A reference is taken on @pcidev
 * for the lifetime of the hba.  Returns the hba on success or NULL on
 * failure (no resources remain held).
 */
static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba;
	struct Scsi_Host *shost;

	shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
	if (!shost) {
		dev_err(&pcidev->dev, "beiscsi_hba_alloc -"
			"iscsi_host_alloc failed \n");
		return NULL;
	}
	/* keep segments inside the device's DMA-addressable range */
	shost->dma_boundary = pcidev->dma_mask;
	shost->max_id = BE2_MAX_SESSIONS;
	shost->max_channel = 0;
	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
	shost->max_lun = BEISCSI_NUM_MAX_LUN;
	shost->transportt = beiscsi_scsi_transport;
	phba = iscsi_host_priv(shost);
	memset(phba, 0, sizeof(*phba));
	phba->shost = shost;
	phba->pcidev = pci_dev_get(pcidev);	/* dropped in teardown */

	if (iscsi_host_add(shost, &phba->pcidev->dev))
		goto free_devices;
	return phba;

free_devices:
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
	return NULL;
}
125
126static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
127{
128 if (phba->csr_va) {
129 iounmap(phba->csr_va);
130 phba->csr_va = NULL;
131 }
132 if (phba->db_va) {
133 iounmap(phba->db_va);
134 phba->db_va = NULL;
135 }
136 if (phba->pci_va) {
137 iounmap(phba->pci_va);
138 phba->pci_va = NULL;
139 }
140}
141
/*
 * beiscsi_map_pci_bars - ioremap the BARs the driver uses
 * @phba: host adapter
 * @pcidev: PCI function to map
 *
 * BAR 2 holds the CSR registers, BAR 4 the doorbells (only the first
 * 128 KB are mapped), and BAR 1 a PCI config window.  Both the virtual
 * and bus addresses are cached on @phba.  On any failure, mappings made
 * so far are torn down.  Returns 0 or -ENOMEM.
 */
static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
				struct pci_dev *pcidev)
{
	u8 __iomem *addr;

	addr = ioremap_nocache(pci_resource_start(pcidev, 2),
			       pci_resource_len(pcidev, 2));
	if (addr == NULL)
		return -ENOMEM;
	phba->ctrl.csr = addr;
	phba->csr_va = addr;
	phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);

	/* doorbell window: only the first 128 KB are used */
	addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.db = addr;
	phba->db_va = addr;
	phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);

	addr = ioremap_nocache(pci_resource_start(pcidev, 1),
			       pci_resource_len(pcidev, 1));
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.pcicfg = addr;
	phba->pci_va = addr;
	phba->pci_pa.u.a64.address = pci_resource_start(pcidev, 1);
	return 0;

pci_map_err:
	beiscsi_unmap_pci_function(phba);
	return -ENOMEM;
}
175
/*
 * beiscsi_enable_pci - enable the PCI function and configure DMA
 * @pcidev: device to enable
 *
 * Enables the device, turns on bus mastering, and sets the coherent
 * DMA mask (64-bit preferred, 32-bit fallback).  The device is disabled
 * again if no mask can be set.  Returns 0 or a negative errno.
 *
 * NOTE(review): only pci_set_consistent_dma_mask() is called here; the
 * streaming DMA mask is never set - confirm whether that is intended.
 */
static int beiscsi_enable_pci(struct pci_dev *pcidev)
{
	int ret;

	ret = pci_enable_device(pcidev);
	if (ret) {
		dev_err(&pcidev->dev, "beiscsi_enable_pci - enable device "
			"failed. Returning -ENODEV\n");
		return ret;
	}

	pci_set_master(pcidev);
	if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
			pci_disable_device(pcidev);
			return ret;
		}
	}
	return 0;
}
198
/*
 * be_ctrl_init - initialize the adapter control structure
 * @phba: host adapter
 * @pdev: owning PCI function
 *
 * Maps the PCI BARs, allocates DMA-coherent memory for the MCC mailbox
 * (over-allocated by 16 bytes so the working copy can be 16-byte
 * aligned), and initializes the mailbox/MCC locks.  Returns 0 or a
 * negative errno; on failure nothing remains mapped.
 */
static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
	int status = 0;

	ctrl->pdev = pdev;
	status = beiscsi_map_pci_bars(phba, pdev);
	if (status)
		return status;
	/* +16 gives room to align the mailbox on a 16-byte boundary */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(pdev,
						  mbox_mem_alloc->size,
						  &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		beiscsi_unmap_pci_function(phba);
		status = -ENOMEM;
		return status;
	}

	/* the aligned view is what the rest of the driver uses */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
	spin_lock_init(&ctrl->mbox_lock);
	spin_lock_init(&phba->ctrl.mcc_lock);
	spin_lock_init(&phba->ctrl.mcc_cq_lock);

	return status;
}
230
231static void beiscsi_get_params(struct beiscsi_hba *phba)
232{
Jayamohan Kallickal7da50872010-01-05 05:04:12 +0530233 phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
234 - (phba->fw_config.iscsi_cid_count
235 + BE2_TMFS
236 + BE2_NOPOUT_REQ));
237 phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
238 phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count;;
239 phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530240 phba->params.num_sge_per_io = BE2_SGE;
241 phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
242 phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
243 phba->params.eq_timer = 64;
244 phba->params.num_eq_entries =
Jayamohan Kallickal7da50872010-01-05 05:04:12 +0530245 (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
246 + BE2_TMFS) / 512) + 1) * 512;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530247 phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
248 ? 1024 : phba->params.num_eq_entries;
249 SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d \n",
Jayamohan Kallickal7da50872010-01-05 05:04:12 +0530250 phba->params.num_eq_entries);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530251 phba->params.num_cq_entries =
Jayamohan Kallickal7da50872010-01-05 05:04:12 +0530252 (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
253 + BE2_TMFS) / 512) + 1) * 512;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530254 phba->params.wrbs_per_cxn = 256;
255}
256
257static void hwi_ring_eq_db(struct beiscsi_hba *phba,
258 unsigned int id, unsigned int clr_interrupt,
259 unsigned int num_processed,
260 unsigned char rearm, unsigned char event)
261{
262 u32 val = 0;
263 val |= id & DB_EQ_RING_ID_MASK;
264 if (rearm)
265 val |= 1 << DB_EQ_REARM_SHIFT;
266 if (clr_interrupt)
267 val |= 1 << DB_EQ_CLR_SHIFT;
268 if (event)
269 val |= 1 << DB_EQ_EVNT_SHIFT;
270 val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
271 iowrite32(val, phba->db_va + DB_EQ_OFFSET);
272}
273
/**
 * be_isr_mcc - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 *
 * MSI-X handler for the dedicated MCC (management) event queue: drains
 * valid EQEs, flags the MCC CQ for the workqueue, and re-arms the EQ.
 */
static irqreturn_t be_isr_mcc(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned int num_eq_processed;
	struct be_eq_obj *pbe_eq;
	unsigned long flags;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	phba =  pbe_eq->phba;
	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);
	if (!eqe)
		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");

	num_eq_processed = 0;

	/* consume every valid EQE; those for the MCC CQ schedule work */
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
		     resource_id) / 32] &
		     EQE_RESID_MASK) >> 16) == mcc->id) {
			spin_lock_irqsave(&phba->isr_lock, flags);
			phba->todo_mcc_cq = 1;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
		}
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
		num_eq_processed++;
	}
	if (phba->todo_mcc_cq)
		queue_work(phba->wq, &phba->work_cqs);
	/* clr_interrupt=1, rearm=1, event=1 */
	if (num_eq_processed)
		hwi_ring_eq_db(phba, eq->id, 1,	num_eq_processed, 1, 1);

	return IRQ_HANDLED;
}
320
/**
 * be_isr_msix - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 *
 * Per-vector MSI-X handler for an I/O event queue.  With blk_iopoll
 * enabled, completions are handed to the iopoll softirq (EQ is not
 * re-armed here; rearm=0); otherwise the CQ work item is scheduled and
 * the EQ is re-armed immediately.
 */
static irqreturn_t be_isr_msix(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *cq;
	unsigned int num_eq_processed;
	struct be_eq_obj *pbe_eq;
	unsigned long flags;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	cq = pbe_eq->cq;
	eqe = queue_tail_node(eq);
	if (!eqe)
		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");

	phba = pbe_eq->phba;
	num_eq_processed = 0;
	if (blk_iopoll_enabled) {
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
				blk_iopoll_sched(&pbe_eq->iopoll);

			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_eq_processed++;
		}
		/* rearm=0: the iopoll completion path re-arms the EQ */
		if (num_eq_processed)
			hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);

		return IRQ_HANDLED;
	} else {
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			spin_lock_irqsave(&phba->isr_lock, flags);
			phba->todo_cq = 1;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_eq_processed++;
		}
		if (phba->todo_cq)
			queue_work(phba->wq, &phba->work_cqs);

		if (num_eq_processed)
			hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);

		return IRQ_HANDLED;
	}
}
380
381/**
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530382 * be_isr - The isr routine of the driver.
383 * @irq: Not used
384 * @dev_id: Pointer to host adapter structure
385 */
386static irqreturn_t be_isr(int irq, void *dev_id)
387{
388 struct beiscsi_hba *phba;
389 struct hwi_controller *phwi_ctrlr;
390 struct hwi_context_memory *phwi_context;
391 struct be_eq_entry *eqe = NULL;
392 struct be_queue_info *eq;
393 struct be_queue_info *cq;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530394 struct be_queue_info *mcc;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530395 unsigned long flags, index;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530396 unsigned int num_mcceq_processed, num_ioeq_processed;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530397 struct be_ctrl_info *ctrl;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530398 struct be_eq_obj *pbe_eq;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530399 int isr;
400
401 phba = dev_id;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530402 ctrl = &phba->ctrl;;
403 isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
404 (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
405 if (!isr)
406 return IRQ_NONE;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530407
408 phwi_ctrlr = phba->phwi_ctrlr;
409 phwi_context = phwi_ctrlr->phwi_ctxt;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530410 pbe_eq = &phwi_context->be_eq[0];
411
412 eq = &phwi_context->be_eq[0].q;
413 mcc = &phba->ctrl.mcc_obj.cq;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530414 index = 0;
415 eqe = queue_tail_node(eq);
416 if (!eqe)
417 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
418
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530419 num_ioeq_processed = 0;
420 num_mcceq_processed = 0;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530421 if (blk_iopoll_enabled) {
422 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
423 & EQE_VALID_MASK) {
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530424 if (((eqe->dw[offsetof(struct amap_eq_entry,
425 resource_id) / 32] &
426 EQE_RESID_MASK) >> 16) == mcc->id) {
427 spin_lock_irqsave(&phba->isr_lock, flags);
428 phba->todo_mcc_cq = 1;
429 spin_unlock_irqrestore(&phba->isr_lock, flags);
430 num_mcceq_processed++;
431 } else {
432 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
433 blk_iopoll_sched(&pbe_eq->iopoll);
434 num_ioeq_processed++;
435 }
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530436 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
437 queue_tail_inc(eq);
438 eqe = queue_tail_node(eq);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530439 }
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530440 if (num_ioeq_processed || num_mcceq_processed) {
441 if (phba->todo_mcc_cq)
442 queue_work(phba->wq, &phba->work_cqs);
443
444 if ((num_mcceq_processed) && (!num_ioeq_processed))
445 hwi_ring_eq_db(phba, eq->id, 0,
446 (num_ioeq_processed +
447 num_mcceq_processed) , 1, 1);
448 else
449 hwi_ring_eq_db(phba, eq->id, 0,
450 (num_ioeq_processed +
451 num_mcceq_processed), 0, 1);
452
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530453 return IRQ_HANDLED;
454 } else
455 return IRQ_NONE;
456 } else {
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530457 cq = &phwi_context->be_cq[0];
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530458 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
459 & EQE_VALID_MASK) {
460
461 if (((eqe->dw[offsetof(struct amap_eq_entry,
462 resource_id) / 32] &
463 EQE_RESID_MASK) >> 16) != cq->id) {
464 spin_lock_irqsave(&phba->isr_lock, flags);
465 phba->todo_mcc_cq = 1;
466 spin_unlock_irqrestore(&phba->isr_lock, flags);
467 } else {
468 spin_lock_irqsave(&phba->isr_lock, flags);
469 phba->todo_cq = 1;
470 spin_unlock_irqrestore(&phba->isr_lock, flags);
471 }
472 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
473 queue_tail_inc(eq);
474 eqe = queue_tail_node(eq);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530475 num_ioeq_processed++;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530476 }
477 if (phba->todo_cq || phba->todo_mcc_cq)
478 queue_work(phba->wq, &phba->work_cqs);
479
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530480 if (num_ioeq_processed) {
481 hwi_ring_eq_db(phba, eq->id, 0,
482 num_ioeq_processed, 1, 1);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530483 return IRQ_HANDLED;
484 } else
485 return IRQ_NONE;
486 }
487}
488
489static int beiscsi_init_irqs(struct beiscsi_hba *phba)
490{
491 struct pci_dev *pcidev = phba->pcidev;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530492 struct hwi_controller *phwi_ctrlr;
493 struct hwi_context_memory *phwi_context;
494 int ret, msix_vec, i = 0;
495 char desc[32];
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530496
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530497 phwi_ctrlr = phba->phwi_ctrlr;
498 phwi_context = phwi_ctrlr->phwi_ctxt;
499
500 if (phba->msix_enabled) {
501 for (i = 0; i < phba->num_cpus; i++) {
502 sprintf(desc, "beiscsi_msix_%04x", i);
503 msix_vec = phba->msix_entries[i].vector;
504 ret = request_irq(msix_vec, be_isr_msix, 0, desc,
505 &phwi_context->be_eq[i]);
506 }
507 msix_vec = phba->msix_entries[i].vector;
508 ret = request_irq(msix_vec, be_isr_mcc, 0, "beiscsi_msix_mcc",
509 &phwi_context->be_eq[i]);
510 } else {
511 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
512 "beiscsi", phba);
513 if (ret) {
514 shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
515 "Failed to register irq\\n");
516 return ret;
517 }
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530518 }
519 return 0;
520}
521
522static void hwi_ring_cq_db(struct beiscsi_hba *phba,
523 unsigned int id, unsigned int num_processed,
524 unsigned char rearm, unsigned char event)
525{
526 u32 val = 0;
527 val |= id & DB_CQ_RING_ID_MASK;
528 if (rearm)
529 val |= 1 << DB_CQ_REARM_SHIFT;
530 val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
531 iowrite32(val, phba->db_va + DB_CQ_OFFSET);
532}
533
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530534static unsigned int
535beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
536 struct beiscsi_hba *phba,
537 unsigned short cid,
538 struct pdu_base *ppdu,
539 unsigned long pdu_len,
540 void *pbuffer, unsigned long buf_len)
541{
542 struct iscsi_conn *conn = beiscsi_conn->conn;
543 struct iscsi_session *session = conn->session;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530544 struct iscsi_task *task;
545 struct beiscsi_io_task *io_task;
546 struct iscsi_hdr *login_hdr;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530547
548 switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
549 PDUBASE_OPCODE_MASK) {
550 case ISCSI_OP_NOOP_IN:
551 pbuffer = NULL;
552 buf_len = 0;
553 break;
554 case ISCSI_OP_ASYNC_EVENT:
555 break;
556 case ISCSI_OP_REJECT:
557 WARN_ON(!pbuffer);
558 WARN_ON(!(buf_len == 48));
559 SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
560 break;
561 case ISCSI_OP_LOGIN_RSP:
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530562 task = conn->login_task;
563 io_task = task->dd_data;
564 login_hdr = (struct iscsi_hdr *)ppdu;
565 login_hdr->itt = io_task->libiscsi_itt;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530566 break;
567 default:
568 shost_printk(KERN_WARNING, phba->shost,
569 "Unrecognized opcode 0x%x in async msg \n",
570 (ppdu->
571 dw[offsetof(struct amap_pdu_base, opcode) / 32]
572 & PDUBASE_OPCODE_MASK));
573 return 1;
574 }
575
576 spin_lock_bh(&session->lock);
577 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
578 spin_unlock_bh(&session->lock);
579 return 0;
580}
581
582static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
583{
584 struct sgl_handle *psgl_handle;
585
586 if (phba->io_sgl_hndl_avbl) {
587 SE_DEBUG(DBG_LVL_8,
588 "In alloc_io_sgl_handle,io_sgl_alloc_index=%d \n",
589 phba->io_sgl_alloc_index);
590 psgl_handle = phba->io_sgl_hndl_base[phba->
591 io_sgl_alloc_index];
592 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
593 phba->io_sgl_hndl_avbl--;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530594 if (phba->io_sgl_alloc_index == (phba->params.
595 ios_per_ctrl - 1))
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530596 phba->io_sgl_alloc_index = 0;
597 else
598 phba->io_sgl_alloc_index++;
599 } else
600 psgl_handle = NULL;
601 return psgl_handle;
602}
603
604static void
605free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
606{
607 SE_DEBUG(DBG_LVL_8, "In free_,io_sgl_free_index=%d \n",
608 phba->io_sgl_free_index);
609 if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
610 /*
611 * this can happen if clean_task is called on a task that
612 * failed in xmit_task or alloc_pdu.
613 */
614 SE_DEBUG(DBG_LVL_8,
615 "Double Free in IO SGL io_sgl_free_index=%d,"
616 "value there=%p \n", phba->io_sgl_free_index,
617 phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
618 return;
619 }
620 phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
621 phba->io_sgl_hndl_avbl++;
622 if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
623 phba->io_sgl_free_index = 0;
624 else
625 phba->io_sgl_free_index++;
626}
627
/**
 * alloc_wrb_handle - To allocate a wrb handle
 * @phba: The hba pointer
 * @cid: The cid to use for allocation
 * @index: index allocation and wrb index
 *
 * This happens under session_lock until submission to chip
 *
 * Returns the next free wrb_handle from @cid's circular WRB pool, or
 * NULL when the pool is exhausted.
 * NOTE(review): @index is not referenced in this body - confirm whether
 * it is still needed by callers.
 */
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
				    int index)
{
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	struct wrb_handle *pwrb_handle;

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[cid];
	if (pwrb_context->wrb_handles_available) {
		pwrb_handle = pwrb_context->pwrb_handle_base[
				pwrb_context->alloc_index];
		pwrb_context->wrb_handles_available--;
		pwrb_handle->nxt_wrb_index = pwrb_handle->wrb_index;
		/* wrap the circular allocation index */
		if (pwrb_context->alloc_index ==
				(phba->params.wrbs_per_cxn - 1))
			pwrb_context->alloc_index = 0;
		else
			pwrb_context->alloc_index++;
	} else
		pwrb_handle = NULL;
	return pwrb_handle;
}
659
/**
 * free_wrb_handle - To free the wrb handle back to pool
 * @phba: The hba pointer
 * @pwrb_context: The context to free from
 * @pwrb_handle: The wrb_handle to free
 *
 * This happens under session_lock until submission to chip
 */
static void
free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
		struct wrb_handle *pwrb_handle)
{
	/*
	 * NOTE(review): in ring_mode the handle table slot is not
	 * restored, only the availability count - presumably the table
	 * is managed differently in that mode; confirm.
	 */
	if (!ring_mode)
		pwrb_context->pwrb_handle_base[pwrb_context->free_index] =
					       pwrb_handle;
	pwrb_context->wrb_handles_available++;
	/* wrap the circular free index */
	if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
		pwrb_context->free_index = 0;
	else
		pwrb_context->free_index++;

	SE_DEBUG(DBG_LVL_8,
		 "FREE WRB: pwrb_handle=%p free_index=0x%x"
		 "wrb_handles_available=%d \n",
		 pwrb_handle, pwrb_context->free_index,
		 pwrb_context->wrb_handles_available);
}
687
688static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
689{
690 struct sgl_handle *psgl_handle;
691
692 if (phba->eh_sgl_hndl_avbl) {
693 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
694 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
695 SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x \n",
696 phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
697 phba->eh_sgl_hndl_avbl--;
698 if (phba->eh_sgl_alloc_index ==
699 (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
700 1))
701 phba->eh_sgl_alloc_index = 0;
702 else
703 phba->eh_sgl_alloc_index++;
704 } else
705 psgl_handle = NULL;
706 return psgl_handle;
707}
708
/*
 * free_mgmt_sgl_handle - return an eh/mgmt SGL handle to the pool
 * @phba: host adapter
 * @psgl_handle: handle being freed
 *
 * Guards against a double free (slot already occupied), then stores
 * the handle and advances the circular free index over the mgmt pool
 * (icds_per_ctrl - ios_per_ctrl slots).
 */
void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{

	SE_DEBUG(DBG_LVL_8, "In  free_mgmt_sgl_handle,eh_sgl_free_index=%d \n",
			     phba->eh_sgl_free_index);
	if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		SE_DEBUG(DBG_LVL_8,
			 "Double Free in eh SGL ,eh_sgl_free_index=%d \n",
			 phba->eh_sgl_free_index);
		return;
	}
	phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
	phba->eh_sgl_hndl_avbl++;
	if (phba->eh_sgl_free_index ==
	    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
		phba->eh_sgl_free_index = 0;
	else
		phba->eh_sgl_free_index++;
}
733
/*
 * be_complete_io - complete a SCSI command from a solicited CQE
 * @beiscsi_conn: connection the command ran on
 * @task: iscsi task being completed
 * @psol: solicited completion entry from hardware
 *
 * Decodes response/status/flags and the CmdSN window from the CQE,
 * handles residuals and sense data, updates rx byte accounting for
 * reads, then unmaps DMA and completes the task through libiscsi.
 */
static void
be_complete_io(struct beiscsi_conn *beiscsi_conn,
	       struct iscsi_task *task, struct sol_cqe *psol)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct be_status_bhs *sts_bhs =
				(struct be_status_bhs *)io_task->cmd_bhs;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	unsigned int sense_len;
	unsigned char *sense;
	u32 resid = 0, exp_cmdsn, max_cmdsn;
	u8 rsp, status, flags;

	exp_cmdsn = (psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
			& SOL_EXP_CMD_SN_MASK);
	/* max = exp + (command window - 1) */
	max_cmdsn = ((psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
			& SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
				/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
						& SOL_RESP_MASK) >> 16);
	status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
						& SOL_STS_MASK) >> 8);
	flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
					& SOL_FLAGS_MASK) >> 24) | 0x80;

	task->sc->result = (DID_OK << 16) | status;
	if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
		task->sc->result = DID_ERROR << 16;
		goto unmap;
	}

	/* bidi not initially supported */
	if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
		resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
				32] & SOL_RES_CNT_MASK);

		if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
			task->sc->result = DID_ERROR << 16;

		if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
			scsi_set_resid(task->sc, resid);
			if (!status && (scsi_bufflen(task->sc) - resid <
			    task->sc->underflow))
				task->sc->result = DID_ERROR << 16;
		}
	}

	if (status == SAM_STAT_CHECK_CONDITION) {
		/* sense data is prefixed by a 16-bit length field */
		unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
		sense = sts_bhs->sense_info + sizeof(unsigned short);
		/*
		 * NOTE(review): cpu_to_be16() is used to decode a value read
		 * from the wire; be16_to_cpu() would express the intent (the
		 * byteswap itself is identical) - confirm.
		 */
		sense_len = cpu_to_be16(*slen);
		memcpy(task->sc->sense_buffer, sense,
		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
	}
	if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
		if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
							& SOL_RES_CNT_MASK)
			conn->rxdata_octets += (psol->
			    dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
			    & SOL_RES_CNT_MASK);
	}
unmap:
	scsi_dma_unmap(io_task->scsi_cmnd);
	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
}
802
/*
 * be_complete_logout - build and deliver a Logout Response PDU
 * @beiscsi_conn: connection being logged out
 * @task: the logout task
 * @psol: solicited CQE carrying flags/response and the CmdSN window
 *
 * Rebuilds the logout response header from CQE fields and completes it
 * through __iscsi_complete_pdu().
 */
static void
be_complete_logout(struct beiscsi_conn *beiscsi_conn,
		   struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_logout_rsp *hdr;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = beiscsi_conn->conn;

	hdr = (struct iscsi_logout_rsp *)task->hdr;
	hdr->t2wait = 5;
	hdr->t2retain = 0;
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
					& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
					32] & SOL_RESP_MASK);
	hdr->exp_cmdsn = cpu_to_be32(psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
					& SOL_EXP_CMD_SN_MASK);
	/* max = exp + (command window - 1) */
	hdr->max_cmdsn = be32_to_cpu((psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
					& SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
					/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->hlength = 0;
	/* hand libiscsi back its own ITT, not the hardware one */
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
830
/*
 * be_complete_tmf - build and deliver a Task Management Response PDU
 * @beiscsi_conn: connection the TMF ran on
 * @task: the task-management task
 * @psol: solicited CQE carrying flags/response and the CmdSN window
 */
static void
be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
		struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_tm_rsp *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_tm_rsp *)task->hdr;
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
					& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
					32] & SOL_RESP_MASK);
	hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
				    i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
	/* max = exp + (command window - 1) */
	hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
			i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
			/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	/* hand libiscsi back its own ITT, not the hardware one */
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
853
/*
 * hwi_complete_drvr_msgs - complete a driver-internal (mgmt) WRB
 * @beiscsi_conn: connection the message ran on
 * @phba: host adapter
 * @psol: solicited CQE describing the completion
 *
 * Locates the owning task either through the SGL handle array
 * (ring_mode, keyed by icd_index) or through the per-connection WRB
 * context (keyed by cid/wrb_index), then releases the mgmt SGL handle
 * and the WRB handle under their respective locks.
 */
static void
hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
		       struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	struct wrb_handle *pwrb_handle = NULL;
	struct sgl_handle *psgl_handle = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct iscsi_task *task;
	struct beiscsi_io_task *io_task;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;

	phwi_ctrlr = phba->phwi_ctrlr;
	if (ring_mode) {
		/* icd_index in the CQE addresses the SGL handle directly */
		psgl_handle = phba->sgl_hndl_array[((psol->
			     dw[offsetof(struct amap_sol_cqe_ring, icd_index) /
			     32] & SOL_ICD_INDEX_MASK) >> 6)];
		pwrb_context = &phwi_ctrlr->wrb_context[psgl_handle->cid];
		task = psgl_handle->task;
		pwrb_handle = NULL;
	} else {
		/* the CQE's cid is offset by the firmware's first cid */
		pwrb_context = &phwi_ctrlr->wrb_context[((psol->
				dw[offsetof(struct amap_sol_cqe, cid) / 32] &
				SOL_CID_MASK) >> 6) -
				phba->fw_config.iscsi_cid_start];
		pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
				dw[offsetof(struct amap_sol_cqe, wrb_index) /
				32] & SOL_WRB_INDEX_MASK) >> 16)];
		task = pwrb_handle->pio_handle;
	}

	io_task = task->dd_data;
	spin_lock(&phba->mgmt_sgl_lock);
	free_mgmt_sgl_handle(phba, io_task->psgl_handle);
	spin_unlock(&phba->mgmt_sgl_lock);
	spin_lock_bh(&session->lock);
	free_wrb_handle(phba, pwrb_context, pwrb_handle);
	spin_unlock_bh(&session->lock);
}
894
/*
 * be_complete_nopin_resp - build and deliver a NOP-In PDU from a CQE
 * @beiscsi_conn: connection the completion belongs to
 * @task:         the originating NOP-Out task
 * @psol:         the solicited completion-queue entry
 *
 * The adapter completes NOP exchanges without delivering a full PDU, so the
 * response header is reconstructed in the task's BHS area from CQE fields
 * and handed to libiscsi via __iscsi_complete_pdu().
 */
static void
be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
		       struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_nopin *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_nopin *)task->hdr;
	/* Flags live in the top byte of i_flags; 0x80 sets the F bit. */
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
			& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
					i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
	/* MaxCmdSN = ExpCmdSN + command window - 1, per the iSCSI numbering. */
	hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
			i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
			/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->opcode = ISCSI_OP_NOOP_IN;
	/* Report the libiscsi ITT, not the driver's hardware ITT. */
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
916
/*
 * hwi_complete_cmd - dispatch a SOL_CMD_COMPLETE CQE to the proper handler
 * @beiscsi_conn: connection the completion belongs to
 * @phba:         adapter instance
 * @psol:         the solicited completion-queue entry
 *
 * Recovers the iscsi_task and its WRB type from the CQE, then fans out to
 * the type-specific completion routine (I/O, logout, TMF, NOP) under the
 * iSCSI session lock.  In ring_mode the task/type come from the SGL handle
 * indexed by the CQE's ICD index; otherwise from the WRB handle located via
 * CID (rebased by iscsi_cid_start) and WRB index.
 */
static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
			     struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	struct wrb_handle *pwrb_handle;
	struct iscsi_wrb *pwrb = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct iscsi_task *task;
	struct sgl_handle *psgl_handle = NULL;
	unsigned int type;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;

	phwi_ctrlr = phba->phwi_ctrlr;
	if (ring_mode) {
		psgl_handle = phba->sgl_hndl_array[((psol->
			     dw[offsetof(struct amap_sol_cqe_ring, icd_index) /
			     32] & SOL_ICD_INDEX_MASK) >> 6)];
		task = psgl_handle->task;
		type = psgl_handle->type;
	} else {
		pwrb_context = &phwi_ctrlr->
				wrb_context[((psol->dw[offsetof
				(struct amap_sol_cqe, cid) / 32]
				& SOL_CID_MASK) >> 6) -
				phba->fw_config.iscsi_cid_start];
		pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
				dw[offsetof(struct amap_sol_cqe, wrb_index) /
				32] & SOL_WRB_INDEX_MASK) >> 16)];
		task = pwrb_handle->pio_handle;
		pwrb = pwrb_handle->pwrb;
		/* WRB type occupies the top nibble of its dword. */
		type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
			WRB_TYPE_MASK) >> 28;
	}
	spin_lock_bh(&session->lock);
	switch (type) {
	case HWH_TYPE_IO:
	case HWH_TYPE_IO_RD:
		/* NOP-Out posted on the I/O path still completes as a NOP-In. */
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
		    ISCSI_OP_NOOP_OUT) {
			be_complete_nopin_resp(beiscsi_conn, task, psol);
		} else
			be_complete_io(beiscsi_conn, task, psol);
		break;

	case HWH_TYPE_LOGOUT:
		be_complete_logout(beiscsi_conn, task, psol);
		break;

	case HWH_TYPE_LOGIN:
		/* Login completes on the MCC path, never via a solicited CQE. */
		SE_DEBUG(DBG_LVL_1,
			 "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
			 "- Solicited path \n");
		break;

	case HWH_TYPE_TMF:
		be_complete_tmf(beiscsi_conn, task, psol);
		break;

	case HWH_TYPE_NOP:
		be_complete_nopin_resp(beiscsi_conn, task, psol);
		break;

	default:
		if (ring_mode)
			shost_printk(KERN_WARNING, phba->shost,
				"In hwi_complete_cmd, unknown type = %d"
				"icd_index 0x%x CID 0x%x\n", type,
				((psol->dw[offsetof(struct amap_sol_cqe_ring,
				icd_index) / 32] & SOL_ICD_INDEX_MASK) >> 6),
				psgl_handle->cid);
		else
			shost_printk(KERN_WARNING, phba->shost,
				"In hwi_complete_cmd, unknown type = %d"
				"wrb_index 0x%x CID 0x%x\n", type,
				((psol->dw[offsetof(struct amap_iscsi_wrb,
				type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
				((psol->dw[offsetof(struct amap_sol_cqe,
				cid) / 32] & SOL_CID_MASK) >> 6));
		break;
	}

	spin_unlock_bh(&session->lock);
}
1001
1002static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
1003 *pasync_ctx, unsigned int is_header,
1004 unsigned int host_write_ptr)
1005{
1006 if (is_header)
1007 return &pasync_ctx->async_entry[host_write_ptr].
1008 header_busy_list;
1009 else
1010 return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
1011}
1012
/*
 * hwi_get_async_handle - map a default-PDU CQE to its async buffer handle
 * @phba:       adapter instance
 * @beiscsi_conn: connection the CQE belongs to
 * @pasync_ctx: async PDU context (header/data rings and busy lists)
 * @pdpdu_cqe:  the incoming default-PDU CQE
 * @pcq_index:  out: the CQE's ring index, for the writables bookkeeping
 *
 * Reconstructs the buffer's bus address from the CQE (the reported address
 * includes the data placement length, which is subtracted back out), derives
 * the buffer index from its offset within the ring's PA base, and walks the
 * slot's busy list to find the matching handle.  On the way out the handle
 * is stamped with the rebased CRI, header/data flag and placed length.
 * Returns NULL for an unexpected CQE code.
 */
static struct async_pdu_handle *
hwi_get_async_handle(struct beiscsi_hba *phba,
		     struct beiscsi_conn *beiscsi_conn,
		     struct hwi_async_pdu_context *pasync_ctx,
		     struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
{
	struct be_bus_address phys_addr;
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle = NULL;
	int buffer_len = 0;
	unsigned char buffer_index = -1;
	unsigned char is_header = 0;

	/* db_addr_lo in the CQE has the DPL folded in; undo that here. */
	phys_addr.u.a32.address_lo =
	    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
	    ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
						& PDUCQE_DPL_MASK) >> 16);
	phys_addr.u.a32.address_hi =
	    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];

	phys_addr.u.a64.address =
			*((unsigned long long *)(&phys_addr.u.a64.address));

	switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
			& PDUCQE_CODE_MASK) {
	case UNSOL_HDR_NOTIFY:
		is_header = 1;

		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
			(pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			index) / 32] & PDUCQE_INDEX_MASK));

		/* Offset from the header ring's PA base gives the slot. */
		buffer_len = (unsigned int)(phys_addr.u.a64.address -
				pasync_ctx->async_header.pa_base.u.a64.address);

		buffer_index = buffer_len /
				pasync_ctx->async_header.buffer_size;

		break;
	case UNSOL_DATA_NOTIFY:
		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
					dw[offsetof(struct amap_i_t_dpdu_cqe,
					index) / 32] & PDUCQE_INDEX_MASK));
		/* Same offset computation against the data ring's PA base. */
		buffer_len = (unsigned long)(phys_addr.u.a64.address -
					pasync_ctx->async_data.pa_base.u.
					a64.address);
		buffer_index = buffer_len / pasync_ctx->async_data.buffer_size;
		break;
	default:
		pbusy_list = NULL;
		shost_printk(KERN_WARNING, phba->shost,
			"Unexpected code=%d \n",
			 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
					code) / 32] & PDUCQE_CODE_MASK);
		return NULL;
	}

	WARN_ON(!(buffer_index <= pasync_ctx->async_data.num_entries));
	WARN_ON(list_empty(pbusy_list));
	/* Find the handle for this buffer; it must not be consumed yet. */
	list_for_each_entry(pasync_handle, pbusy_list, link) {
		WARN_ON(pasync_handle->consumed);
		if (pasync_handle->index == buffer_index)
			break;
	}

	WARN_ON(!pasync_handle);

	/* CRI is the connection's CID rebased by the firmware's first CID. */
	pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
					     phba->fw_config.iscsi_cid_start;
	pasync_handle->is_header = is_header;
	pasync_handle->buffer_len = ((pdpdu_cqe->
			dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
			& PDUCQE_DPL_MASK) >> 16);

	*pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			index) / 32] & PDUCQE_INDEX_MASK);
	return pasync_handle;
}
1091
/*
 * hwi_update_async_writables - advance the ring read pointer to a CQE index
 * @pasync_ctx: async PDU context
 * @is_header:  non-zero for the header ring, zero for the data ring
 * @cq_index:   ring index reported by the CQE to catch up to
 *
 * Walks the endpoint read pointer forward (with wraparound) until it reaches
 * @cq_index, marking the head handle of each intervening busy slot as
 * consumed and counting each advanced slot as newly writable.  A zero count
 * means the same index was reported twice, which is flagged with WARN_ON.
 * Always returns 0.
 */
static unsigned int
hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
			   unsigned int is_header, unsigned int cq_index)
{
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle;
	unsigned int num_entries, writables = 0;
	unsigned int *pep_read_ptr, *pwritables;


	/* Select the header or data ring's bookkeeping fields. */
	if (is_header) {
		pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
		pwritables = &pasync_ctx->async_header.writables;
		num_entries = pasync_ctx->async_header.num_entries;
	} else {
		pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
		pwritables = &pasync_ctx->async_data.writables;
		num_entries = pasync_ctx->async_data.num_entries;
	}

	while ((*pep_read_ptr) != cq_index) {
		(*pep_read_ptr)++;
		/* Wrap the read pointer around the ring. */
		*pep_read_ptr = (*pep_read_ptr) % num_entries;

		pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
						     *pep_read_ptr);
		/* The first advanced slot is expected to be busy. */
		if (writables == 0)
			WARN_ON(list_empty(pbusy_list));

		if (!list_empty(pbusy_list)) {
			pasync_handle = list_entry(pbusy_list->next,
						   struct async_pdu_handle,
						   link);
			WARN_ON(!pasync_handle);
			pasync_handle->consumed = 1;
		}

		writables++;
	}

	if (!writables) {
		SE_DEBUG(DBG_LVL_1,
			 "Duplicate notification received - index 0x%x!!\n",
			 cq_index);
		WARN_ON(1);
	}

	*pwritables = *pwritables + writables;
	return 0;
}
1142
1143static unsigned int hwi_free_async_msg(struct beiscsi_hba *phba,
1144 unsigned int cri)
1145{
1146 struct hwi_controller *phwi_ctrlr;
1147 struct hwi_async_pdu_context *pasync_ctx;
1148 struct async_pdu_handle *pasync_handle, *tmp_handle;
1149 struct list_head *plist;
1150 unsigned int i = 0;
1151
1152 phwi_ctrlr = phba->phwi_ctrlr;
1153 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1154
1155 plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1156
1157 list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
1158 list_del(&pasync_handle->link);
1159
1160 if (i == 0) {
1161 list_add_tail(&pasync_handle->link,
1162 &pasync_ctx->async_header.free_list);
1163 pasync_ctx->async_header.free_entries++;
1164 i++;
1165 } else {
1166 list_add_tail(&pasync_handle->link,
1167 &pasync_ctx->async_data.free_list);
1168 pasync_ctx->async_data.free_entries++;
1169 i++;
1170 }
1171 }
1172
1173 INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
1174 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
1175 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1176 return 0;
1177}
1178
1179static struct phys_addr *
1180hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
1181 unsigned int is_header, unsigned int host_write_ptr)
1182{
1183 struct phys_addr *pasync_sge = NULL;
1184
1185 if (is_header)
1186 pasync_sge = pasync_ctx->async_header.ring_base;
1187 else
1188 pasync_sge = pasync_ctx->async_data.ring_base;
1189
1190 return pasync_sge + host_write_ptr;
1191}
1192
/*
 * hwi_post_async_buffers - replenish a default-PDU ring with free buffers
 * @phba:      adapter instance
 * @is_header: non-zero posts to the header ring, zero to the data ring
 *
 * Moves up to min(writables, free_entries) handles — rounded down to a
 * multiple of 8, the hardware posting granularity — from the free list onto
 * the per-slot busy lists, programs each ring SGE with the buffer's bus
 * address, updates the ring accounting, and rings the RXULP doorbell with
 * the number of posted entries.
 */
static void hwi_post_async_buffers(struct beiscsi_hba *phba,
				   unsigned int is_header)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle;
	struct list_head *pfree_link, *pbusy_list;
	struct phys_addr *pasync_sge;
	unsigned int ring_id, num_entries;
	unsigned int host_write_num;
	unsigned int writables;
	unsigned int i = 0;
	u32 doorbell = 0;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	/* Snapshot the ring state for whichever ring we are posting to. */
	if (is_header) {
		num_entries = pasync_ctx->async_header.num_entries;
		writables = min(pasync_ctx->async_header.writables,
				pasync_ctx->async_header.free_entries);
		pfree_link = pasync_ctx->async_header.free_list.next;
		host_write_num = pasync_ctx->async_header.host_write_ptr;
		ring_id = phwi_ctrlr->default_pdu_hdr.id;
	} else {
		num_entries = pasync_ctx->async_data.num_entries;
		writables = min(pasync_ctx->async_data.writables,
				pasync_ctx->async_data.free_entries);
		pfree_link = pasync_ctx->async_data.free_list.next;
		host_write_num = pasync_ctx->async_data.host_write_ptr;
		ring_id = phwi_ctrlr->default_pdu_data.id;
	}

	/* Post in multiples of 8 entries only. */
	writables = (writables / 8) * 8;
	if (writables) {
		for (i = 0; i < writables; i++) {
			pbusy_list =
			    hwi_get_async_busy_list(pasync_ctx, is_header,
						    host_write_num);
			pasync_handle =
			    list_entry(pfree_link, struct async_pdu_handle,
								link);
			WARN_ON(!pasync_handle);
			pasync_handle->consumed = 0;

			pfree_link = pfree_link->next;

			pasync_sge = hwi_get_ring_address(pasync_ctx,
						is_header, host_write_num);

			/* NOTE(review): hi takes address_lo and lo takes
			 * address_hi — presumably matching the hardware's
			 * SGE word order; confirm against the chip spec.
			 */
			pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
			pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;

			list_move(&pasync_handle->link, pbusy_list);

			host_write_num++;
			host_write_num = host_write_num % num_entries;
		}

		/* Commit the new write pointer and entry counts. */
		if (is_header) {
			pasync_ctx->async_header.host_write_ptr =
					host_write_num;
			pasync_ctx->async_header.free_entries -= writables;
			pasync_ctx->async_header.writables -= writables;
			pasync_ctx->async_header.busy_entries += writables;
		} else {
			pasync_ctx->async_data.host_write_ptr = host_write_num;
			pasync_ctx->async_data.free_entries -= writables;
			pasync_ctx->async_data.writables -= writables;
			pasync_ctx->async_data.busy_entries += writables;
		}

		/* Ring the doorbell with the count of posted entries. */
		doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
		doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
		doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
		doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
					<< DB_DEF_PDU_CQPROC_SHIFT;

		iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
	}
}
1274
/*
 * hwi_flush_default_pdu_buffer - drop a corrupt unsolicited data buffer
 * @phba:         adapter instance
 * @beiscsi_conn: connection the CQE belongs to
 * @pdpdu_cqe:    the default-PDU CQE being discarded (digest error path)
 *
 * Looks up the async handle for the CQE, catches the ring read pointer up
 * if the buffer was not yet marked consumed, frees all handles queued on
 * the CRI's wait queue, and reposts buffers to the affected ring.
 */
static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
					 struct beiscsi_conn *beiscsi_conn,
					 struct i_t_dpdu_cqe *pdpdu_cqe)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle = NULL;
	unsigned int cq_index = -1;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
					     pdpdu_cqe, &cq_index);
	/* This path is only taken for data-ring digest errors. */
	BUG_ON(pasync_handle->is_header != 0);
	if (pasync_handle->consumed == 0)
		hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
					   cq_index);

	hwi_free_async_msg(phba, pasync_handle->cri);
	hwi_post_async_buffers(phba, pasync_handle->is_header);
}
1297
1298static unsigned int
1299hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1300 struct beiscsi_hba *phba,
1301 struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
1302{
1303 struct list_head *plist;
1304 struct async_pdu_handle *pasync_handle;
1305 void *phdr = NULL;
1306 unsigned int hdr_len = 0, buf_len = 0;
1307 unsigned int status, index = 0, offset = 0;
1308 void *pfirst_buffer = NULL;
1309 unsigned int num_buf = 0;
1310
1311 plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1312
1313 list_for_each_entry(pasync_handle, plist, link) {
1314 if (index == 0) {
1315 phdr = pasync_handle->pbuffer;
1316 hdr_len = pasync_handle->buffer_len;
1317 } else {
1318 buf_len = pasync_handle->buffer_len;
1319 if (!num_buf) {
1320 pfirst_buffer = pasync_handle->pbuffer;
1321 num_buf++;
1322 }
1323 memcpy(pfirst_buffer + offset,
1324 pasync_handle->pbuffer, buf_len);
1325 offset = buf_len;
1326 }
1327 index++;
1328 }
1329
1330 status = beiscsi_process_async_pdu(beiscsi_conn, phba,
Jayamohan Kallickal7da50872010-01-05 05:04:12 +05301331 (beiscsi_conn->beiscsi_conn_cid -
1332 phba->fw_config.iscsi_cid_start),
1333 phdr, hdr_len, pfirst_buffer,
1334 buf_len);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301335
1336 if (status == 0)
1337 hwi_free_async_msg(phba, cri);
1338 return 0;
1339}
1340
/*
 * hwi_gather_async_pdu - accumulate one unsolicited PDU piece on its CRI
 * @phba:          adapter instance
 * @beiscsi_conn:  connection the buffer belongs to
 * @pasync_handle: the header or data buffer just received
 *
 * Header buffers start a new PDU on the CRI's wait queue and record the
 * expected data length parsed from the PDU base header; data buffers are
 * appended and counted.  In either case, once all expected bytes have
 * arrived the assembled PDU is forwarded via hwi_fwd_async_msg().
 * Returns the forwarding status (0 when nothing was forwarded).
 */
static unsigned int
hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
		     struct beiscsi_hba *phba,
		     struct async_pdu_handle *pasync_handle)
{
	struct hwi_async_pdu_context *pasync_ctx;
	struct hwi_controller *phwi_ctrlr;
	unsigned int bytes_needed = 0, status = 0;
	unsigned short cri = pasync_handle->cri;
	struct pdu_base *ppdu;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	list_del(&pasync_handle->link);
	if (pasync_handle->is_header) {
		pasync_ctx->async_header.busy_entries--;
		/* A second header before the first PDU completed is fatal. */
		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
			hwi_free_async_msg(phba, cri);
			BUG();
		}

		pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
		pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
		pasync_ctx->async_entry[cri].wait_queue.hdr_len =
			(unsigned short)pasync_handle->buffer_len;
		list_add_tail(&pasync_handle->link,
			      &pasync_ctx->async_entry[cri].wait_queue.list);

		/* Reassemble the 24-bit DataSegmentLength from the split
		 * hi/lo fields of the PDU base header.
		 */
		ppdu = pasync_handle->pbuffer;
		bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
			data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
			0xFFFF0000) | ((be16_to_cpu((ppdu->
			dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
			& PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));

		if (status == 0) {
			pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
			    bytes_needed;

			/* Immediate PDU with no payload: forward right away. */
			if (bytes_needed == 0)
				status = hwi_fwd_async_msg(beiscsi_conn, phba,
							   pasync_ctx, cri);
		}
	} else {
		pasync_ctx->async_data.busy_entries--;
		/* Data without a preceding header is silently dropped. */
		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
			list_add_tail(&pasync_handle->link,
				      &pasync_ctx->async_entry[cri].wait_queue.
				      list);
			pasync_ctx->async_entry[cri].wait_queue.
				bytes_received +=
				(unsigned short)pasync_handle->buffer_len;

			if (pasync_ctx->async_entry[cri].wait_queue.
			    bytes_received >=
			    pasync_ctx->async_entry[cri].wait_queue.
			    bytes_needed)
				status = hwi_fwd_async_msg(beiscsi_conn, phba,
							   pasync_ctx, cri);
		}
	}
	return status;
}
1405
/*
 * hwi_process_default_pdu_ring - handle one unsolicited header/data CQE
 * @beiscsi_conn: connection the CQE belongs to
 * @phba:         adapter instance
 * @pdpdu_cqe:    the default-PDU CQE to process
 *
 * Resolves the CQE to its async buffer handle, catches up the ring read
 * pointer if the buffer was not already consumed, folds the buffer into
 * the CRI's partially-assembled PDU, and reposts free buffers to the ring.
 */
static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
					 struct beiscsi_hba *phba,
					 struct i_t_dpdu_cqe *pdpdu_cqe)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle = NULL;
	unsigned int cq_index = -1;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
					     pdpdu_cqe, &cq_index);

	if (pasync_handle->consumed == 0)
		hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
					   cq_index);
	hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
	hwi_post_async_buffers(phba, pasync_handle->is_header);
}
1426
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05301427
/*
 * beiscsi_process_cq - drain one completion queue
 * @pbe_eq: event-queue object whose CQ is to be processed
 *
 * Consumes valid CQEs until none remain: resolves the owning connection
 * (by ICD index in ring_mode, or by rebased CID otherwise), dispatches by
 * CQE code (solicited completions, driver messages, unsolicited header/
 * data, and the various error/kill notifications), clears each entry's
 * valid bit and advances the tail.  The CQ doorbell is rung every 32
 * entries without rearm, and once more at the end with rearm.  Returns
 * the total number of entries processed; returns 0 early if a CQE maps
 * to no live connection.
 */
static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
{
	struct be_queue_info *cq;
	struct sol_cqe *sol;
	struct dmsg_cqe *dmsg;
	unsigned int num_processed = 0;
	unsigned int tot_nump = 0;
	struct beiscsi_conn *beiscsi_conn;
	struct sgl_handle *psgl_handle = NULL;
	struct beiscsi_hba *phba;

	cq = pbe_eq->cq;
	sol = queue_tail_node(cq);
	phba = pbe_eq->phba;

	while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
	       CQE_VALID_MASK) {
		/* CQEs arrive little-endian; convert in place first. */
		be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));

		if (ring_mode) {
			/* ICD index identifies the SGL handle, hence the CID. */
			psgl_handle = phba->sgl_hndl_array[((sol->
				      dw[offsetof(struct amap_sol_cqe_ring,
				      icd_index) / 32] & SOL_ICD_INDEX_MASK)
				      >> 6)];
			beiscsi_conn = phba->conn_table[psgl_handle->cid];
			if (!beiscsi_conn || !beiscsi_conn->ep) {
				shost_printk(KERN_WARNING, phba->shost,
				     "Connection table empty for cid = %d\n",
				     psgl_handle->cid);
				return 0;
			}

		} else {
			/* CID from the CQE is rebased by iscsi_cid_start. */
			beiscsi_conn = phba->conn_table[(u32) ((sol->
				 dw[offsetof(struct amap_sol_cqe, cid) / 32] &
				 SOL_CID_MASK) >> 6) -
				 phba->fw_config.iscsi_cid_start];
			if (!beiscsi_conn || !beiscsi_conn->ep) {
				shost_printk(KERN_WARNING, phba->shost,
				      "Connection table empty for cid = %d\n",
				      (u32)(sol->dw[offsetof(struct amap_sol_cqe,
				      cid) / 32] & SOL_CID_MASK) >> 6);
				return 0;
			}
		}

		/* Acknowledge in batches of 32 so the queue cannot stall. */
		if (num_processed >= 32) {
			hwi_ring_cq_db(phba, cq->id,
					num_processed, 0, 0);
			tot_nump += num_processed;
			num_processed = 0;
		}

		switch ((u32) sol->dw[offsetof(struct amap_sol_cqe, code) /
			32] & CQE_CODE_MASK) {
		case SOL_CMD_COMPLETE:
			hwi_complete_cmd(beiscsi_conn, phba, sol);
			break;
		case DRIVERMSG_NOTIFY:
			SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY \n");
			dmsg = (struct dmsg_cqe *)sol;
			hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
			break;
		case UNSOL_HDR_NOTIFY:
			SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_ NOTIFY\n");
			hwi_process_default_pdu_ring(beiscsi_conn, phba,
					     (struct i_t_dpdu_cqe *)sol);
			break;
		case UNSOL_DATA_NOTIFY:
			SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n");
			hwi_process_default_pdu_ring(beiscsi_conn, phba,
					     (struct i_t_dpdu_cqe *)sol);
			break;
		case CXN_INVALIDATE_INDEX_NOTIFY:
		case CMD_INVALIDATED_NOTIFY:
		case CXN_INVALIDATE_NOTIFY:
			/* Expected during invalidation; nothing to do. */
			SE_DEBUG(DBG_LVL_1,
				 "Ignoring CQ Error notification for cmd/cxn"
				 "invalidate\n");
			break;
		case SOL_CMD_KILLED_DATA_DIGEST_ERR:
		case CMD_KILLED_INVALID_STATSN_RCVD:
		case CMD_KILLED_INVALID_R2T_RCVD:
		case CMD_CXN_KILLED_LUN_INVALID:
		case CMD_CXN_KILLED_ICD_INVALID:
		case CMD_CXN_KILLED_ITT_INVALID:
		case CMD_CXN_KILLED_SEQ_OUTOFORDER:
		case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
			/* Command-level errors: log only, connection stays up. */
			if (ring_mode) {
				SE_DEBUG(DBG_LVL_1,
				 "CQ Error notification for cmd.. "
				 "code %d cid 0x%x\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK, psgl_handle->cid);
			} else {
				SE_DEBUG(DBG_LVL_1,
				 "CQ Error notification for cmd.. "
				 "code %d cid 0x%x\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & SOL_CID_MASK));
			}
			break;
		case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
			SE_DEBUG(DBG_LVL_1,
				 "Digest error on def pdu ring, dropping..\n");
			hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
					     (struct i_t_dpdu_cqe *) sol);
			break;
		case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
		case CXN_KILLED_BURST_LEN_MISMATCH:
		case CXN_KILLED_AHS_RCVD:
		case CXN_KILLED_HDR_DIGEST_ERR:
		case CXN_KILLED_UNKNOWN_HDR:
		case CXN_KILLED_STALE_ITT_TTT_RCVD:
		case CXN_KILLED_INVALID_ITT_TTT_RCVD:
		case CXN_KILLED_TIMED_OUT:
		case CXN_KILLED_FIN_RCVD:
		case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
		case CXN_KILLED_BAD_WRB_INDEX_ERROR:
		case CXN_KILLED_OVER_RUN_RESIDUAL:
		case CXN_KILLED_UNDER_RUN_RESIDUAL:
		case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
			/* Connection-level errors: fail the conn to libiscsi. */
			if (ring_mode) {
				SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
				 "0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK, psgl_handle->cid);
			} else {
				SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
				 "0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK));
			}
			iscsi_conn_failure(beiscsi_conn->conn,
					   ISCSI_ERR_CONN_FAILED);
			break;
		case CXN_KILLED_RST_SENT:
		case CXN_KILLED_RST_RCVD:
			if (ring_mode) {
				SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset"
				 "received/sent on CID 0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK, psgl_handle->cid);
			} else {
				SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset"
				 "received/sent on CID 0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK));
			}
			iscsi_conn_failure(beiscsi_conn->conn,
					   ISCSI_ERR_CONN_FAILED);
			break;
		default:
			SE_DEBUG(DBG_LVL_1, "CQ Error Invalid code= %d "
				 "received on CID 0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK));
			break;
		}

		/* Hand the entry back to hardware and advance. */
		AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
		queue_tail_inc(cq);
		sol = queue_tail_node(cq);
		num_processed++;
	}

	if (num_processed > 0) {
		tot_nump += num_processed;
		/* Final doorbell rearms the CQ for new events. */
		hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
	}
	return tot_nump;
}
1608
/*
 * beiscsi_process_all_cqs - deferred (workqueue) CQ processing
 * @work: embedded work_struct inside struct beiscsi_hba
 *
 * Runs from phba->work_cqs.  Selects the EQ object that owns the default
 * CQ — the extra entry past the per-CPU EQs when MSI-X is enabled, else
 * EQ 0 — then consumes the todo_* flags set by the ISR under isr_lock and
 * drains the CQ for todo_cq.
 *
 * NOTE(review): the todo_mcc_cq branch only clears the flag and processes
 * nothing — presumably MCC CQ handling is done elsewhere or was not yet
 * wired up at this point; confirm against the ISR before relying on it.
 */
static void beiscsi_process_all_cqs(struct work_struct *work)
{
	unsigned long flags;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_obj *pbe_eq;
	struct beiscsi_hba *phba =
	    container_of(work, struct beiscsi_hba, work_cqs);

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	if (phba->msix_enabled)
		pbe_eq = &phwi_context->be_eq[phba->num_cpus];
	else
		pbe_eq = &phwi_context->be_eq[0];

	if (phba->todo_mcc_cq) {
		spin_lock_irqsave(&phba->isr_lock, flags);
		phba->todo_mcc_cq = 0;
		spin_unlock_irqrestore(&phba->isr_lock, flags);
	}

	if (phba->todo_cq) {
		spin_lock_irqsave(&phba->isr_lock, flags);
		phba->todo_cq = 0;
		spin_unlock_irqrestore(&phba->isr_lock, flags);
		beiscsi_process_cq(pbe_eq);
	}
}
1638
1639static int be_iopoll(struct blk_iopoll *iop, int budget)
1640{
1641 static unsigned int ret;
1642 struct beiscsi_hba *phba;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05301643 struct be_eq_obj *pbe_eq;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301644
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05301645 pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
1646 ret = beiscsi_process_cq(pbe_eq);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301647 if (ret < budget) {
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05301648 phba = pbe_eq->phba;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301649 blk_iopoll_complete(iop);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05301650 SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id);
1651 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301652 }
1653 return ret;
1654}
1655
1656static void
1657hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
1658 unsigned int num_sg, struct beiscsi_io_task *io_task)
1659{
1660 struct iscsi_sge *psgl;
1661 unsigned short sg_len, index;
1662 unsigned int sge_len = 0;
1663 unsigned long long addr;
1664 struct scatterlist *l_sg;
1665 unsigned int offset;
1666
1667 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1668 io_task->bhs_pa.u.a32.address_lo);
1669 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1670 io_task->bhs_pa.u.a32.address_hi);
1671
1672 l_sg = sg;
1673 for (index = 0; (index < num_sg) && (index < 2); index++, sg_next(sg)) {
1674 if (index == 0) {
1675 sg_len = sg_dma_len(sg);
1676 addr = (u64) sg_dma_address(sg);
1677 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1678 (addr & 0xFFFFFFFF));
1679 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1680 (addr >> 32));
1681 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1682 sg_len);
1683 sge_len = sg_len;
1684 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1685 1);
1686 } else {
1687 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1688 0);
1689 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
1690 pwrb, sge_len);
1691 sg_len = sg_dma_len(sg);
1692 addr = (u64) sg_dma_address(sg);
1693 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
1694 (addr & 0xFFFFFFFF));
1695 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
1696 (addr >> 32));
1697 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
1698 sg_len);
1699 }
1700 }
1701 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1702 memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
1703
1704 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
1705
1706 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1707 io_task->bhs_pa.u.a32.address_hi);
1708 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1709 io_task->bhs_pa.u.a32.address_lo);
1710
1711 if (num_sg == 2)
1712 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 1);
1713 sg = l_sg;
1714 psgl++;
1715 psgl++;
1716 offset = 0;
1717 for (index = 0; index < num_sg; index++, sg_next(sg), psgl++) {
1718 sg_len = sg_dma_len(sg);
1719 addr = (u64) sg_dma_address(sg);
1720 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1721 (addr & 0xFFFFFFFF));
1722 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1723 (addr >> 32));
1724 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
1725 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
1726 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1727 offset += sg_len;
1728 }
1729 psgl--;
1730 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1731}
1732
1733static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
1734{
1735 struct iscsi_sge *psgl;
1736 unsigned long long addr;
1737 struct beiscsi_io_task *io_task = task->dd_data;
1738 struct beiscsi_conn *beiscsi_conn = io_task->conn;
1739 struct beiscsi_hba *phba = beiscsi_conn->phba;
1740
1741 io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
1742 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1743 io_task->bhs_pa.u.a32.address_lo);
1744 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1745 io_task->bhs_pa.u.a32.address_hi);
1746
1747 if (task->data) {
1748 if (task->data_count) {
1749 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
1750 addr = (u64) pci_map_single(phba->pcidev,
1751 task->data,
1752 task->data_count, 1);
1753 } else {
1754 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1755 addr = 0;
1756 }
1757 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1758 (addr & 0xFFFFFFFF));
1759 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1760 (addr >> 32));
1761 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1762 task->data_count);
1763
1764 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
1765 } else {
1766 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1767 addr = 0;
1768 }
1769
1770 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1771
1772 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
1773
1774 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1775 io_task->bhs_pa.u.a32.address_hi);
1776 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1777 io_task->bhs_pa.u.a32.address_lo);
1778 if (task->data) {
1779 psgl++;
1780 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
1781 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
1782 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
1783 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
1784 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
1785 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1786
1787 psgl++;
1788 if (task->data) {
1789 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1790 (addr & 0xFFFFFFFF));
1791 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1792 (addr >> 32));
1793 }
1794 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
1795 }
1796 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1797}
1798
1799static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
1800{
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05301801 unsigned int num_cq_pages, num_async_pdu_buf_pages;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301802 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
1803 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
1804
1805 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
1806 sizeof(struct sol_cqe));
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301807 num_async_pdu_buf_pages =
1808 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1809 phba->params.defpdu_hdr_sz);
1810 num_async_pdu_buf_sgl_pages =
1811 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1812 sizeof(struct phys_addr));
1813 num_async_pdu_data_pages =
1814 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1815 phba->params.defpdu_data_sz);
1816 num_async_pdu_data_sgl_pages =
1817 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1818 sizeof(struct phys_addr));
1819
1820 phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
1821
1822 phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
1823 BE_ISCSI_PDU_HEADER_SIZE;
1824 phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
1825 sizeof(struct hwi_context_memory);
1826
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301827
1828 phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
1829 * (phba->params.wrbs_per_cxn)
1830 * phba->params.cxns_per_ctrl;
1831 wrb_sz_per_cxn = sizeof(struct wrb_handle) *
1832 (phba->params.wrbs_per_cxn);
1833 phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
1834 phba->params.cxns_per_ctrl);
1835
1836 phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
1837 phba->params.icds_per_ctrl;
1838 phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
1839 phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
1840
1841 phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
1842 num_async_pdu_buf_pages * PAGE_SIZE;
1843 phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
1844 num_async_pdu_data_pages * PAGE_SIZE;
1845 phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
1846 num_async_pdu_buf_sgl_pages * PAGE_SIZE;
1847 phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
1848 num_async_pdu_data_sgl_pages * PAGE_SIZE;
1849 phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
1850 phba->params.asyncpdus_per_ctrl *
1851 sizeof(struct async_pdu_handle);
1852 phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
1853 phba->params.asyncpdus_per_ctrl *
1854 sizeof(struct async_pdu_handle);
1855 phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
1856 sizeof(struct hwi_async_pdu_context) +
1857 (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
1858}
1859
/**
 * beiscsi_alloc_mem - allocate every region sized by beiscsi_find_mem_req()
 * @phba: adapter instance; phba->mem_req[] must already be filled in
 *
 * Allocates the hwi_controller workspace, the init_mem descriptor table,
 * and one or more DMA-coherent fragments per SE_MEM_MAX region. A region
 * that cannot be allocated contiguously is split: each failed
 * pci_alloc_consistent() retry shrinks the request (to the previous power
 * of two, or by half) until it succeeds or drops below BE_MIN_MEM_SIZE.
 *
 * Returns 0 on success or -ENOMEM; on failure everything allocated so
 * far is rolled back.
 */
static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
{
	struct be_mem_descriptor *mem_descr;
	dma_addr_t bus_add;
	struct mem_array *mem_arr, *mem_arr_orig;
	unsigned int i, j, alloc_size, curr_alloc_size;

	phba->phwi_ctrlr = kmalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
	if (!phba->phwi_ctrlr)
		return -ENOMEM;

	phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
				 GFP_KERNEL);
	if (!phba->init_mem) {
		kfree(phba->phwi_ctrlr);
		return -ENOMEM;
	}

	/* Scratch array of fragment records, reused for every region. */
	mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
			       GFP_KERNEL);
	if (!mem_arr_orig) {
		kfree(phba->init_mem);
		kfree(phba->phwi_ctrlr);
		return -ENOMEM;
	}

	mem_descr = phba->init_mem;
	for (i = 0; i < SE_MEM_MAX; i++) {
		j = 0;
		mem_arr = mem_arr_orig;
		alloc_size = phba->mem_req[i];
		memset(mem_arr, 0, sizeof(struct mem_array) *
		       BEISCSI_MAX_FRAGS_INIT);
		/* Each fragment is capped at be_max_phys_size KB. */
		curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
		do {
			mem_arr->virtual_address = pci_alloc_consistent(
							phba->pcidev,
							curr_alloc_size,
							&bus_add);
			if (!mem_arr->virtual_address) {
				if (curr_alloc_size <= BE_MIN_MEM_SIZE)
					goto free_mem;
				/* Shrink: snap down to a power of two, or
				 * halve if already a power of two. */
				if (curr_alloc_size -
					rounddown_pow_of_two(curr_alloc_size))
					curr_alloc_size = rounddown_pow_of_two
							     (curr_alloc_size);
				else
					curr_alloc_size = curr_alloc_size / 2;
			} else {
				mem_arr->bus_address.u.
				    a64.address = (__u64) bus_add;
				mem_arr->size = curr_alloc_size;
				alloc_size -= curr_alloc_size;
				curr_alloc_size = min(be_max_phys_size *
						      1024, alloc_size);
				j++;
				mem_arr++;
			}
		} while (alloc_size);
		mem_descr->num_elements = j;
		mem_descr->size_in_bytes = phba->mem_req[i];
		mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
					       GFP_KERNEL);
		if (!mem_descr->mem_array)
			goto free_mem;

		memcpy(mem_descr->mem_array, mem_arr_orig,
		       sizeof(struct mem_array) * j);
		mem_descr++;
	}
	kfree(mem_arr_orig);
	return 0;
free_mem:
	/* Roll back: free the fragments of the current (partial) region,
	 * then walk backwards through the completed regions. On entry,
	 * j counts the fragments allocated for region i so far.
	 * NOTE(review): for the partial region, mem_descr->mem_array has
	 * not been assigned yet — this frees via the descriptor only for
	 * completed regions; presumably the partial region's fragments are
	 * covered by the first pass because num_elements = j was just set.
	 * Verify that mem_descr->mem_array is valid on the first pass. */
	mem_descr->num_elements = j;
	while ((i) || (j)) {
		for (j = mem_descr->num_elements; j > 0; j--) {
			pci_free_consistent(phba->pcidev,
					    mem_descr->mem_array[j - 1].size,
					    mem_descr->mem_array[j - 1].
					    virtual_address,
					    mem_descr->mem_array[j - 1].
					    bus_address.u.a64.address);
		}
		if (i) {
			i--;
			kfree(mem_descr->mem_array);
			mem_descr--;
		}
	}
	kfree(mem_arr_orig);
	kfree(phba->init_mem);
	kfree(phba->phwi_ctrlr);
	return -ENOMEM;
}
1954
/**
 * beiscsi_get_memory - size and then allocate all driver memory regions
 * @phba: adapter instance
 *
 * Returns 0 on success or the negative errno from beiscsi_alloc_mem().
 */
static int beiscsi_get_memory(struct beiscsi_hba *phba)
{
	int status;

	/* Fill phba->mem_req[] first, then allocate everything in one pass. */
	beiscsi_find_mem_req(phba);
	status = beiscsi_alloc_mem(phba);
	return status;
}
1960
1961static void iscsi_init_global_templates(struct beiscsi_hba *phba)
1962{
1963 struct pdu_data_out *pdata_out;
1964 struct pdu_nop_out *pnop_out;
1965 struct be_mem_descriptor *mem_descr;
1966
1967 mem_descr = phba->init_mem;
1968 mem_descr += ISCSI_MEM_GLOBAL_HEADER;
1969 pdata_out =
1970 (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
1971 memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
1972
1973 AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
1974 IIOC_SCSI_DATA);
1975
1976 pnop_out =
1977 (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
1978 virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
1979
1980 memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
1981 AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
1982 AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
1983 AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
1984}
1985
/**
 * beiscsi_init_wrb_handle - build per-connection WRB handle tables and
 * attach each handle to its hardware WRB
 * @phba: adapter instance with init_mem regions HWI_MEM_WRBH/HWI_MEM_WRB
 *
 * Both memory regions may be fragmented across several mem_array entries;
 * the idx/num_cxn_* bookkeeping advances to the next fragment when the
 * current one is exhausted.
 *
 * NOTE(review): the two kzalloc() results are never checked — a failed
 * allocation is dereferenced in the j-loop. Fixing this needs an int
 * return and caller changes, so it is only flagged here.
 * NOTE(review): the first loop runs index < cxns_per_ctrl * 2 while the
 * second runs index < cxns_per_ctrl (both stepping by 2) — presumably
 * only even slots hold contexts, but the asymmetric bounds look
 * suspicious; confirm against how wrb_context[] is consumed.
 * NOTE(review): num_cxn_wrb is size / sizeof(wrb) * wrbs_per_cxn — by
 * C precedence that multiplies, it does not divide by
 * (sizeof(wrb) * wrbs_per_cxn) as the symmetric num_cxn_wrbh does;
 * verify this over-count is intentional.
 */
static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
{
	struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
	struct wrb_handle *pwrb_handle;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_wrb_context *pwrb_context;
	struct iscsi_wrb *pwrb;
	unsigned int num_cxn_wrbh;
	unsigned int num_cxn_wrb, j, idx, index;

	mem_descr_wrbh = phba->init_mem;
	mem_descr_wrbh += HWI_MEM_WRBH;

	mem_descr_wrb = phba->init_mem;
	mem_descr_wrb += HWI_MEM_WRB;

	idx = 0;
	pwrb_handle = mem_descr_wrbh->mem_array[idx].virtual_address;
	/* How many connections' worth of handles fit in this fragment. */
	num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
			((sizeof(struct wrb_handle)) *
			 phba->params.wrbs_per_cxn));
	phwi_ctrlr = phba->phwi_ctrlr;

	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
		pwrb_context = &phwi_ctrlr->wrb_context[index];
		/* Two parallel lookup tables of handle pointers. */
		pwrb_context->pwrb_handle_base =
				kzalloc(sizeof(struct wrb_handle *) *
					phba->params.wrbs_per_cxn, GFP_KERNEL);
		pwrb_context->pwrb_handle_basestd =
				kzalloc(sizeof(struct wrb_handle *) *
					phba->params.wrbs_per_cxn, GFP_KERNEL);
		if (num_cxn_wrbh) {
			pwrb_context->alloc_index = 0;
			pwrb_context->wrb_handles_available = 0;
			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
				pwrb_context->pwrb_handle_base[j] = pwrb_handle;
				pwrb_context->pwrb_handle_basestd[j] =
								pwrb_handle;
				pwrb_context->wrb_handles_available++;
				pwrb_handle->wrb_index = j;
				pwrb_handle++;
			}
			pwrb_context->free_index = 0;
			num_cxn_wrbh--;
		} else {
			/* Fragment exhausted — advance to the next one. */
			idx++;
			pwrb_handle =
			    mem_descr_wrbh->mem_array[idx].virtual_address;
			num_cxn_wrbh =
			    ((mem_descr_wrbh->mem_array[idx].size) /
			     ((sizeof(struct wrb_handle)) *
			      phba->params.wrbs_per_cxn));
			pwrb_context->alloc_index = 0;
			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
				pwrb_context->pwrb_handle_base[j] = pwrb_handle;
				pwrb_context->pwrb_handle_basestd[j] =
				    pwrb_handle;
				pwrb_context->wrb_handles_available++;
				pwrb_handle->wrb_index = j;
				pwrb_handle++;
			}
			pwrb_context->free_index = 0;
			num_cxn_wrbh--;
		}
	}
	/* Second pass: point every handle at its hardware WRB. */
	idx = 0;
	pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
	num_cxn_wrb =
	    ((mem_descr_wrb->mem_array[idx].size) / (sizeof(struct iscsi_wrb)) *
	     phba->params.wrbs_per_cxn);

	for (index = 0; index < phba->params.cxns_per_ctrl; index += 2) {
		pwrb_context = &phwi_ctrlr->wrb_context[index];
		if (num_cxn_wrb) {
			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
				pwrb_handle = pwrb_context->pwrb_handle_base[j];
				pwrb_handle->pwrb = pwrb;
				pwrb++;
			}
			num_cxn_wrb--;
		} else {
			/* Fragment exhausted — advance to the next one. */
			idx++;
			pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
			num_cxn_wrb = ((mem_descr_wrb->mem_array[idx].size) /
				       (sizeof(struct iscsi_wrb)) *
				       phba->params.wrbs_per_cxn);
			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
				pwrb_handle = pwrb_context->pwrb_handle_base[j];
				pwrb_handle->pwrb = pwrb;
				pwrb++;
			}
			num_cxn_wrb--;
		}
	}
}
2081
/**
 * hwi_init_async_pdu_ctx - initialize the async (default) PDU context
 * @phba: adapter instance with all HWI_MEM_ASYNC_* regions allocated
 *
 * Wires the async PDU context (which lives in HWI_MEM_ASYNC_PDU_CONTEXT)
 * to the header/data buffer pools, SGL rings, and handle arrays, then
 * carves the pools into per-PDU handles and threads them onto the free
 * lists. Each region is assumed to be a single contiguous fragment
 * (only mem_array[0] is consulted).
 */
static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hba_parameters *p = &phba->params;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_header_h, *pasync_data_h;
	unsigned int index;
	struct be_mem_descriptor *mem_descr;

	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;

	phwi_ctrlr = phba->phwi_ctrlr;
	/* The context structure itself lives inside the region. */
	phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
		mem_descr->mem_array[0].virtual_address;
	pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
	memset(pasync_ctx, 0, sizeof(*pasync_ctx));

	pasync_ctx->async_header.num_entries = p->asyncpdus_per_ctrl;
	pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
	pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
	pasync_ctx->async_data.num_entries = p->asyncpdus_per_ctrl;

	/* Header buffer pool. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF"
			 "va=%p \n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address \n");

	pasync_ctx->async_header.va_base =
			mem_descr->mem_array[0].virtual_address;

	pasync_ctx->async_header.pa_base.u.a64.address =
			mem_descr->mem_array[0].bus_address.u.a64.address;

	/* Header SGL ring. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_RING;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING"
			 "va=%p \n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address \n");
	pasync_ctx->async_header.ring_base =
			mem_descr->mem_array[0].virtual_address;

	/* Header handle array. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE"
			 "va=%p \n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address \n");

	pasync_ctx->async_header.handle_base =
			mem_descr->mem_array[0].virtual_address;
	pasync_ctx->async_header.writables = 0;
	INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);

	/* Data buffer pool. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_BUF;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
			 "va=%p \n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address \n");
	pasync_ctx->async_data.va_base =
			mem_descr->mem_array[0].virtual_address;
	pasync_ctx->async_data.pa_base.u.a64.address =
			mem_descr->mem_array[0].bus_address.u.a64.address;

	/* Data SGL ring. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_RING;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING"
			 "va=%p \n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address \n");

	pasync_ctx->async_data.ring_base =
			mem_descr->mem_array[0].virtual_address;

	/* Data handle array. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
	if (!mem_descr->mem_array[0].virtual_address)
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address \n");

	pasync_ctx->async_data.handle_base =
			mem_descr->mem_array[0].virtual_address;
	pasync_ctx->async_data.writables = 0;
	INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);

	pasync_header_h =
		(struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
	pasync_data_h =
		(struct async_pdu_handle *)pasync_ctx->async_data.handle_base;

	/* Carve the pools into one header + one data handle per PDU and
	 * put every handle on its free list. */
	for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
		pasync_header_h->cri = -1;	/* -1 = not owned by any connection */
		pasync_header_h->index = (char)index;
		INIT_LIST_HEAD(&pasync_header_h->link);
		pasync_header_h->pbuffer =
			(void *)((unsigned long)
			(pasync_ctx->async_header.va_base) +
			(p->defpdu_hdr_sz * index));

		pasync_header_h->pa.u.a64.address =
			pasync_ctx->async_header.pa_base.u.a64.address +
			(p->defpdu_hdr_sz * index);

		list_add_tail(&pasync_header_h->link,
			      &pasync_ctx->async_header.free_list);
		pasync_header_h++;
		pasync_ctx->async_header.free_entries++;
		pasync_ctx->async_header.writables++;

		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
			       header_busy_list);
		pasync_data_h->cri = -1;
		pasync_data_h->index = (char)index;
		INIT_LIST_HEAD(&pasync_data_h->link);
		pasync_data_h->pbuffer =
			(void *)((unsigned long)
			(pasync_ctx->async_data.va_base) +
			(p->defpdu_data_sz * index));

		pasync_data_h->pa.u.a64.address =
		    pasync_ctx->async_data.pa_base.u.a64.address +
		    (p->defpdu_data_sz * index);

		list_add_tail(&pasync_data_h->link,
			      &pasync_ctx->async_data.free_list);
		pasync_data_h++;
		pasync_ctx->async_data.free_entries++;
		pasync_ctx->async_data.writables++;

		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
	}

	pasync_ctx->async_header.host_write_ptr = 0;
	pasync_ctx->async_header.ep_read_ptr = -1;
	pasync_ctx->async_data.host_write_ptr = 0;
	pasync_ctx->async_data.ep_read_ptr = -1;
}
2239
2240static int
2241be_sgl_create_contiguous(void *virtual_address,
2242 u64 physical_address, u32 length,
2243 struct be_dma_mem *sgl)
2244{
2245 WARN_ON(!virtual_address);
2246 WARN_ON(!physical_address);
2247 WARN_ON(!length > 0);
2248 WARN_ON(!sgl);
2249
2250 sgl->va = virtual_address;
2251 sgl->dma = physical_address;
2252 sgl->size = length;
2253
2254 return 0;
2255}
2256
2257static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
2258{
2259 memset(sgl, 0, sizeof(*sgl));
2260}
2261
2262static void
2263hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
2264 struct mem_array *pmem, struct be_dma_mem *sgl)
2265{
2266 if (sgl->va)
2267 be_sgl_destroy_contiguous(sgl);
2268
2269 be_sgl_create_contiguous(pmem->virtual_address,
2270 pmem->bus_address.u.a64.address,
2271 pmem->size, sgl);
2272}
2273
2274static void
2275hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
2276 struct mem_array *pmem, struct be_dma_mem *sgl)
2277{
2278 if (sgl->va)
2279 be_sgl_destroy_contiguous(sgl);
2280
2281 be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
2282 pmem->bus_address.u.a64.address,
2283 pmem->size, sgl);
2284}
2285
2286static int be_fill_queue(struct be_queue_info *q,
2287 u16 len, u16 entry_size, void *vaddress)
2288{
2289 struct be_dma_mem *mem = &q->dma_mem;
2290
2291 memset(q, 0, sizeof(*q));
2292 q->len = len;
2293 q->entry_size = entry_size;
2294 mem->size = len * entry_size;
2295 mem->va = vaddress;
2296 if (!mem->va)
2297 return -ENOMEM;
2298 memset(mem->va, 0, mem->size);
2299 return 0;
2300}
2301
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302302static int beiscsi_create_eqs(struct beiscsi_hba *phba,
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302303 struct hwi_context_memory *phwi_context)
2304{
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302305 unsigned int i, num_eq_pages;
2306 int ret, eq_for_mcc;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302307 struct be_queue_info *eq;
2308 struct be_dma_mem *mem;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302309 void *eq_vaddress;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302310 dma_addr_t paddr;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302311
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302312 num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
2313 sizeof(struct be_eq_entry));
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302314
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302315 if (phba->msix_enabled)
2316 eq_for_mcc = 1;
2317 else
2318 eq_for_mcc = 0;
2319 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2320 eq = &phwi_context->be_eq[i].q;
2321 mem = &eq->dma_mem;
2322 phwi_context->be_eq[i].phba = phba;
2323 eq_vaddress = pci_alloc_consistent(phba->pcidev,
2324 num_eq_pages * PAGE_SIZE,
2325 &paddr);
2326 if (!eq_vaddress)
2327 goto create_eq_error;
2328
2329 mem->va = eq_vaddress;
2330 ret = be_fill_queue(eq, phba->params.num_eq_entries,
2331 sizeof(struct be_eq_entry), eq_vaddress);
2332 if (ret) {
2333 shost_printk(KERN_ERR, phba->shost,
2334 "be_fill_queue Failed for EQ \n");
2335 goto create_eq_error;
2336 }
2337
2338 mem->dma = paddr;
2339 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2340 phwi_context->cur_eqd);
2341 if (ret) {
2342 shost_printk(KERN_ERR, phba->shost,
2343 "beiscsi_cmd_eq_create"
2344 "Failedfor EQ \n");
2345 goto create_eq_error;
2346 }
2347 SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302348 }
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302349 return 0;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302350create_eq_error:
2351 for (i = 0; i < (phba->num_cpus + 1); i++) {
2352 eq = &phwi_context->be_eq[i].q;
2353 mem = &eq->dma_mem;
2354 if (mem->va)
2355 pci_free_consistent(phba->pcidev, num_eq_pages
2356 * PAGE_SIZE,
2357 mem->va, mem->dma);
2358 }
2359 return ret;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302360}
2361
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302362static int beiscsi_create_cqs(struct beiscsi_hba *phba,
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302363 struct hwi_context_memory *phwi_context)
2364{
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302365 unsigned int i, num_cq_pages;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302366 int ret;
2367 struct be_queue_info *cq, *eq;
2368 struct be_dma_mem *mem;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302369 struct be_eq_obj *pbe_eq;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302370 void *cq_vaddress;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302371 dma_addr_t paddr;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302372
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302373 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2374 sizeof(struct sol_cqe));
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302375
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302376 for (i = 0; i < phba->num_cpus; i++) {
2377 cq = &phwi_context->be_cq[i];
2378 eq = &phwi_context->be_eq[i].q;
2379 pbe_eq = &phwi_context->be_eq[i];
2380 pbe_eq->cq = cq;
2381 pbe_eq->phba = phba;
2382 mem = &cq->dma_mem;
2383 cq_vaddress = pci_alloc_consistent(phba->pcidev,
2384 num_cq_pages * PAGE_SIZE,
2385 &paddr);
2386 if (!cq_vaddress)
2387 goto create_cq_error;
Jayamohan Kallickal7da50872010-01-05 05:04:12 +05302388 ret = be_fill_queue(cq, phba->params.num_cq_entries,
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302389 sizeof(struct sol_cqe), cq_vaddress);
2390 if (ret) {
2391 shost_printk(KERN_ERR, phba->shost,
2392 "be_fill_queue Failed for ISCSI CQ \n");
2393 goto create_cq_error;
2394 }
2395
2396 mem->dma = paddr;
2397 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
2398 false, 0);
2399 if (ret) {
2400 shost_printk(KERN_ERR, phba->shost,
2401 "beiscsi_cmd_eq_create"
2402 "Failed for ISCSI CQ \n");
2403 goto create_cq_error;
2404 }
2405 SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n",
2406 cq->id, eq->id);
2407 SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302408 }
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302409 return 0;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302410
2411create_cq_error:
2412 for (i = 0; i < phba->num_cpus; i++) {
2413 cq = &phwi_context->be_cq[i];
2414 mem = &cq->dma_mem;
2415 if (mem->va)
2416 pci_free_consistent(phba->pcidev, num_cq_pages
2417 * PAGE_SIZE,
2418 mem->va, mem->dma);
2419 }
2420 return ret;
2421
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302422}
2423
2424static int
2425beiscsi_create_def_hdr(struct beiscsi_hba *phba,
2426 struct hwi_context_memory *phwi_context,
2427 struct hwi_controller *phwi_ctrlr,
2428 unsigned int def_pdu_ring_sz)
2429{
2430 unsigned int idx;
2431 int ret;
2432 struct be_queue_info *dq, *cq;
2433 struct be_dma_mem *mem;
2434 struct be_mem_descriptor *mem_descr;
2435 void *dq_vaddress;
2436
2437 idx = 0;
2438 dq = &phwi_context->be_def_hdrq;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302439 cq = &phwi_context->be_cq[0];
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302440 mem = &dq->dma_mem;
2441 mem_descr = phba->init_mem;
2442 mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2443 dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2444 ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
2445 sizeof(struct phys_addr),
2446 sizeof(struct phys_addr), dq_vaddress);
2447 if (ret) {
2448 shost_printk(KERN_ERR, phba->shost,
2449 "be_fill_queue Failed for DEF PDU HDR\n");
2450 return ret;
2451 }
2452 mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
2453 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
2454 def_pdu_ring_sz,
2455 phba->params.defpdu_hdr_sz);
2456 if (ret) {
2457 shost_printk(KERN_ERR, phba->shost,
2458 "be_cmd_create_default_pdu_queue Failed DEFHDR\n");
2459 return ret;
2460 }
2461 phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
2462 SE_DEBUG(DBG_LVL_8, "iscsi def pdu id is %d\n",
2463 phwi_context->be_def_hdrq.id);
2464 hwi_post_async_buffers(phba, 1);
2465 return 0;
2466}
2467
2468static int
2469beiscsi_create_def_data(struct beiscsi_hba *phba,
2470 struct hwi_context_memory *phwi_context,
2471 struct hwi_controller *phwi_ctrlr,
2472 unsigned int def_pdu_ring_sz)
2473{
2474 unsigned int idx;
2475 int ret;
2476 struct be_queue_info *dataq, *cq;
2477 struct be_dma_mem *mem;
2478 struct be_mem_descriptor *mem_descr;
2479 void *dq_vaddress;
2480
2481 idx = 0;
2482 dataq = &phwi_context->be_def_dataq;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302483 cq = &phwi_context->be_cq[0];
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302484 mem = &dataq->dma_mem;
2485 mem_descr = phba->init_mem;
2486 mem_descr += HWI_MEM_ASYNC_DATA_RING;
2487 dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2488 ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
2489 sizeof(struct phys_addr),
2490 sizeof(struct phys_addr), dq_vaddress);
2491 if (ret) {
2492 shost_printk(KERN_ERR, phba->shost,
2493 "be_fill_queue Failed for DEF PDU DATA\n");
2494 return ret;
2495 }
2496 mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
2497 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
2498 def_pdu_ring_sz,
2499 phba->params.defpdu_data_sz);
2500 if (ret) {
2501 shost_printk(KERN_ERR, phba->shost,
2502 "be_cmd_create_default_pdu_queue Failed"
2503 " for DEF PDU DATA\n");
2504 return ret;
2505 }
2506 phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
2507 SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n",
2508 phwi_context->be_def_dataq.id);
2509 hwi_post_async_buffers(phba, 0);
2510 SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED \n");
2511 return 0;
2512}
2513
2514static int
2515beiscsi_post_pages(struct beiscsi_hba *phba)
2516{
2517 struct be_mem_descriptor *mem_descr;
2518 struct mem_array *pm_arr;
2519 unsigned int page_offset, i;
2520 struct be_dma_mem sgl;
2521 int status;
2522
2523 mem_descr = phba->init_mem;
2524 mem_descr += HWI_MEM_SGE;
2525 pm_arr = mem_descr->mem_array;
2526
2527 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
2528 phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
2529 for (i = 0; i < mem_descr->num_elements; i++) {
2530 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
2531 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
2532 page_offset,
2533 (pm_arr->size / PAGE_SIZE));
2534 page_offset += pm_arr->size / PAGE_SIZE;
2535 if (status != 0) {
2536 shost_printk(KERN_ERR, phba->shost,
2537 "post sgl failed.\n");
2538 return status;
2539 }
2540 pm_arr++;
2541 }
2542 SE_DEBUG(DBG_LVL_8, "POSTED PAGES \n");
2543 return 0;
2544}
2545
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302546static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
2547{
2548 struct be_dma_mem *mem = &q->dma_mem;
2549 if (mem->va)
2550 pci_free_consistent(phba->pcidev, mem->size,
2551 mem->va, mem->dma);
2552}
2553
2554static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
2555 u16 len, u16 entry_size)
2556{
2557 struct be_dma_mem *mem = &q->dma_mem;
2558
2559 memset(q, 0, sizeof(*q));
2560 q->len = len;
2561 q->entry_size = entry_size;
2562 mem->size = len * entry_size;
2563 mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
2564 if (!mem->va)
2565 return -1;
2566 memset(mem->va, 0, mem->size);
2567 return 0;
2568}
2569
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302570static int
2571beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
2572 struct hwi_context_memory *phwi_context,
2573 struct hwi_controller *phwi_ctrlr)
2574{
2575 unsigned int wrb_mem_index, offset, size, num_wrb_rings;
2576 u64 pa_addr_lo;
2577 unsigned int idx, num, i;
2578 struct mem_array *pwrb_arr;
2579 void *wrb_vaddr;
2580 struct be_dma_mem sgl;
2581 struct be_mem_descriptor *mem_descr;
2582 int status;
2583
2584 idx = 0;
2585 mem_descr = phba->init_mem;
2586 mem_descr += HWI_MEM_WRB;
2587 pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
2588 GFP_KERNEL);
2589 if (!pwrb_arr) {
2590 shost_printk(KERN_ERR, phba->shost,
2591 "Memory alloc failed in create wrb ring.\n");
2592 return -ENOMEM;
2593 }
2594 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2595 pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
2596 num_wrb_rings = mem_descr->mem_array[idx].size /
2597 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
2598
2599 for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
2600 if (num_wrb_rings) {
2601 pwrb_arr[num].virtual_address = wrb_vaddr;
2602 pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
2603 pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2604 sizeof(struct iscsi_wrb);
2605 wrb_vaddr += pwrb_arr[num].size;
2606 pa_addr_lo += pwrb_arr[num].size;
2607 num_wrb_rings--;
2608 } else {
2609 idx++;
2610 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2611 pa_addr_lo = mem_descr->mem_array[idx].\
2612 bus_address.u.a64.address;
2613 num_wrb_rings = mem_descr->mem_array[idx].size /
2614 (phba->params.wrbs_per_cxn *
2615 sizeof(struct iscsi_wrb));
2616 pwrb_arr[num].virtual_address = wrb_vaddr;
2617 pwrb_arr[num].bus_address.u.a64.address\
2618 = pa_addr_lo;
2619 pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2620 sizeof(struct iscsi_wrb);
2621 wrb_vaddr += pwrb_arr[num].size;
2622 pa_addr_lo += pwrb_arr[num].size;
2623 num_wrb_rings--;
2624 }
2625 }
2626 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2627 wrb_mem_index = 0;
2628 offset = 0;
2629 size = 0;
2630
2631 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
2632 status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
2633 &phwi_context->be_wrbq[i]);
2634 if (status != 0) {
2635 shost_printk(KERN_ERR, phba->shost,
2636 "wrbq create failed.");
2637 return status;
2638 }
Jayamohan Kallickal7da50872010-01-05 05:04:12 +05302639 phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i].
2640 id;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302641 }
2642 kfree(pwrb_arr);
2643 return 0;
2644}
2645
2646static void free_wrb_handles(struct beiscsi_hba *phba)
2647{
2648 unsigned int index;
2649 struct hwi_controller *phwi_ctrlr;
2650 struct hwi_wrb_context *pwrb_context;
2651
2652 phwi_ctrlr = phba->phwi_ctrlr;
2653 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2654 pwrb_context = &phwi_ctrlr->wrb_context[index];
2655 kfree(pwrb_context->pwrb_handle_base);
2656 kfree(pwrb_context->pwrb_handle_basestd);
2657 }
2658}
2659
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302660static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
2661{
2662 struct be_queue_info *q;
2663 struct be_ctrl_info *ctrl = &phba->ctrl;
2664
2665 q = &phba->ctrl.mcc_obj.q;
2666 if (q->created)
2667 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
2668 be_queue_free(phba, q);
2669
2670 q = &phba->ctrl.mcc_obj.cq;
2671 if (q->created)
2672 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2673 be_queue_free(phba, q);
2674}
2675
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302676static void hwi_cleanup(struct beiscsi_hba *phba)
2677{
2678 struct be_queue_info *q;
2679 struct be_ctrl_info *ctrl = &phba->ctrl;
2680 struct hwi_controller *phwi_ctrlr;
2681 struct hwi_context_memory *phwi_context;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302682 int i, eq_num;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302683
2684 phwi_ctrlr = phba->phwi_ctrlr;
2685 phwi_context = phwi_ctrlr->phwi_ctxt;
2686 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2687 q = &phwi_context->be_wrbq[i];
2688 if (q->created)
2689 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
2690 }
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302691 free_wrb_handles(phba);
2692
2693 q = &phwi_context->be_def_hdrq;
2694 if (q->created)
2695 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
2696
2697 q = &phwi_context->be_def_dataq;
2698 if (q->created)
2699 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
2700
2701 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
2702
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302703 for (i = 0; i < (phba->num_cpus); i++) {
2704 q = &phwi_context->be_cq[i];
2705 if (q->created)
2706 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2707 }
2708 if (phba->msix_enabled)
2709 eq_num = 1;
2710 else
2711 eq_num = 0;
2712 for (i = 0; i < (phba->num_cpus + eq_num); i++) {
2713 q = &phwi_context->be_eq[i].q;
2714 if (q->created)
2715 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
2716 }
2717 be_mcc_queues_destroy(phba);
2718}
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302719
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302720static int be_mcc_queues_create(struct beiscsi_hba *phba,
2721 struct hwi_context_memory *phwi_context)
2722{
2723 struct be_queue_info *q, *cq;
2724 struct be_ctrl_info *ctrl = &phba->ctrl;
2725
2726 /* Alloc MCC compl queue */
2727 cq = &phba->ctrl.mcc_obj.cq;
2728 if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
2729 sizeof(struct be_mcc_compl)))
2730 goto err;
2731 /* Ask BE to create MCC compl queue; */
2732 if (phba->msix_enabled) {
2733 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
2734 [phba->num_cpus].q, false, true, 0))
2735 goto mcc_cq_free;
2736 } else {
2737 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
2738 false, true, 0))
2739 goto mcc_cq_free;
2740 }
2741
2742 /* Alloc MCC queue */
2743 q = &phba->ctrl.mcc_obj.q;
2744 if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2745 goto mcc_cq_destroy;
2746
2747 /* Ask BE to create MCC queue */
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05302748 if (beiscsi_cmd_mccq_create(phba, q, cq))
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302749 goto mcc_q_free;
2750
2751 return 0;
2752
2753mcc_q_free:
2754 be_queue_free(phba, q);
2755mcc_cq_destroy:
2756 beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
2757mcc_cq_free:
2758 be_queue_free(phba, cq);
2759err:
2760 return -1;
2761}
2762
2763static int find_num_cpus(void)
2764{
2765 int num_cpus = 0;
2766
2767 num_cpus = num_online_cpus();
2768 if (num_cpus >= MAX_CPUS)
2769 num_cpus = MAX_CPUS - 1;
2770
2771 SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", num_cpus);
2772 return num_cpus;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302773}
2774
/**
 * hwi_init_port - bring up all adapter-side queues for this port
 * @phba: driver private structure
 *
 * Performs the firmware init command and then creates, in order: event
 * queues, MCC queues, (after a firmware version check) completion
 * queues, default PDU header/data queues, posted SGL pages, and the
 * per-connection WRB rings.  The order matters: each step's queues are
 * consumed by the next (EQs before CQs, CQs before WRB rings).
 *
 * Returns 0 on success or -ENOMEM on any failure, after tearing down
 * everything already created via hwi_cleanup().
 */
static int hwi_init_port(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	unsigned int def_pdu_ring_sz;
	struct be_ctrl_info *ctrl = &phba->ctrl;
	int status;

	/* Ring size for the default PDU queues: one phys_addr entry per
	 * async PDU the controller may post.
	 */
	def_pdu_ring_sz =
		phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	/* Initial EQ delay settings; cur_eqd of 64 is the starting
	 * interrupt-coalescing value.
	 */
	phwi_context->max_eqd = 0;
	phwi_context->min_eqd = 0;
	phwi_context->cur_eqd = 64;
	be_cmd_fw_initialize(&phba->ctrl);

	status = beiscsi_create_eqs(phba, phwi_context);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost, "EQ not created \n");
		goto error;
	}

	status = be_mcc_queues_create(phba, phwi_context);
	if (status != 0)
		goto error;

	status = mgmt_check_supported_fw(ctrl, phba);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "Unsupported fw version \n");
		goto error;
	}

	/* Firmware feature bit 0x1 selects ring mode for SGL handling;
	 * this sets the module-wide flag used throughout the driver.
	 */
	if (phba->fw_config.iscsi_features == 0x1)
		ring_mode = 1;
	else
		ring_mode = 0;

	status = beiscsi_create_cqs(phba, phwi_context);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
		goto error;
	}

	status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
					def_pdu_ring_sz);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "Default Header not created\n");
		goto error;
	}

	status = beiscsi_create_def_data(phba, phwi_context,
					 phwi_ctrlr, def_pdu_ring_sz);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "Default Data not created\n");
		goto error;
	}

	status = beiscsi_post_pages(phba);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost, "Post SGL Pages Failed\n");
		goto error;
	}

	status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "WRB Rings not created\n");
		goto error;
	}

	SE_DEBUG(DBG_LVL_8, "hwi_init_port success\n");
	return 0;

error:
	/* Any partial bring-up is undone here; hwi_cleanup() checks each
	 * queue's 'created' flag so it is safe after any failed step.
	 */
	shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed");
	hwi_cleanup(phba);
	return -ENOMEM;
}
2857
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302858static int hwi_init_controller(struct beiscsi_hba *phba)
2859{
2860 struct hwi_controller *phwi_ctrlr;
2861
2862 phwi_ctrlr = phba->phwi_ctrlr;
2863 if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
2864 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
2865 init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
2866 SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p \n",
2867 phwi_ctrlr->phwi_ctxt);
2868 } else {
2869 shost_printk(KERN_ERR, phba->shost,
2870 "HWI_MEM_ADDN_CONTEXT is more than one element."
2871 "Failing to load\n");
2872 return -ENOMEM;
2873 }
2874
2875 iscsi_init_global_templates(phba);
2876 beiscsi_init_wrb_handle(phba);
2877 hwi_init_async_pdu_ctx(phba);
2878 if (hwi_init_port(phba) != 0) {
2879 shost_printk(KERN_ERR, phba->shost,
2880 "hwi_init_controller failed\n");
2881 return -ENOMEM;
2882 }
2883 return 0;
2884}
2885
2886static void beiscsi_free_mem(struct beiscsi_hba *phba)
2887{
2888 struct be_mem_descriptor *mem_descr;
2889 int i, j;
2890
2891 mem_descr = phba->init_mem;
2892 i = 0;
2893 j = 0;
2894 for (i = 0; i < SE_MEM_MAX; i++) {
2895 for (j = mem_descr->num_elements; j > 0; j--) {
2896 pci_free_consistent(phba->pcidev,
2897 mem_descr->mem_array[j - 1].size,
2898 mem_descr->mem_array[j - 1].virtual_address,
2899 mem_descr->mem_array[j - 1].bus_address.
2900 u.a64.address);
2901 }
2902 kfree(mem_descr->mem_array);
2903 mem_descr++;
2904 }
2905 kfree(phba->init_mem);
2906 kfree(phba->phwi_ctrlr);
2907}
2908
2909static int beiscsi_init_controller(struct beiscsi_hba *phba)
2910{
2911 int ret = -ENOMEM;
2912
2913 ret = beiscsi_get_memory(phba);
2914 if (ret < 0) {
2915 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe -"
2916 "Failed in beiscsi_alloc_memory \n");
2917 return ret;
2918 }
2919
2920 ret = hwi_init_controller(phba);
2921 if (ret)
2922 goto free_init;
2923 SE_DEBUG(DBG_LVL_8, "Return success from beiscsi_init_controller");
2924 return 0;
2925
2926free_init:
2927 beiscsi_free_mem(phba);
2928 return -ENOMEM;
2929}
2930
2931static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
2932{
2933 struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
2934 struct sgl_handle *psgl_handle;
2935 struct iscsi_sge *pfrag;
2936 unsigned int arr_index, i, idx;
2937
2938 phba->io_sgl_hndl_avbl = 0;
2939 phba->eh_sgl_hndl_avbl = 0;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302940
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05302941 if (ring_mode) {
2942 phba->sgl_hndl_array = kzalloc(sizeof(struct sgl_handle *) *
2943 phba->params.icds_per_ctrl,
2944 GFP_KERNEL);
2945 if (!phba->sgl_hndl_array) {
2946 shost_printk(KERN_ERR, phba->shost,
2947 "Mem Alloc Failed. Failing to load\n");
2948 return -ENOMEM;
2949 }
2950 }
2951
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302952 mem_descr_sglh = phba->init_mem;
2953 mem_descr_sglh += HWI_MEM_SGLH;
2954 if (1 == mem_descr_sglh->num_elements) {
2955 phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
2956 phba->params.ios_per_ctrl,
2957 GFP_KERNEL);
2958 if (!phba->io_sgl_hndl_base) {
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05302959 if (ring_mode)
2960 kfree(phba->sgl_hndl_array);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302961 shost_printk(KERN_ERR, phba->shost,
2962 "Mem Alloc Failed. Failing to load\n");
2963 return -ENOMEM;
2964 }
2965 phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
2966 (phba->params.icds_per_ctrl -
2967 phba->params.ios_per_ctrl),
2968 GFP_KERNEL);
2969 if (!phba->eh_sgl_hndl_base) {
2970 kfree(phba->io_sgl_hndl_base);
2971 shost_printk(KERN_ERR, phba->shost,
2972 "Mem Alloc Failed. Failing to load\n");
2973 return -ENOMEM;
2974 }
2975 } else {
2976 shost_printk(KERN_ERR, phba->shost,
2977 "HWI_MEM_SGLH is more than one element."
2978 "Failing to load\n");
2979 return -ENOMEM;
2980 }
2981
2982 arr_index = 0;
2983 idx = 0;
2984 while (idx < mem_descr_sglh->num_elements) {
2985 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
2986
2987 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
2988 sizeof(struct sgl_handle)); i++) {
2989 if (arr_index < phba->params.ios_per_ctrl) {
2990 phba->io_sgl_hndl_base[arr_index] = psgl_handle;
2991 phba->io_sgl_hndl_avbl++;
2992 arr_index++;
2993 } else {
2994 phba->eh_sgl_hndl_base[arr_index -
2995 phba->params.ios_per_ctrl] =
2996 psgl_handle;
2997 arr_index++;
2998 phba->eh_sgl_hndl_avbl++;
2999 }
3000 psgl_handle++;
3001 }
3002 idx++;
3003 }
3004 SE_DEBUG(DBG_LVL_8,
3005 "phba->io_sgl_hndl_avbl=%d"
3006 "phba->eh_sgl_hndl_avbl=%d \n",
3007 phba->io_sgl_hndl_avbl,
3008 phba->eh_sgl_hndl_avbl);
3009 mem_descr_sg = phba->init_mem;
3010 mem_descr_sg += HWI_MEM_SGE;
3011 SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d \n",
3012 mem_descr_sg->num_elements);
3013 arr_index = 0;
3014 idx = 0;
3015 while (idx < mem_descr_sg->num_elements) {
3016 pfrag = mem_descr_sg->mem_array[idx].virtual_address;
3017
3018 for (i = 0;
3019 i < (mem_descr_sg->mem_array[idx].size) /
3020 (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
3021 i++) {
3022 if (arr_index < phba->params.ios_per_ctrl)
3023 psgl_handle = phba->io_sgl_hndl_base[arr_index];
3024 else
3025 psgl_handle = phba->eh_sgl_hndl_base[arr_index -
3026 phba->params.ios_per_ctrl];
3027 psgl_handle->pfrag = pfrag;
3028 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
3029 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
3030 pfrag += phba->params.num_sge_per_io;
3031 psgl_handle->sgl_index =
Jayamohan Kallickal7da50872010-01-05 05:04:12 +05303032 phba->fw_config.iscsi_icd_start + arr_index++;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303033 }
3034 idx++;
3035 }
3036 phba->io_sgl_free_index = 0;
3037 phba->io_sgl_alloc_index = 0;
3038 phba->eh_sgl_free_index = 0;
3039 phba->eh_sgl_alloc_index = 0;
3040 return 0;
3041}
3042
3043static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3044{
3045 int i, new_cid;
3046
3047 phba->cid_array = kmalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
3048 GFP_KERNEL);
3049 if (!phba->cid_array) {
3050 shost_printk(KERN_ERR, phba->shost,
3051 "Failed to allocate memory in "
3052 "hba_setup_cid_tbls\n");
3053 return -ENOMEM;
3054 }
3055 phba->ep_array = kmalloc(sizeof(struct iscsi_endpoint *) *
3056 phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
3057 if (!phba->ep_array) {
3058 shost_printk(KERN_ERR, phba->shost,
3059 "Failed to allocate memory in "
3060 "hba_setup_cid_tbls \n");
3061 kfree(phba->cid_array);
3062 return -ENOMEM;
3063 }
Jayamohan Kallickal7da50872010-01-05 05:04:12 +05303064 new_cid = phba->fw_config.iscsi_cid_start;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303065 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3066 phba->cid_array[i] = new_cid;
3067 new_cid += 2;
3068 }
3069 phba->avlbl_cids = phba->params.cxns_per_ctrl;
3070 return 0;
3071}
3072
3073static unsigned char hwi_enable_intr(struct beiscsi_hba *phba)
3074{
3075 struct be_ctrl_info *ctrl = &phba->ctrl;
3076 struct hwi_controller *phwi_ctrlr;
3077 struct hwi_context_memory *phwi_context;
3078 struct be_queue_info *eq;
3079 u8 __iomem *addr;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303080 u32 reg, i;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303081 u32 enabled;
3082
3083 phwi_ctrlr = phba->phwi_ctrlr;
3084 phwi_context = phwi_ctrlr->phwi_ctxt;
3085
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303086 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
3087 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
3088 reg = ioread32(addr);
3089 SE_DEBUG(DBG_LVL_8, "reg =x%08x \n", reg);
3090
3091 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3092 if (!enabled) {
3093 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3094 SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p \n", reg, addr);
3095 iowrite32(reg, addr);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303096 for (i = 0; i <= phba->num_cpus; i++) {
3097 eq = &phwi_context->be_eq[i].q;
3098 SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
3099 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3100 }
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303101 } else
3102 shost_printk(KERN_WARNING, phba->shost,
3103 "In hwi_enable_intr, Not Enabled \n");
3104 return true;
3105}
3106
3107static void hwi_disable_intr(struct beiscsi_hba *phba)
3108{
3109 struct be_ctrl_info *ctrl = &phba->ctrl;
3110
3111 u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
3112 u32 reg = ioread32(addr);
3113
3114 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3115 if (enabled) {
3116 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3117 iowrite32(reg, addr);
3118 } else
3119 shost_printk(KERN_WARNING, phba->shost,
3120 "In hwi_disable_intr, Already Disabled \n");
3121}
3122
3123static int beiscsi_init_port(struct beiscsi_hba *phba)
3124{
3125 int ret;
3126
3127 ret = beiscsi_init_controller(phba);
3128 if (ret < 0) {
3129 shost_printk(KERN_ERR, phba->shost,
3130 "beiscsi_dev_probe - Failed in"
3131 "beiscsi_init_controller \n");
3132 return ret;
3133 }
3134 ret = beiscsi_init_sgl_handle(phba);
3135 if (ret < 0) {
3136 shost_printk(KERN_ERR, phba->shost,
3137 "beiscsi_dev_probe - Failed in"
3138 "beiscsi_init_sgl_handle \n");
3139 goto do_cleanup_ctrlr;
3140 }
3141
3142 if (hba_setup_cid_tbls(phba)) {
3143 shost_printk(KERN_ERR, phba->shost,
3144 "Failed in hba_setup_cid_tbls\n");
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05303145 if (ring_mode)
3146 kfree(phba->sgl_hndl_array);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303147 kfree(phba->io_sgl_hndl_base);
3148 kfree(phba->eh_sgl_hndl_base);
3149 goto do_cleanup_ctrlr;
3150 }
3151
3152 return ret;
3153
3154do_cleanup_ctrlr:
3155 hwi_cleanup(phba);
3156 return ret;
3157}
3158
3159static void hwi_purge_eq(struct beiscsi_hba *phba)
3160{
3161 struct hwi_controller *phwi_ctrlr;
3162 struct hwi_context_memory *phwi_context;
3163 struct be_queue_info *eq;
3164 struct be_eq_entry *eqe = NULL;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303165 int i, eq_msix;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303166
3167 phwi_ctrlr = phba->phwi_ctrlr;
3168 phwi_context = phwi_ctrlr->phwi_ctxt;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303169 if (phba->msix_enabled)
3170 eq_msix = 1;
3171 else
3172 eq_msix = 0;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303173
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303174 for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
3175 eq = &phwi_context->be_eq[i].q;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303176 eqe = queue_tail_node(eq);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303177
3178 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
3179 & EQE_VALID_MASK) {
3180 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
3181 queue_tail_inc(eq);
3182 eqe = queue_tail_node(eq);
3183 }
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303184 }
3185}
3186
3187static void beiscsi_clean_port(struct beiscsi_hba *phba)
3188{
3189 unsigned char mgmt_status;
3190
3191 mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
3192 if (mgmt_status)
3193 shost_printk(KERN_WARNING, phba->shost,
3194 "mgmt_epfw_cleanup FAILED \n");
3195 hwi_cleanup(phba);
3196 hwi_purge_eq(phba);
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05303197 if (ring_mode)
3198 kfree(phba->sgl_hndl_array);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303199 kfree(phba->io_sgl_hndl_base);
3200 kfree(phba->eh_sgl_hndl_base);
3201 kfree(phba->cid_array);
3202 kfree(phba->ep_array);
3203}
3204
/**
 * beiscsi_offload_connection - push negotiated iSCSI params to the chip
 * @beiscsi_conn: driver connection being offloaded
 * @params: negotiated parameters packed in amap_beiscsi_offload_params
 *          dword layout
 *
 * Builds a TARGET_CONTEXT_UPDATE WRB carrying the negotiated session
 * parameters (burst lengths, ERL, digest and R2T flags, exp_statsn)
 * and rings the TX doorbell to hand it to the adapter.
 */
void
beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
			   struct beiscsi_offload_params *params)
{
	struct wrb_handle *pwrb_handle;
	struct iscsi_target_context_update_wrb *pwrb = NULL;
	struct be_mem_descriptor *mem_descr;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	u32 doorbell = 0;

	/*
	 * We can always use 0 here because it is reserved by libiscsi for
	 * login/startup related tasks.
	 */
	pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
				       phba->fw_config.iscsi_cid_start), 0);
	pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
	memset(pwrb, 0, sizeof(*pwrb));
	/* Each parameter is fetched from its dword slot in the packed
	 * offload params and copied into the WRB's bit-field layout.
	 */
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      max_burst_length, pwrb, params->dw[offsetof
		      (struct amap_beiscsi_offload_params,
		      max_burst_length) / 32]);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      max_send_data_segment_length, pwrb,
		      params->dw[offsetof(struct amap_beiscsi_offload_params,
		      max_send_data_segment_length) / 32]);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      first_burst_length,
		      pwrb,
		      params->dw[offsetof(struct amap_beiscsi_offload_params,
		      first_burst_length) / 32]);

	/* Single-bit negotiated flags are masked out of their shared
	 * dword and shifted down to bit 0 before being stored.
	 */
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      erl) / 32] & OFFLD_PARAMS_ERL));
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
		      pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      exp_statsn) / 32] + 1));
	/* WRB type 0x7 is the target-context-update opcode for this WRB. */
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
		      0x7);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
		      pwrb, pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
		      pwrb, pwrb_handle->nxt_wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      session_state, pwrb, 0);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
		      pwrb, 1);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
		      pwrb, 0);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
		      0);

	/* The pad buffer comes from the shared ISCSI_MEM_GLOBAL_HEADER
	 * region allocated at init time.
	 */
	mem_descr = phba->init_mem;
	mem_descr += ISCSI_MEM_GLOBAL_HEADER;

	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
			pad_buffer_addr_hi, pwrb,
		      mem_descr->mem_array[0].bus_address.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
			pad_buffer_addr_lo, pwrb,
		      mem_descr->mem_array[0].bus_address.u.a32.address_lo);

	/* Swap the finished WRB to the adapter's endianness in place. */
	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));

	/* Doorbell: CID, WRB index (legacy mode only) and count of 1. */
	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	if (!ring_mode)
		doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
			     << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;

	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
}
3291
3292static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
3293 int *index, int *age)
3294{
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303295 *index = (int)itt;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303296 if (age)
3297 *age = conn->session->age;
3298}
3299
/**
 * beiscsi_alloc_pdu - allocates pdu and related resources
 * @task: libiscsi task
 * @opcode: opcode of pdu for task
 *
 * This is called with the session lock held. It will allocate
 * the wrb and sgl if needed for the command. And it will prep
 * the pdu's itt. beiscsi_parse_pdu will later translate
 * the pdu itt to the libiscsi task itt.
 *
 * Allocates: a BHS buffer from the session DMA pool, a WRB handle for
 * the connection, and an SGL handle (I/O pool for SCSI commands, mgmt
 * pool otherwise; the login SGL handle is allocated once and reused
 * for the whole login phase).  Returns 0 or -ENOMEM, releasing the
 * BHS buffer and WRB handle on failure.
 */
static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	itt_t itt;
	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
	dma_addr_t paddr;

	io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
					  GFP_KERNEL, &paddr);
	if (!io_task->cmd_bhs)
		return -ENOMEM;
	io_task->bhs_pa.u.a64.address = paddr;
	/* Remember libiscsi's itt; the wire itt below encodes driver
	 * indices instead.
	 */
	io_task->libiscsi_itt = (itt_t)task->itt;
	/* NOTE(review): alloc_wrb_handle() result is not NULL-checked
	 * here - presumably it cannot fail for a valid cid; confirm
	 * against its definition.
	 */
	io_task->pwrb_handle = alloc_wrb_handle(phba,
					beiscsi_conn->beiscsi_conn_cid -
					phba->fw_config.iscsi_cid_start,
					task->itt);
	io_task->conn = beiscsi_conn;

	task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
	task->hdr_max = sizeof(struct be_cmd_bhs);

	if (task->sc) {
		/* SCSI command: take an SGL handle from the I/O pool. */
		spin_lock(&phba->io_sgl_lock);
		io_task->psgl_handle = alloc_io_sgl_handle(phba);
		spin_unlock(&phba->io_sgl_lock);
		if (!io_task->psgl_handle)
			goto free_hndls;
	} else {
		io_task->scsi_cmnd = NULL;
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
			/* One mgmt SGL handle is allocated for the first
			 * login PDU and reused for the rest of the login
			 * phase.
			 */
			if (!beiscsi_conn->login_in_progress) {
				spin_lock(&phba->mgmt_sgl_lock);
				io_task->psgl_handle = (struct sgl_handle *)
						alloc_mgmt_sgl_handle(phba);
				spin_unlock(&phba->mgmt_sgl_lock);
				if (!io_task->psgl_handle)
					goto free_hndls;

				beiscsi_conn->login_in_progress = 1;
				beiscsi_conn->plogin_sgl_handle =
							io_task->psgl_handle;
			} else {
				io_task->psgl_handle =
						beiscsi_conn->plogin_sgl_handle;
			}
		} else {
			/* Other mgmt PDUs get their own mgmt SGL handle. */
			spin_lock(&phba->mgmt_sgl_lock);
			io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
			spin_unlock(&phba->mgmt_sgl_lock);
			if (!io_task->psgl_handle)
				goto free_hndls;
		}
	}
	/* Wire itt: WRB index in the high 16 bits, SGL index in the low
	 * bits, byte-swapped for the adapter.
	 */
	itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
				 wrb_index << 16) | (unsigned int)
				(io_task->psgl_handle->sgl_index));
	if (ring_mode) {
		/* Ring mode looks tasks up by icd-relative SGL index. */
		phba->sgl_hndl_array[io_task->psgl_handle->sgl_index -
				     phba->fw_config.iscsi_icd_start] =
				io_task->psgl_handle;
		io_task->psgl_handle->task = task;
		io_task->psgl_handle->cid = beiscsi_conn->beiscsi_conn_cid -
					    phba->fw_config.iscsi_cid_start;
	} else
		io_task->pwrb_handle->pio_handle = task;

	io_task->cmd_bhs->iscsi_hdr.itt = itt;
	return 0;

free_hndls:
	/* Undo in reverse order: WRB handle, then the BHS pool buffer. */
	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[
			beiscsi_conn->beiscsi_conn_cid -
			phba->fw_config.iscsi_cid_start];
	free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
	io_task->pwrb_handle = NULL;
	pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
		      io_task->bhs_pa.u.a64.address);
	SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed \n");
	return -ENOMEM;
}
3397
3398static void beiscsi_cleanup_task(struct iscsi_task *task)
3399{
3400 struct beiscsi_io_task *io_task = task->dd_data;
3401 struct iscsi_conn *conn = task->conn;
3402 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3403 struct beiscsi_hba *phba = beiscsi_conn->phba;
Jayamohan Kallickal2afc95b2009-09-22 08:22:26 +05303404 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303405 struct hwi_wrb_context *pwrb_context;
3406 struct hwi_controller *phwi_ctrlr;
3407
3408 phwi_ctrlr = phba->phwi_ctrlr;
Jayamohan Kallickal7da50872010-01-05 05:04:12 +05303409 pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
3410 - phba->fw_config.iscsi_cid_start];
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303411 if (io_task->pwrb_handle) {
3412 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3413 io_task->pwrb_handle = NULL;
3414 }
3415
Jayamohan Kallickal2afc95b2009-09-22 08:22:26 +05303416 if (io_task->cmd_bhs) {
3417 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3418 io_task->bhs_pa.u.a64.address);
3419 }
3420
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303421 if (task->sc) {
3422 if (io_task->psgl_handle) {
3423 spin_lock(&phba->io_sgl_lock);
3424 free_io_sgl_handle(phba, io_task->psgl_handle);
3425 spin_unlock(&phba->io_sgl_lock);
3426 io_task->psgl_handle = NULL;
3427 }
3428 } else {
3429 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN)
3430 return;
3431 if (io_task->psgl_handle) {
3432 spin_lock(&phba->mgmt_sgl_lock);
3433 free_mgmt_sgl_handle(phba, io_task->psgl_handle);
3434 spin_unlock(&phba->mgmt_sgl_lock);
3435 io_task->psgl_handle = NULL;
3436 }
3437 }
3438}
3439
/**
 * beiscsi_iotask - post a SCSI data command to the adapter
 * @task: libiscsi task carrying the command
 * @sg: scatterlist of the data buffer
 * @num_sg: number of scatterlist entries
 * @xferlen: total transfer length in bytes
 * @writedir: non-zero for a write (Data-Out), zero for a read
 *
 * Fills in the task's WRB (command type, LUN, transfer length, CmdSN,
 * SGL index and chaining to the next WRB), writes the SG entries and
 * rings the TX doorbell.  Always returns 0.
 */
static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
			  unsigned int num_sg, unsigned int xferlen,
			  unsigned int writedir)
{

	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int doorbell = 0;

	pwrb = io_task->pwrb_handle->pwrb;
	io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
	io_task->bhs_len = sizeof(struct be_cmd_bhs);

	if (writedir) {
		/* Pre-build a Data-Out PDU template (48-byte BHS) that
		 * the adapter uses for the solicited data phase.
		 */
		memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
		AMAP_SET_BITS(struct amap_pdu_data_out, itt,
			      &io_task->cmd_bhs->iscsi_data_pdu,
			      (unsigned int)io_task->cmd_bhs->iscsi_hdr.itt);
		AMAP_SET_BITS(struct amap_pdu_data_out, opcode,
			      &io_task->cmd_bhs->iscsi_data_pdu,
			      ISCSI_OPCODE_SCSI_DATA_OUT);
		AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
			      &io_task->cmd_bhs->iscsi_data_pdu, 1);
		/* Ring mode records the command type on the SGL handle;
		 * legacy mode encodes it in the WRB itself.
		 */
		if (ring_mode)
			io_task->psgl_handle->type = INI_WR_CMD;
		else
			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
				      INI_WR_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
	} else {
		if (ring_mode)
			io_task->psgl_handle->type = INI_RD_CMD;
		else
			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
				      INI_RD_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
	}
	/* Copy the LUN from the command BHS into the Data-Out template. */
	memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
	       dw[offsetof(struct amap_pdu_data_out, lun) / 32],
	       io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));

	AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
		      cpu_to_be16((unsigned short)io_task->cmd_bhs->iscsi_hdr.
				  lun[0]));
	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
		      io_task->pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
		      be32_to_cpu(task->cmdsn));
	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
		      io_task->psgl_handle->sgl_index);

	hwi_write_sgl(pwrb, sg, num_sg, io_task);

	/* Chain to the next WRB and convert to adapter endianness. */
	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
		      io_task->pwrb_handle->nxt_wrb_index);
	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));

	/* Doorbell: CID, WRB index (legacy mode only) and count of 1. */
	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	if (!ring_mode)
		doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;

	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
	return 0;
}
3510
/**
 * beiscsi_mtask - post a management/control task (non data-path PDU)
 * @task: libiscsi task carrying the PDU to send
 *
 * Programs the task's WRB according to the iSCSI opcode (login, nop-out,
 * text, TMF, logout) and rings the TX doorbell.  Returns 0 on success,
 * 0 when a TMF references a task that is already gone, and -EINVAL for
 * opcodes this path does not support.
 */
static int beiscsi_mtask(struct iscsi_task *task)
{
	struct beiscsi_io_task *aborted_io_task, *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_session *session;
	struct iscsi_wrb *pwrb = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_wrb_context *pwrb_context;
	struct wrb_handle *pwrb_handle;
	unsigned int doorbell = 0;
	unsigned int i, cid;
	struct iscsi_task *aborted_task;

	cid = beiscsi_conn->beiscsi_conn_cid;
	pwrb = io_task->pwrb_handle->pwrb;
	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
		      be32_to_cpu(task->cmdsn));
	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
		      io_task->pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
		      io_task->psgl_handle->sgl_index);

	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
	case ISCSI_OP_LOGIN:
		if (ring_mode)
			io_task->psgl_handle->type = TGT_DM_CMD;
		else
			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
				      TGT_DM_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		/* Login is posted with a fixed CmdSN of 1, overriding the
		 * value written above.
		 */
		AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_NOOP_OUT:
		if (ring_mode)
			io_task->psgl_handle->type = INI_RD_CMD;
		else
			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
				      INI_RD_CMD);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_TEXT:
		if (ring_mode)
			io_task->psgl_handle->type = INI_WR_CMD;
		else
			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
				      INI_WR_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		/* Locate the task being aborted through the referenced task
		 * tag (rtt); the WRB index is taken from the tag's upper
		 * 16 bits after byte-swapping.
		 */
		session = conn->session; /* NOTE(review): assigned but unused here */
		i = ((struct iscsi_tm *)task->hdr)->rtt;
		phwi_ctrlr = phba->phwi_ctrlr;
		pwrb_context = &phwi_ctrlr->wrb_context[cid -
					    phba->fw_config.iscsi_cid_start];
		pwrb_handle = pwrb_context->pwrb_handle_basestd[be32_to_cpu(i)
								>> 16];
		aborted_task = pwrb_handle->pio_handle;
		/* Task already completed/freed: nothing to invalidate. */
		if (!aborted_task)
			return 0;

		aborted_io_task = aborted_task->dd_data;
		if (!aborted_io_task->scsi_cmnd)
			return 0;

		/* Ask the firmware to invalidate the aborted command's ICD
		 * before posting the TMF itself.
		 */
		mgmt_invalidate_icds(phba,
				     aborted_io_task->psgl_handle->sgl_index,
				     cid);
		if (ring_mode)
			io_task->psgl_handle->type = INI_TMF_CMD;
		else
			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
				      INI_TMF_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_LOGOUT:
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		if (ring_mode)
			io_task->psgl_handle->type = HWH_TYPE_LOGOUT;
		else
			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
				      HWH_TYPE_LOGOUT);
		hwi_write_buffer(pwrb, task);
		break;

	default:
		SE_DEBUG(DBG_LVL_1, "opcode =%d Not supported \n",
			 task->hdr->opcode & ISCSI_OPCODE_MASK);
		return -EINVAL;
	}

	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
		      be32_to_cpu(task->data_count));
	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
		      io_task->pwrb_handle->nxt_wrb_index);
	/* Hardware expects the WRB in little-endian layout. */
	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));

	doorbell |= cid & DB_WRB_POST_CID_MASK;
	if (!ring_mode)
		doorbell |= (io_task->pwrb_handle->wrb_index &
			     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
	return 0;
}
3620
3621static int beiscsi_task_xmit(struct iscsi_task *task)
3622{
3623 struct iscsi_conn *conn = task->conn;
3624 struct beiscsi_io_task *io_task = task->dd_data;
3625 struct scsi_cmnd *sc = task->sc;
3626 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3627 struct scatterlist *sg;
3628 int num_sg;
3629 unsigned int writedir = 0, xferlen = 0;
3630
3631 SE_DEBUG(DBG_LVL_4, "\n cid=%d In beiscsi_task_xmit task=%p conn=%p \t"
3632 "beiscsi_conn=%p \n", beiscsi_conn->beiscsi_conn_cid,
3633 task, conn, beiscsi_conn);
3634 if (!sc)
3635 return beiscsi_mtask(task);
3636
3637 io_task->scsi_cmnd = sc;
3638 num_sg = scsi_dma_map(sc);
3639 if (num_sg < 0) {
3640 SE_DEBUG(DBG_LVL_1, " scsi_dma_map Failed\n")
3641 return num_sg;
3642 }
3643 SE_DEBUG(DBG_LVL_4, "xferlen=0x%08x scmd=%p num_sg=%d sernum=%lu\n",
3644 (scsi_bufflen(sc)), sc, num_sg, sc->serial_number);
3645 xferlen = scsi_bufflen(sc);
3646 sg = scsi_sglist(sc);
3647 if (sc->sc_data_direction == DMA_TO_DEVICE) {
3648 writedir = 1;
3649 SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x \n",
3650 task->imm_count);
3651 } else
3652 writedir = 0;
3653 return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
3654}
3655
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303656
/**
 * beiscsi_remove - PCI remove callback; tear down one adapter
 * @pcidev: the PCI device being removed
 *
 * Reverses beiscsi_dev_probe: disables interrupts, releases IRQ vectors,
 * stops the workqueue and blk-iopoll instances, frees port and controller
 * memory, and finally removes and frees the iSCSI host.
 */
static void beiscsi_remove(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_obj *pbe_eq;
	unsigned int i, msix_vec;

	phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
	if (!phba) {
		dev_err(&pcidev->dev, "beiscsi_remove called with no phba \n");
		return;
	}

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	hwi_disable_intr(phba);
	if (phba->msix_enabled) {
		/* num_cpus event-queue vectors plus one extra vector were
		 * requested at probe time, hence the inclusive bound.
		 */
		for (i = 0; i <= phba->num_cpus; i++) {
			msix_vec = phba->msix_entries[i].vector;
			free_irq(msix_vec, &phwi_context->be_eq[i]);
		}
	} else
		if (phba->pcidev->irq)
			free_irq(phba->pcidev->irq, phba);
	pci_disable_msix(phba->pcidev);
	destroy_workqueue(phba->wq);
	if (blk_iopoll_enabled)
		for (i = 0; i < phba->num_cpus; i++) {
			pbe_eq = &phwi_context->be_eq[i];
			blk_iopoll_disable(&pbe_eq->iopoll);
		}

	beiscsi_clean_port(phba);
	beiscsi_free_mem(phba);
	beiscsi_unmap_pci_function(phba);
	pci_free_consistent(phba->pcidev,
			    phba->ctrl.mbox_mem_alloced.size,
			    phba->ctrl.mbox_mem_alloced.va,
			    phba->ctrl.mbox_mem_alloced.dma);
	iscsi_host_remove(phba->shost);
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
}
3701
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303702static void beiscsi_msix_enable(struct beiscsi_hba *phba)
3703{
3704 int i, status;
3705
3706 for (i = 0; i <= phba->num_cpus; i++)
3707 phba->msix_entries[i].entry = i;
3708
3709 status = pci_enable_msix(phba->pcidev, phba->msix_entries,
3710 (phba->num_cpus + 1));
3711 if (!status)
3712 phba->msix_enabled = true;
3713
3714 return;
3715}
3716
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303717static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3718 const struct pci_device_id *id)
3719{
3720 struct beiscsi_hba *phba = NULL;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303721 struct hwi_controller *phwi_ctrlr;
3722 struct hwi_context_memory *phwi_context;
3723 struct be_eq_obj *pbe_eq;
3724 int ret, msix_vec, num_cpus, i;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303725
3726 ret = beiscsi_enable_pci(pcidev);
3727 if (ret < 0) {
3728 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3729 "Failed to enable pci device \n");
3730 return ret;
3731 }
3732
3733 phba = beiscsi_hba_alloc(pcidev);
3734 if (!phba) {
3735 dev_err(&pcidev->dev, "beiscsi_dev_probe-"
3736 " Failed in beiscsi_hba_alloc \n");
3737 goto disable_pci;
3738 }
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303739 SE_DEBUG(DBG_LVL_8, " phba = %p \n", phba);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303740
3741 pci_set_drvdata(pcidev, phba);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303742 if (enable_msix)
3743 num_cpus = find_num_cpus();
3744 else
3745 num_cpus = 1;
3746 phba->num_cpus = num_cpus;
3747 SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", phba->num_cpus);
3748
3749 if (enable_msix)
3750 beiscsi_msix_enable(phba);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303751 ret = be_ctrl_init(phba, pcidev);
3752 if (ret) {
3753 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3754 "Failed in be_ctrl_init\n");
3755 goto hba_free;
3756 }
3757
3758 spin_lock_init(&phba->io_sgl_lock);
3759 spin_lock_init(&phba->mgmt_sgl_lock);
3760 spin_lock_init(&phba->isr_lock);
Jayamohan Kallickal7da50872010-01-05 05:04:12 +05303761 ret = mgmt_get_fw_config(&phba->ctrl, phba);
3762 if (ret != 0) {
3763 shost_printk(KERN_ERR, phba->shost,
3764 "Error getting fw config\n");
3765 goto free_port;
3766 }
3767 phba->shost->max_id = phba->fw_config.iscsi_cid_count;
3768 phba->shost->can_queue = phba->params.ios_per_ctrl;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303769 beiscsi_get_params(phba);
3770 ret = beiscsi_init_port(phba);
3771 if (ret < 0) {
3772 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3773 "Failed in beiscsi_init_port\n");
3774 goto free_port;
3775 }
3776
3777 snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
3778 phba->shost->host_no);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303779 phba->wq = create_workqueue(phba->wq_name);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303780 if (!phba->wq) {
3781 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3782 "Failed to allocate work queue\n");
3783 goto free_twq;
3784 }
3785
3786 INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);
3787
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303788 phwi_ctrlr = phba->phwi_ctrlr;
3789 phwi_context = phwi_ctrlr->phwi_ctxt;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303790 if (blk_iopoll_enabled) {
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303791 for (i = 0; i < phba->num_cpus; i++) {
3792 pbe_eq = &phwi_context->be_eq[i];
3793 blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
3794 be_iopoll);
3795 blk_iopoll_enable(&pbe_eq->iopoll);
3796 }
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303797 }
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303798 ret = beiscsi_init_irqs(phba);
3799 if (ret < 0) {
3800 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3801 "Failed to beiscsi_init_irqs\n");
3802 goto free_blkenbld;
3803 }
3804 ret = hwi_enable_intr(phba);
3805 if (ret < 0) {
3806 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3807 "Failed to hwi_enable_intr\n");
3808 goto free_ctrlr;
3809 }
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303810 SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED \n\n\n");
3811 return 0;
3812
3813free_ctrlr:
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303814 if (phba->msix_enabled) {
3815 for (i = 0; i <= phba->num_cpus; i++) {
3816 msix_vec = phba->msix_entries[i].vector;
3817 free_irq(msix_vec, &phwi_context->be_eq[i]);
3818 }
3819 } else
3820 if (phba->pcidev->irq)
3821 free_irq(phba->pcidev->irq, phba);
3822 pci_disable_msix(phba->pcidev);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303823free_blkenbld:
3824 destroy_workqueue(phba->wq);
3825 if (blk_iopoll_enabled)
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303826 for (i = 0; i < phba->num_cpus; i++) {
3827 pbe_eq = &phwi_context->be_eq[i];
3828 blk_iopoll_disable(&pbe_eq->iopoll);
3829 }
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303830free_twq:
3831 beiscsi_clean_port(phba);
3832 beiscsi_free_mem(phba);
3833free_port:
3834 pci_free_consistent(phba->pcidev,
3835 phba->ctrl.mbox_mem_alloced.size,
3836 phba->ctrl.mbox_mem_alloced.va,
3837 phba->ctrl.mbox_mem_alloced.dma);
3838 beiscsi_unmap_pci_function(phba);
3839hba_free:
3840 iscsi_host_remove(phba->shost);
3841 pci_dev_put(phba->pcidev);
3842 iscsi_host_free(phba->shost);
3843disable_pci:
3844 pci_disable_device(pcidev);
3845 return ret;
3846}
3847
/*
 * iSCSI transport template registered with the open-iscsi transport class.
 * CAP_DATA_PATH_OFFLOAD advertises that the adapter offloads the data path;
 * session/connection management callbacks are the driver's own, while
 * generic teardown and parameter handling fall through to libiscsi.
 */
struct iscsi_transport beiscsi_iscsi_transport = {
	.owner = THIS_MODULE,
	.name = DRV_NAME,
	.caps = CAP_RECOVERY_L0 | CAP_HDRDGST |
		CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
	.param_mask = ISCSI_MAX_RECV_DLENGTH |
		ISCSI_MAX_XMIT_DLENGTH |
		ISCSI_HDRDGST_EN |
		ISCSI_DATADGST_EN |
		ISCSI_INITIAL_R2T_EN |
		ISCSI_MAX_R2T |
		ISCSI_IMM_DATA_EN |
		ISCSI_FIRST_BURST |
		ISCSI_MAX_BURST |
		ISCSI_PDU_INORDER_EN |
		ISCSI_DATASEQ_INORDER_EN |
		ISCSI_ERL |
		ISCSI_CONN_PORT |
		ISCSI_CONN_ADDRESS |
		ISCSI_EXP_STATSN |
		ISCSI_PERSISTENT_PORT |
		ISCSI_PERSISTENT_ADDRESS |
		ISCSI_TARGET_NAME | ISCSI_TPGT |
		ISCSI_USERNAME | ISCSI_PASSWORD |
		ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
		ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
		ISCSI_LU_RESET_TMO |
		ISCSI_PING_TMO | ISCSI_RECV_TMO |
		ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
	.host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
				ISCSI_HOST_INITIATOR_NAME,
	.create_session = beiscsi_session_create,
	.destroy_session = beiscsi_session_destroy,
	.create_conn = beiscsi_conn_create,
	.bind_conn = beiscsi_conn_bind,
	.destroy_conn = iscsi_conn_teardown,
	.set_param = beiscsi_set_param,
	.get_conn_param = beiscsi_conn_get_param,
	.get_session_param = iscsi_session_get_param,
	.get_host_param = beiscsi_get_host_param,
	.start_conn = beiscsi_conn_start,
	.stop_conn = beiscsi_conn_stop,
	.send_pdu = iscsi_conn_send_pdu,
	.xmit_task = beiscsi_task_xmit,
	.cleanup_task = beiscsi_cleanup_task,
	.alloc_pdu = beiscsi_alloc_pdu,
	.parse_pdu_itt = beiscsi_parse_pdu,
	.get_stats = beiscsi_conn_get_stats,
	.ep_connect = beiscsi_ep_connect,
	.ep_poll = beiscsi_ep_poll,
	.ep_disconnect = beiscsi_ep_disconnect,
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};
3901
/* PCI driver hooks; device IDs come from beiscsi_pci_id_table in be_main.h. */
static struct pci_driver beiscsi_pci_driver = {
	.name = DRV_NAME,
	.probe = beiscsi_dev_probe,
	.remove = beiscsi_remove,
	.id_table = beiscsi_pci_id_table
};
3908
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303909
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303910static int __init beiscsi_module_init(void)
3911{
3912 int ret;
3913
3914 beiscsi_scsi_transport =
3915 iscsi_register_transport(&beiscsi_iscsi_transport);
3916 if (!beiscsi_scsi_transport) {
3917 SE_DEBUG(DBG_LVL_1,
3918 "beiscsi_module_init - Unable to register beiscsi"
3919 "transport.\n");
3920 ret = -ENOMEM;
3921 }
3922 SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p \n",
3923 &beiscsi_iscsi_transport);
3924
3925 ret = pci_register_driver(&beiscsi_pci_driver);
3926 if (ret) {
3927 SE_DEBUG(DBG_LVL_1,
3928 "beiscsi_module_init - Unable to register"
3929 "beiscsi pci driver.\n");
3930 goto unregister_iscsi_transport;
3931 }
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05303932 ring_mode = 0;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303933 return 0;
3934
3935unregister_iscsi_transport:
3936 iscsi_unregister_transport(&beiscsi_iscsi_transport);
3937 return ret;
3938}
3939
/**
 * beiscsi_module_exit - module unload handler
 *
 * Unregisters the PCI driver first (tearing down all bound adapters via
 * beiscsi_remove) and only then the iSCSI transport.
 */
static void __exit beiscsi_module_exit(void)
{
	pci_unregister_driver(&beiscsi_pci_driver);
	iscsi_unregister_transport(&beiscsi_iscsi_transport);
}

module_init(beiscsi_module_init);
module_exit(beiscsi_module_exit);