blob: 4a855a9c7126a0f711039928d464f316ae5f26e8 [file] [log] [blame]
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301/**
2 * Copyright (C) 2005 - 2009 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
11 *
12 * Contact Information:
13 * linux-drivers@serverengines.com
14 *
15 * ServerEngines
16 * 209 N. Fair Oaks Ave
17 * Sunnyvale, CA 94085
18 *
19 */
20#include <linux/reboot.h>
21#include <linux/delay.h>
22#include <linux/interrupt.h>
23#include <linux/blkdev.h>
24#include <linux/pci.h>
25#include <linux/string.h>
26#include <linux/kernel.h>
27#include <linux/semaphore.h>
28
29#include <scsi/libiscsi.h>
30#include <scsi/scsi_transport_iscsi.h>
31#include <scsi/scsi_transport.h>
32#include <scsi/scsi_cmnd.h>
33#include <scsi/scsi_device.h>
34#include <scsi/scsi_host.h>
35#include <scsi/scsi.h>
36#include "be_main.h"
37#include "be_iscsi.h"
38#include "be_mgmt.h"
39
40static unsigned int be_iopoll_budget = 10;
41static unsigned int be_max_phys_size = 64;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +053042static unsigned int enable_msix = 1;
Jayamohan Kallickal35e66012009-10-23 11:53:49 +053043static unsigned int ring_mode;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +053044
45MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
46MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
47MODULE_AUTHOR("ServerEngines Corporation");
48MODULE_LICENSE("GPL");
49module_param(be_iopoll_budget, int, 0);
50module_param(enable_msix, int, 0);
51module_param(be_max_phys_size, uint, S_IRUGO);
52MODULE_PARM_DESC(be_max_phys_size, "Maximum Size (In Kilobytes) of physically"
53 "contiguous memory that can be allocated."
54 "Range is 16 - 128");
55
56static int beiscsi_slave_configure(struct scsi_device *sdev)
57{
58 blk_queue_max_segment_size(sdev->request_queue, 65536);
59 return 0;
60}
61
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +053062/*------------------- PCI Driver operations and data ----------------- */
63static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
64 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
65 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
66 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
67 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
68 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID4) },
69 { 0 }
70};
71MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
72
Jayamohan Kallickal6733b392009-09-05 07:36:35 +053073static struct scsi_host_template beiscsi_sht = {
74 .module = THIS_MODULE,
75 .name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
76 .proc_name = DRV_NAME,
77 .queuecommand = iscsi_queuecommand,
78 .eh_abort_handler = iscsi_eh_abort,
79 .change_queue_depth = iscsi_change_queue_depth,
80 .slave_configure = beiscsi_slave_configure,
81 .target_alloc = iscsi_target_alloc,
82 .eh_device_reset_handler = iscsi_eh_device_reset,
83 .eh_target_reset_handler = iscsi_eh_target_reset,
84 .sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
85 .can_queue = BE2_IO_DEPTH,
86 .this_id = -1,
87 .max_sectors = BEISCSI_MAX_SECTORS,
88 .cmd_per_lun = BEISCSI_CMD_PER_LUN,
89 .use_clustering = ENABLE_CLUSTERING,
90};
Jayamohan Kallickal6733b392009-09-05 07:36:35 +053091
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +053092static struct scsi_transport_template *beiscsi_scsi_transport;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +053093
94static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
95{
96 struct beiscsi_hba *phba;
97 struct Scsi_Host *shost;
98
99 shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
100 if (!shost) {
101 dev_err(&pcidev->dev, "beiscsi_hba_alloc -"
102 "iscsi_host_alloc failed \n");
103 return NULL;
104 }
105 shost->dma_boundary = pcidev->dma_mask;
106 shost->max_id = BE2_MAX_SESSIONS;
107 shost->max_channel = 0;
108 shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
109 shost->max_lun = BEISCSI_NUM_MAX_LUN;
110 shost->transportt = beiscsi_scsi_transport;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530111 phba = iscsi_host_priv(shost);
112 memset(phba, 0, sizeof(*phba));
113 phba->shost = shost;
114 phba->pcidev = pci_dev_get(pcidev);
115
116 if (iscsi_host_add(shost, &phba->pcidev->dev))
117 goto free_devices;
118 return phba;
119
120free_devices:
121 pci_dev_put(phba->pcidev);
122 iscsi_host_free(phba->shost);
123 return NULL;
124}
125
126static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
127{
128 if (phba->csr_va) {
129 iounmap(phba->csr_va);
130 phba->csr_va = NULL;
131 }
132 if (phba->db_va) {
133 iounmap(phba->db_va);
134 phba->db_va = NULL;
135 }
136 if (phba->pci_va) {
137 iounmap(phba->pci_va);
138 phba->pci_va = NULL;
139 }
140}
141
142static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
143 struct pci_dev *pcidev)
144{
145 u8 __iomem *addr;
146
147 addr = ioremap_nocache(pci_resource_start(pcidev, 2),
148 pci_resource_len(pcidev, 2));
149 if (addr == NULL)
150 return -ENOMEM;
151 phba->ctrl.csr = addr;
152 phba->csr_va = addr;
153 phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);
154
155 addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
156 if (addr == NULL)
157 goto pci_map_err;
158 phba->ctrl.db = addr;
159 phba->db_va = addr;
160 phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);
161
162 addr = ioremap_nocache(pci_resource_start(pcidev, 1),
163 pci_resource_len(pcidev, 1));
164 if (addr == NULL)
165 goto pci_map_err;
166 phba->ctrl.pcicfg = addr;
167 phba->pci_va = addr;
168 phba->pci_pa.u.a64.address = pci_resource_start(pcidev, 1);
169 return 0;
170
171pci_map_err:
172 beiscsi_unmap_pci_function(phba);
173 return -ENOMEM;
174}
175
176static int beiscsi_enable_pci(struct pci_dev *pcidev)
177{
178 int ret;
179
180 ret = pci_enable_device(pcidev);
181 if (ret) {
182 dev_err(&pcidev->dev, "beiscsi_enable_pci - enable device "
183 "failed. Returning -ENODEV\n");
184 return ret;
185 }
186
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530187 pci_set_master(pcidev);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530188 if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
189 ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
190 if (ret) {
191 dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
192 pci_disable_device(pcidev);
193 return ret;
194 }
195 }
196 return 0;
197}
198
199static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
200{
201 struct be_ctrl_info *ctrl = &phba->ctrl;
202 struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
203 struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
204 int status = 0;
205
206 ctrl->pdev = pdev;
207 status = beiscsi_map_pci_bars(phba, pdev);
208 if (status)
209 return status;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530210 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
211 mbox_mem_alloc->va = pci_alloc_consistent(pdev,
212 mbox_mem_alloc->size,
213 &mbox_mem_alloc->dma);
214 if (!mbox_mem_alloc->va) {
215 beiscsi_unmap_pci_function(phba);
216 status = -ENOMEM;
217 return status;
218 }
219
220 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
221 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
222 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
223 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
224 spin_lock_init(&ctrl->mbox_lock);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530225 spin_lock_init(&phba->ctrl.mcc_lock);
226 spin_lock_init(&phba->ctrl.mcc_cq_lock);
227
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530228 return status;
229}
230
231static void beiscsi_get_params(struct beiscsi_hba *phba)
232{
Jayamohan Kallickal7da50872010-01-05 05:04:12 +0530233 phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
234 - (phba->fw_config.iscsi_cid_count
235 + BE2_TMFS
236 + BE2_NOPOUT_REQ));
237 phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
238 phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count;;
239 phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530240 phba->params.num_sge_per_io = BE2_SGE;
241 phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
242 phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
243 phba->params.eq_timer = 64;
244 phba->params.num_eq_entries =
Jayamohan Kallickal7da50872010-01-05 05:04:12 +0530245 (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
246 + BE2_TMFS) / 512) + 1) * 512;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530247 phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
248 ? 1024 : phba->params.num_eq_entries;
249 SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d \n",
Jayamohan Kallickal7da50872010-01-05 05:04:12 +0530250 phba->params.num_eq_entries);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530251 phba->params.num_cq_entries =
Jayamohan Kallickal7da50872010-01-05 05:04:12 +0530252 (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
253 + BE2_TMFS) / 512) + 1) * 512;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530254 phba->params.wrbs_per_cxn = 256;
255}
256
257static void hwi_ring_eq_db(struct beiscsi_hba *phba,
258 unsigned int id, unsigned int clr_interrupt,
259 unsigned int num_processed,
260 unsigned char rearm, unsigned char event)
261{
262 u32 val = 0;
263 val |= id & DB_EQ_RING_ID_MASK;
264 if (rearm)
265 val |= 1 << DB_EQ_REARM_SHIFT;
266 if (clr_interrupt)
267 val |= 1 << DB_EQ_CLR_SHIFT;
268 if (event)
269 val |= 1 << DB_EQ_EVNT_SHIFT;
270 val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
271 iowrite32(val, phba->db_va + DB_EQ_OFFSET);
272}
273
274/**
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530275 * be_isr_mcc - The isr routine of the driver.
276 * @irq: Not used
277 * @dev_id: Pointer to host adapter structure
278 */
279static irqreturn_t be_isr_mcc(int irq, void *dev_id)
280{
281 struct beiscsi_hba *phba;
282 struct be_eq_entry *eqe = NULL;
283 struct be_queue_info *eq;
284 struct be_queue_info *mcc;
285 unsigned int num_eq_processed;
286 struct be_eq_obj *pbe_eq;
287 unsigned long flags;
288
289 pbe_eq = dev_id;
290 eq = &pbe_eq->q;
291 phba = pbe_eq->phba;
292 mcc = &phba->ctrl.mcc_obj.cq;
293 eqe = queue_tail_node(eq);
294 if (!eqe)
295 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
296
297 num_eq_processed = 0;
298
299 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
300 & EQE_VALID_MASK) {
301 if (((eqe->dw[offsetof(struct amap_eq_entry,
302 resource_id) / 32] &
303 EQE_RESID_MASK) >> 16) == mcc->id) {
304 spin_lock_irqsave(&phba->isr_lock, flags);
305 phba->todo_mcc_cq = 1;
306 spin_unlock_irqrestore(&phba->isr_lock, flags);
307 }
308 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
309 queue_tail_inc(eq);
310 eqe = queue_tail_node(eq);
311 num_eq_processed++;
312 }
313 if (phba->todo_mcc_cq)
314 queue_work(phba->wq, &phba->work_cqs);
315 if (num_eq_processed)
316 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
317
318 return IRQ_HANDLED;
319}
320
321/**
322 * be_isr_msix - The isr routine of the driver.
323 * @irq: Not used
324 * @dev_id: Pointer to host adapter structure
325 */
326static irqreturn_t be_isr_msix(int irq, void *dev_id)
327{
328 struct beiscsi_hba *phba;
329 struct be_eq_entry *eqe = NULL;
330 struct be_queue_info *eq;
331 struct be_queue_info *cq;
332 unsigned int num_eq_processed;
333 struct be_eq_obj *pbe_eq;
334 unsigned long flags;
335
336 pbe_eq = dev_id;
337 eq = &pbe_eq->q;
338 cq = pbe_eq->cq;
339 eqe = queue_tail_node(eq);
340 if (!eqe)
341 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
342
343 phba = pbe_eq->phba;
344 num_eq_processed = 0;
345 if (blk_iopoll_enabled) {
346 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
347 & EQE_VALID_MASK) {
348 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
349 blk_iopoll_sched(&pbe_eq->iopoll);
350
351 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
352 queue_tail_inc(eq);
353 eqe = queue_tail_node(eq);
354 num_eq_processed++;
355 }
356 if (num_eq_processed)
357 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
358
359 return IRQ_HANDLED;
360 } else {
361 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
362 & EQE_VALID_MASK) {
363 spin_lock_irqsave(&phba->isr_lock, flags);
364 phba->todo_cq = 1;
365 spin_unlock_irqrestore(&phba->isr_lock, flags);
366 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
367 queue_tail_inc(eq);
368 eqe = queue_tail_node(eq);
369 num_eq_processed++;
370 }
371 if (phba->todo_cq)
372 queue_work(phba->wq, &phba->work_cqs);
373
374 if (num_eq_processed)
375 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
376
377 return IRQ_HANDLED;
378 }
379}
380
381/**
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530382 * be_isr - The isr routine of the driver.
383 * @irq: Not used
384 * @dev_id: Pointer to host adapter structure
385 */
386static irqreturn_t be_isr(int irq, void *dev_id)
387{
388 struct beiscsi_hba *phba;
389 struct hwi_controller *phwi_ctrlr;
390 struct hwi_context_memory *phwi_context;
391 struct be_eq_entry *eqe = NULL;
392 struct be_queue_info *eq;
393 struct be_queue_info *cq;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530394 struct be_queue_info *mcc;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530395 unsigned long flags, index;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530396 unsigned int num_mcceq_processed, num_ioeq_processed;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530397 struct be_ctrl_info *ctrl;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530398 struct be_eq_obj *pbe_eq;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530399 int isr;
400
401 phba = dev_id;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530402 ctrl = &phba->ctrl;;
403 isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
404 (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
405 if (!isr)
406 return IRQ_NONE;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530407
408 phwi_ctrlr = phba->phwi_ctrlr;
409 phwi_context = phwi_ctrlr->phwi_ctxt;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530410 pbe_eq = &phwi_context->be_eq[0];
411
412 eq = &phwi_context->be_eq[0].q;
413 mcc = &phba->ctrl.mcc_obj.cq;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530414 index = 0;
415 eqe = queue_tail_node(eq);
416 if (!eqe)
417 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
418
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530419 num_ioeq_processed = 0;
420 num_mcceq_processed = 0;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530421 if (blk_iopoll_enabled) {
422 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
423 & EQE_VALID_MASK) {
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530424 if (((eqe->dw[offsetof(struct amap_eq_entry,
425 resource_id) / 32] &
426 EQE_RESID_MASK) >> 16) == mcc->id) {
427 spin_lock_irqsave(&phba->isr_lock, flags);
428 phba->todo_mcc_cq = 1;
429 spin_unlock_irqrestore(&phba->isr_lock, flags);
430 num_mcceq_processed++;
431 } else {
432 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
433 blk_iopoll_sched(&pbe_eq->iopoll);
434 num_ioeq_processed++;
435 }
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530436 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
437 queue_tail_inc(eq);
438 eqe = queue_tail_node(eq);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530439 }
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530440 if (num_ioeq_processed || num_mcceq_processed) {
441 if (phba->todo_mcc_cq)
442 queue_work(phba->wq, &phba->work_cqs);
443
444 if ((num_mcceq_processed) && (!num_ioeq_processed))
445 hwi_ring_eq_db(phba, eq->id, 0,
446 (num_ioeq_processed +
447 num_mcceq_processed) , 1, 1);
448 else
449 hwi_ring_eq_db(phba, eq->id, 0,
450 (num_ioeq_processed +
451 num_mcceq_processed), 0, 1);
452
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530453 return IRQ_HANDLED;
454 } else
455 return IRQ_NONE;
456 } else {
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530457 cq = &phwi_context->be_cq[0];
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530458 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
459 & EQE_VALID_MASK) {
460
461 if (((eqe->dw[offsetof(struct amap_eq_entry,
462 resource_id) / 32] &
463 EQE_RESID_MASK) >> 16) != cq->id) {
464 spin_lock_irqsave(&phba->isr_lock, flags);
465 phba->todo_mcc_cq = 1;
466 spin_unlock_irqrestore(&phba->isr_lock, flags);
467 } else {
468 spin_lock_irqsave(&phba->isr_lock, flags);
469 phba->todo_cq = 1;
470 spin_unlock_irqrestore(&phba->isr_lock, flags);
471 }
472 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
473 queue_tail_inc(eq);
474 eqe = queue_tail_node(eq);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530475 num_ioeq_processed++;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530476 }
477 if (phba->todo_cq || phba->todo_mcc_cq)
478 queue_work(phba->wq, &phba->work_cqs);
479
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530480 if (num_ioeq_processed) {
481 hwi_ring_eq_db(phba, eq->id, 0,
482 num_ioeq_processed, 1, 1);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530483 return IRQ_HANDLED;
484 } else
485 return IRQ_NONE;
486 }
487}
488
489static int beiscsi_init_irqs(struct beiscsi_hba *phba)
490{
491 struct pci_dev *pcidev = phba->pcidev;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530492 struct hwi_controller *phwi_ctrlr;
493 struct hwi_context_memory *phwi_context;
494 int ret, msix_vec, i = 0;
495 char desc[32];
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530496
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530497 phwi_ctrlr = phba->phwi_ctrlr;
498 phwi_context = phwi_ctrlr->phwi_ctxt;
499
500 if (phba->msix_enabled) {
501 for (i = 0; i < phba->num_cpus; i++) {
502 sprintf(desc, "beiscsi_msix_%04x", i);
503 msix_vec = phba->msix_entries[i].vector;
504 ret = request_irq(msix_vec, be_isr_msix, 0, desc,
505 &phwi_context->be_eq[i]);
506 }
507 msix_vec = phba->msix_entries[i].vector;
508 ret = request_irq(msix_vec, be_isr_mcc, 0, "beiscsi_msix_mcc",
509 &phwi_context->be_eq[i]);
510 } else {
511 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
512 "beiscsi", phba);
513 if (ret) {
514 shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
515 "Failed to register irq\\n");
516 return ret;
517 }
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530518 }
519 return 0;
520}
521
522static void hwi_ring_cq_db(struct beiscsi_hba *phba,
523 unsigned int id, unsigned int num_processed,
524 unsigned char rearm, unsigned char event)
525{
526 u32 val = 0;
527 val |= id & DB_CQ_RING_ID_MASK;
528 if (rearm)
529 val |= 1 << DB_CQ_REARM_SHIFT;
530 val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
531 iowrite32(val, phba->db_va + DB_CQ_OFFSET);
532}
533
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530534static unsigned int
535beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
536 struct beiscsi_hba *phba,
537 unsigned short cid,
538 struct pdu_base *ppdu,
539 unsigned long pdu_len,
540 void *pbuffer, unsigned long buf_len)
541{
542 struct iscsi_conn *conn = beiscsi_conn->conn;
543 struct iscsi_session *session = conn->session;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530544 struct iscsi_task *task;
545 struct beiscsi_io_task *io_task;
546 struct iscsi_hdr *login_hdr;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530547
548 switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
549 PDUBASE_OPCODE_MASK) {
550 case ISCSI_OP_NOOP_IN:
551 pbuffer = NULL;
552 buf_len = 0;
553 break;
554 case ISCSI_OP_ASYNC_EVENT:
555 break;
556 case ISCSI_OP_REJECT:
557 WARN_ON(!pbuffer);
558 WARN_ON(!(buf_len == 48));
559 SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
560 break;
561 case ISCSI_OP_LOGIN_RSP:
Jayamohan Kallickal7bd6e252010-01-05 05:07:02 +0530562 case ISCSI_OP_TEXT_RSP:
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530563 task = conn->login_task;
564 io_task = task->dd_data;
565 login_hdr = (struct iscsi_hdr *)ppdu;
566 login_hdr->itt = io_task->libiscsi_itt;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530567 break;
568 default:
569 shost_printk(KERN_WARNING, phba->shost,
570 "Unrecognized opcode 0x%x in async msg \n",
571 (ppdu->
572 dw[offsetof(struct amap_pdu_base, opcode) / 32]
573 & PDUBASE_OPCODE_MASK));
574 return 1;
575 }
576
577 spin_lock_bh(&session->lock);
578 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
579 spin_unlock_bh(&session->lock);
580 return 0;
581}
582
583static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
584{
585 struct sgl_handle *psgl_handle;
586
587 if (phba->io_sgl_hndl_avbl) {
588 SE_DEBUG(DBG_LVL_8,
589 "In alloc_io_sgl_handle,io_sgl_alloc_index=%d \n",
590 phba->io_sgl_alloc_index);
591 psgl_handle = phba->io_sgl_hndl_base[phba->
592 io_sgl_alloc_index];
593 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
594 phba->io_sgl_hndl_avbl--;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530595 if (phba->io_sgl_alloc_index == (phba->params.
596 ios_per_ctrl - 1))
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530597 phba->io_sgl_alloc_index = 0;
598 else
599 phba->io_sgl_alloc_index++;
600 } else
601 psgl_handle = NULL;
602 return psgl_handle;
603}
604
605static void
606free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
607{
608 SE_DEBUG(DBG_LVL_8, "In free_,io_sgl_free_index=%d \n",
609 phba->io_sgl_free_index);
610 if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
611 /*
612 * this can happen if clean_task is called on a task that
613 * failed in xmit_task or alloc_pdu.
614 */
615 SE_DEBUG(DBG_LVL_8,
616 "Double Free in IO SGL io_sgl_free_index=%d,"
617 "value there=%p \n", phba->io_sgl_free_index,
618 phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
619 return;
620 }
621 phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
622 phba->io_sgl_hndl_avbl++;
623 if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
624 phba->io_sgl_free_index = 0;
625 else
626 phba->io_sgl_free_index++;
627}
628
629/**
630 * alloc_wrb_handle - To allocate a wrb handle
631 * @phba: The hba pointer
632 * @cid: The cid to use for allocation
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530633 *
634 * This happens under session_lock until submission to chip
635 */
Jayamohan Kallickald5431482010-01-05 05:06:21 +0530636struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530637{
638 struct hwi_wrb_context *pwrb_context;
639 struct hwi_controller *phwi_ctrlr;
Jayamohan Kallickald5431482010-01-05 05:06:21 +0530640 struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530641
642 phwi_ctrlr = phba->phwi_ctrlr;
643 pwrb_context = &phwi_ctrlr->wrb_context[cid];
Jayamohan Kallickald5431482010-01-05 05:06:21 +0530644 if (pwrb_context->wrb_handles_available >= 2) {
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530645 pwrb_handle = pwrb_context->pwrb_handle_base[
646 pwrb_context->alloc_index];
647 pwrb_context->wrb_handles_available--;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530648 if (pwrb_context->alloc_index ==
649 (phba->params.wrbs_per_cxn - 1))
650 pwrb_context->alloc_index = 0;
651 else
652 pwrb_context->alloc_index++;
Jayamohan Kallickald5431482010-01-05 05:06:21 +0530653
654 pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
655 pwrb_context->alloc_index];
656 pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530657 } else
658 pwrb_handle = NULL;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530659 return pwrb_handle;
660}
661
662/**
663 * free_wrb_handle - To free the wrb handle back to pool
664 * @phba: The hba pointer
665 * @pwrb_context: The context to free from
666 * @pwrb_handle: The wrb_handle to free
667 *
668 * This happens under session_lock until submission to chip
669 */
670static void
671free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
672 struct wrb_handle *pwrb_handle)
673{
Jayamohan Kallickal35e66012009-10-23 11:53:49 +0530674 if (!ring_mode)
675 pwrb_context->pwrb_handle_base[pwrb_context->free_index] =
676 pwrb_handle;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530677 pwrb_context->wrb_handles_available++;
678 if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
679 pwrb_context->free_index = 0;
680 else
681 pwrb_context->free_index++;
682
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530683 SE_DEBUG(DBG_LVL_8,
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530684 "FREE WRB: pwrb_handle=%p free_index=0x%x"
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530685 "wrb_handles_available=%d \n",
686 pwrb_handle, pwrb_context->free_index,
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530687 pwrb_context->wrb_handles_available);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530688}
689
690static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
691{
692 struct sgl_handle *psgl_handle;
693
694 if (phba->eh_sgl_hndl_avbl) {
695 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
696 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
697 SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x \n",
698 phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
699 phba->eh_sgl_hndl_avbl--;
700 if (phba->eh_sgl_alloc_index ==
701 (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
702 1))
703 phba->eh_sgl_alloc_index = 0;
704 else
705 phba->eh_sgl_alloc_index++;
706 } else
707 psgl_handle = NULL;
708 return psgl_handle;
709}
710
711void
712free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
713{
714
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530715 SE_DEBUG(DBG_LVL_8, "In free_mgmt_sgl_handle,eh_sgl_free_index=%d \n",
716 phba->eh_sgl_free_index);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530717 if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
718 /*
719 * this can happen if clean_task is called on a task that
720 * failed in xmit_task or alloc_pdu.
721 */
722 SE_DEBUG(DBG_LVL_8,
723 "Double Free in eh SGL ,eh_sgl_free_index=%d \n",
724 phba->eh_sgl_free_index);
725 return;
726 }
727 phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
728 phba->eh_sgl_hndl_avbl++;
729 if (phba->eh_sgl_free_index ==
730 (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
731 phba->eh_sgl_free_index = 0;
732 else
733 phba->eh_sgl_free_index++;
734}
735
736static void
737be_complete_io(struct beiscsi_conn *beiscsi_conn,
738 struct iscsi_task *task, struct sol_cqe *psol)
739{
740 struct beiscsi_io_task *io_task = task->dd_data;
741 struct be_status_bhs *sts_bhs =
742 (struct be_status_bhs *)io_task->cmd_bhs;
743 struct iscsi_conn *conn = beiscsi_conn->conn;
744 unsigned int sense_len;
745 unsigned char *sense;
746 u32 resid = 0, exp_cmdsn, max_cmdsn;
747 u8 rsp, status, flags;
748
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530749 exp_cmdsn = (psol->
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530750 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
751 & SOL_EXP_CMD_SN_MASK);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530752 max_cmdsn = ((psol->
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530753 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
754 & SOL_EXP_CMD_SN_MASK) +
755 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
756 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
757 rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
758 & SOL_RESP_MASK) >> 16);
759 status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
760 & SOL_STS_MASK) >> 8);
761 flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
762 & SOL_FLAGS_MASK) >> 24) | 0x80;
763
764 task->sc->result = (DID_OK << 16) | status;
765 if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
766 task->sc->result = DID_ERROR << 16;
767 goto unmap;
768 }
769
770 /* bidi not initially supported */
771 if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
772 resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
773 32] & SOL_RES_CNT_MASK);
774
775 if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
776 task->sc->result = DID_ERROR << 16;
777
778 if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
779 scsi_set_resid(task->sc, resid);
780 if (!status && (scsi_bufflen(task->sc) - resid <
781 task->sc->underflow))
782 task->sc->result = DID_ERROR << 16;
783 }
784 }
785
786 if (status == SAM_STAT_CHECK_CONDITION) {
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530787 unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530788 sense = sts_bhs->sense_info + sizeof(unsigned short);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530789 sense_len = cpu_to_be16(*slen);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530790 memcpy(task->sc->sense_buffer, sense,
791 min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
792 }
793 if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
794 if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
795 & SOL_RES_CNT_MASK)
796 conn->rxdata_octets += (psol->
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530797 dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
798 & SOL_RES_CNT_MASK);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530799 }
800unmap:
801 scsi_dma_unmap(io_task->scsi_cmnd);
802 iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
803}
804
805static void
806be_complete_logout(struct beiscsi_conn *beiscsi_conn,
807 struct iscsi_task *task, struct sol_cqe *psol)
808{
809 struct iscsi_logout_rsp *hdr;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530810 struct beiscsi_io_task *io_task = task->dd_data;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530811 struct iscsi_conn *conn = beiscsi_conn->conn;
812
813 hdr = (struct iscsi_logout_rsp *)task->hdr;
Jayamohan Kallickal7bd6e252010-01-05 05:07:02 +0530814 hdr->opcode = ISCSI_OP_LOGOUT_RSP;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530815 hdr->t2wait = 5;
816 hdr->t2retain = 0;
817 hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
818 & SOL_FLAGS_MASK) >> 24) | 0x80;
819 hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
820 32] & SOL_RESP_MASK);
821 hdr->exp_cmdsn = cpu_to_be32(psol->
822 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
823 & SOL_EXP_CMD_SN_MASK);
824 hdr->max_cmdsn = be32_to_cpu((psol->
825 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
826 & SOL_EXP_CMD_SN_MASK) +
827 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
828 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
Jayamohan Kallickal7bd6e252010-01-05 05:07:02 +0530829 hdr->dlength[0] = 0;
830 hdr->dlength[1] = 0;
831 hdr->dlength[2] = 0;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530832 hdr->hlength = 0;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530833 hdr->itt = io_task->libiscsi_itt;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530834 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
835}
836
837static void
838be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
839 struct iscsi_task *task, struct sol_cqe *psol)
840{
841 struct iscsi_tm_rsp *hdr;
842 struct iscsi_conn *conn = beiscsi_conn->conn;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530843 struct beiscsi_io_task *io_task = task->dd_data;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530844
845 hdr = (struct iscsi_tm_rsp *)task->hdr;
Jayamohan Kallickal7bd6e252010-01-05 05:07:02 +0530846 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530847 hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
848 & SOL_FLAGS_MASK) >> 24) | 0x80;
849 hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
850 32] & SOL_RESP_MASK);
851 hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530852 i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530853 hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
854 i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
855 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
856 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530857 hdr->itt = io_task->libiscsi_itt;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530858 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
859}
860
861static void
862hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
863 struct beiscsi_hba *phba, struct sol_cqe *psol)
864{
865 struct hwi_wrb_context *pwrb_context;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530866 struct wrb_handle *pwrb_handle = NULL;
Jayamohan Kallickal35e66012009-10-23 11:53:49 +0530867 struct sgl_handle *psgl_handle = NULL;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530868 struct hwi_controller *phwi_ctrlr;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530869 struct iscsi_task *task;
870 struct beiscsi_io_task *io_task;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530871 struct iscsi_conn *conn = beiscsi_conn->conn;
872 struct iscsi_session *session = conn->session;
873
874 phwi_ctrlr = phba->phwi_ctrlr;
Jayamohan Kallickal35e66012009-10-23 11:53:49 +0530875 if (ring_mode) {
876 psgl_handle = phba->sgl_hndl_array[((psol->
877 dw[offsetof(struct amap_sol_cqe_ring, icd_index) /
878 32] & SOL_ICD_INDEX_MASK) >> 6)];
879 pwrb_context = &phwi_ctrlr->wrb_context[psgl_handle->cid];
880 task = psgl_handle->task;
881 pwrb_handle = NULL;
882 } else {
883 pwrb_context = &phwi_ctrlr->wrb_context[((psol->
884 dw[offsetof(struct amap_sol_cqe, cid) / 32] &
Jayamohan Kallickal7da50872010-01-05 05:04:12 +0530885 SOL_CID_MASK) >> 6) -
886 phba->fw_config.iscsi_cid_start];
Jayamohan Kallickal35e66012009-10-23 11:53:49 +0530887 pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
888 dw[offsetof(struct amap_sol_cqe, wrb_index) /
889 32] & SOL_WRB_INDEX_MASK) >> 16)];
890 task = pwrb_handle->pio_handle;
891 }
892
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530893 io_task = task->dd_data;
894 spin_lock(&phba->mgmt_sgl_lock);
895 free_mgmt_sgl_handle(phba, io_task->psgl_handle);
896 spin_unlock(&phba->mgmt_sgl_lock);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530897 spin_lock_bh(&session->lock);
898 free_wrb_handle(phba, pwrb_context, pwrb_handle);
899 spin_unlock_bh(&session->lock);
900}
901
902static void
903be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
904 struct iscsi_task *task, struct sol_cqe *psol)
905{
906 struct iscsi_nopin *hdr;
907 struct iscsi_conn *conn = beiscsi_conn->conn;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530908 struct beiscsi_io_task *io_task = task->dd_data;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530909
910 hdr = (struct iscsi_nopin *)task->hdr;
911 hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
912 & SOL_FLAGS_MASK) >> 24) | 0x80;
913 hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
914 i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
915 hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
916 i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
917 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
918 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
919 hdr->opcode = ISCSI_OP_NOOP_IN;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530920 hdr->itt = io_task->libiscsi_itt;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530921 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
922}
923
924static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
925 struct beiscsi_hba *phba, struct sol_cqe *psol)
926{
927 struct hwi_wrb_context *pwrb_context;
928 struct wrb_handle *pwrb_handle;
929 struct iscsi_wrb *pwrb = NULL;
930 struct hwi_controller *phwi_ctrlr;
931 struct iscsi_task *task;
Jayamohan Kallickal35e66012009-10-23 11:53:49 +0530932 struct sgl_handle *psgl_handle = NULL;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530933 unsigned int type;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530934 struct iscsi_conn *conn = beiscsi_conn->conn;
935 struct iscsi_session *session = conn->session;
936
937 phwi_ctrlr = phba->phwi_ctrlr;
Jayamohan Kallickal35e66012009-10-23 11:53:49 +0530938 if (ring_mode) {
939 psgl_handle = phba->sgl_hndl_array[((psol->
940 dw[offsetof(struct amap_sol_cqe_ring, icd_index) /
941 32] & SOL_ICD_INDEX_MASK) >> 6)];
942 task = psgl_handle->task;
943 type = psgl_handle->type;
944 } else {
945 pwrb_context = &phwi_ctrlr->
946 wrb_context[((psol->dw[offsetof
947 (struct amap_sol_cqe, cid) / 32]
Jayamohan Kallickal7da50872010-01-05 05:04:12 +0530948 & SOL_CID_MASK) >> 6) -
949 phba->fw_config.iscsi_cid_start];
Jayamohan Kallickal35e66012009-10-23 11:53:49 +0530950 pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
951 dw[offsetof(struct amap_sol_cqe, wrb_index) /
952 32] & SOL_WRB_INDEX_MASK) >> 16)];
953 task = pwrb_handle->pio_handle;
954 pwrb = pwrb_handle->pwrb;
955 type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530956 WRB_TYPE_MASK) >> 28;
Jayamohan Kallickal35e66012009-10-23 11:53:49 +0530957 }
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530958 spin_lock_bh(&session->lock);
959 switch (type) {
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530960 case HWH_TYPE_IO:
961 case HWH_TYPE_IO_RD:
962 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
963 ISCSI_OP_NOOP_OUT) {
964 be_complete_nopin_resp(beiscsi_conn, task, psol);
965 } else
966 be_complete_io(beiscsi_conn, task, psol);
967 break;
968
969 case HWH_TYPE_LOGOUT:
970 be_complete_logout(beiscsi_conn, task, psol);
971 break;
972
973 case HWH_TYPE_LOGIN:
974 SE_DEBUG(DBG_LVL_1,
975 "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
976 "- Solicited path \n");
977 break;
978
979 case HWH_TYPE_TMF:
980 be_complete_tmf(beiscsi_conn, task, psol);
981 break;
982
983 case HWH_TYPE_NOP:
984 be_complete_nopin_resp(beiscsi_conn, task, psol);
985 break;
986
987 default:
Jayamohan Kallickal35e66012009-10-23 11:53:49 +0530988 if (ring_mode)
989 shost_printk(KERN_WARNING, phba->shost,
990 "In hwi_complete_cmd, unknown type = %d"
991 "icd_index 0x%x CID 0x%x\n", type,
992 ((psol->dw[offsetof(struct amap_sol_cqe_ring,
993 icd_index) / 32] & SOL_ICD_INDEX_MASK) >> 6),
994 psgl_handle->cid);
995 else
996 shost_printk(KERN_WARNING, phba->shost,
997 "In hwi_complete_cmd, unknown type = %d"
998 "wrb_index 0x%x CID 0x%x\n", type,
999 ((psol->dw[offsetof(struct amap_iscsi_wrb,
1000 type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
1001 ((psol->dw[offsetof(struct amap_sol_cqe,
1002 cid) / 32] & SOL_CID_MASK) >> 6));
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301003 break;
1004 }
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05301005
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301006 spin_unlock_bh(&session->lock);
1007}
1008
1009static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
1010 *pasync_ctx, unsigned int is_header,
1011 unsigned int host_write_ptr)
1012{
1013 if (is_header)
1014 return &pasync_ctx->async_entry[host_write_ptr].
1015 header_busy_list;
1016 else
1017 return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
1018}
1019
1020static struct async_pdu_handle *
1021hwi_get_async_handle(struct beiscsi_hba *phba,
1022 struct beiscsi_conn *beiscsi_conn,
1023 struct hwi_async_pdu_context *pasync_ctx,
1024 struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
1025{
1026 struct be_bus_address phys_addr;
1027 struct list_head *pbusy_list;
1028 struct async_pdu_handle *pasync_handle = NULL;
1029 int buffer_len = 0;
1030 unsigned char buffer_index = -1;
1031 unsigned char is_header = 0;
1032
1033 phys_addr.u.a32.address_lo =
1034 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
1035 ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
1036 & PDUCQE_DPL_MASK) >> 16);
1037 phys_addr.u.a32.address_hi =
1038 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];
1039
1040 phys_addr.u.a64.address =
1041 *((unsigned long long *)(&phys_addr.u.a64.address));
1042
1043 switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
1044 & PDUCQE_CODE_MASK) {
1045 case UNSOL_HDR_NOTIFY:
1046 is_header = 1;
1047
1048 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
1049 (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1050 index) / 32] & PDUCQE_INDEX_MASK));
1051
1052 buffer_len = (unsigned int)(phys_addr.u.a64.address -
1053 pasync_ctx->async_header.pa_base.u.a64.address);
1054
1055 buffer_index = buffer_len /
1056 pasync_ctx->async_header.buffer_size;
1057
1058 break;
1059 case UNSOL_DATA_NOTIFY:
1060 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
1061 dw[offsetof(struct amap_i_t_dpdu_cqe,
1062 index) / 32] & PDUCQE_INDEX_MASK));
1063 buffer_len = (unsigned long)(phys_addr.u.a64.address -
1064 pasync_ctx->async_data.pa_base.u.
1065 a64.address);
1066 buffer_index = buffer_len / pasync_ctx->async_data.buffer_size;
1067 break;
1068 default:
1069 pbusy_list = NULL;
1070 shost_printk(KERN_WARNING, phba->shost,
1071 "Unexpected code=%d \n",
1072 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1073 code) / 32] & PDUCQE_CODE_MASK);
1074 return NULL;
1075 }
1076
1077 WARN_ON(!(buffer_index <= pasync_ctx->async_data.num_entries));
1078 WARN_ON(list_empty(pbusy_list));
1079 list_for_each_entry(pasync_handle, pbusy_list, link) {
1080 WARN_ON(pasync_handle->consumed);
1081 if (pasync_handle->index == buffer_index)
1082 break;
1083 }
1084
1085 WARN_ON(!pasync_handle);
1086
Jayamohan Kallickal7da50872010-01-05 05:04:12 +05301087 pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
1088 phba->fw_config.iscsi_cid_start;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301089 pasync_handle->is_header = is_header;
1090 pasync_handle->buffer_len = ((pdpdu_cqe->
1091 dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
1092 & PDUCQE_DPL_MASK) >> 16);
1093
1094 *pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1095 index) / 32] & PDUCQE_INDEX_MASK);
1096 return pasync_handle;
1097}
1098
1099static unsigned int
1100hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
1101 unsigned int is_header, unsigned int cq_index)
1102{
1103 struct list_head *pbusy_list;
1104 struct async_pdu_handle *pasync_handle;
1105 unsigned int num_entries, writables = 0;
1106 unsigned int *pep_read_ptr, *pwritables;
1107
1108
1109 if (is_header) {
1110 pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
1111 pwritables = &pasync_ctx->async_header.writables;
1112 num_entries = pasync_ctx->async_header.num_entries;
1113 } else {
1114 pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
1115 pwritables = &pasync_ctx->async_data.writables;
1116 num_entries = pasync_ctx->async_data.num_entries;
1117 }
1118
1119 while ((*pep_read_ptr) != cq_index) {
1120 (*pep_read_ptr)++;
1121 *pep_read_ptr = (*pep_read_ptr) % num_entries;
1122
1123 pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
1124 *pep_read_ptr);
1125 if (writables == 0)
1126 WARN_ON(list_empty(pbusy_list));
1127
1128 if (!list_empty(pbusy_list)) {
1129 pasync_handle = list_entry(pbusy_list->next,
1130 struct async_pdu_handle,
1131 link);
1132 WARN_ON(!pasync_handle);
1133 pasync_handle->consumed = 1;
1134 }
1135
1136 writables++;
1137 }
1138
1139 if (!writables) {
1140 SE_DEBUG(DBG_LVL_1,
1141 "Duplicate notification received - index 0x%x!!\n",
1142 cq_index);
1143 WARN_ON(1);
1144 }
1145
1146 *pwritables = *pwritables + writables;
1147 return 0;
1148}
1149
1150static unsigned int hwi_free_async_msg(struct beiscsi_hba *phba,
1151 unsigned int cri)
1152{
1153 struct hwi_controller *phwi_ctrlr;
1154 struct hwi_async_pdu_context *pasync_ctx;
1155 struct async_pdu_handle *pasync_handle, *tmp_handle;
1156 struct list_head *plist;
1157 unsigned int i = 0;
1158
1159 phwi_ctrlr = phba->phwi_ctrlr;
1160 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1161
1162 plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1163
1164 list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
1165 list_del(&pasync_handle->link);
1166
1167 if (i == 0) {
1168 list_add_tail(&pasync_handle->link,
1169 &pasync_ctx->async_header.free_list);
1170 pasync_ctx->async_header.free_entries++;
1171 i++;
1172 } else {
1173 list_add_tail(&pasync_handle->link,
1174 &pasync_ctx->async_data.free_list);
1175 pasync_ctx->async_data.free_entries++;
1176 i++;
1177 }
1178 }
1179
1180 INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
1181 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
1182 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1183 return 0;
1184}
1185
1186static struct phys_addr *
1187hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
1188 unsigned int is_header, unsigned int host_write_ptr)
1189{
1190 struct phys_addr *pasync_sge = NULL;
1191
1192 if (is_header)
1193 pasync_sge = pasync_ctx->async_header.ring_base;
1194 else
1195 pasync_sge = pasync_ctx->async_data.ring_base;
1196
1197 return pasync_sge + host_write_ptr;
1198}
1199
1200static void hwi_post_async_buffers(struct beiscsi_hba *phba,
1201 unsigned int is_header)
1202{
1203 struct hwi_controller *phwi_ctrlr;
1204 struct hwi_async_pdu_context *pasync_ctx;
1205 struct async_pdu_handle *pasync_handle;
1206 struct list_head *pfree_link, *pbusy_list;
1207 struct phys_addr *pasync_sge;
1208 unsigned int ring_id, num_entries;
1209 unsigned int host_write_num;
1210 unsigned int writables;
1211 unsigned int i = 0;
1212 u32 doorbell = 0;
1213
1214 phwi_ctrlr = phba->phwi_ctrlr;
1215 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1216
1217 if (is_header) {
1218 num_entries = pasync_ctx->async_header.num_entries;
1219 writables = min(pasync_ctx->async_header.writables,
1220 pasync_ctx->async_header.free_entries);
1221 pfree_link = pasync_ctx->async_header.free_list.next;
1222 host_write_num = pasync_ctx->async_header.host_write_ptr;
1223 ring_id = phwi_ctrlr->default_pdu_hdr.id;
1224 } else {
1225 num_entries = pasync_ctx->async_data.num_entries;
1226 writables = min(pasync_ctx->async_data.writables,
1227 pasync_ctx->async_data.free_entries);
1228 pfree_link = pasync_ctx->async_data.free_list.next;
1229 host_write_num = pasync_ctx->async_data.host_write_ptr;
1230 ring_id = phwi_ctrlr->default_pdu_data.id;
1231 }
1232
1233 writables = (writables / 8) * 8;
1234 if (writables) {
1235 for (i = 0; i < writables; i++) {
1236 pbusy_list =
1237 hwi_get_async_busy_list(pasync_ctx, is_header,
1238 host_write_num);
1239 pasync_handle =
1240 list_entry(pfree_link, struct async_pdu_handle,
1241 link);
1242 WARN_ON(!pasync_handle);
1243 pasync_handle->consumed = 0;
1244
1245 pfree_link = pfree_link->next;
1246
1247 pasync_sge = hwi_get_ring_address(pasync_ctx,
1248 is_header, host_write_num);
1249
1250 pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
1251 pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
1252
1253 list_move(&pasync_handle->link, pbusy_list);
1254
1255 host_write_num++;
1256 host_write_num = host_write_num % num_entries;
1257 }
1258
1259 if (is_header) {
1260 pasync_ctx->async_header.host_write_ptr =
1261 host_write_num;
1262 pasync_ctx->async_header.free_entries -= writables;
1263 pasync_ctx->async_header.writables -= writables;
1264 pasync_ctx->async_header.busy_entries += writables;
1265 } else {
1266 pasync_ctx->async_data.host_write_ptr = host_write_num;
1267 pasync_ctx->async_data.free_entries -= writables;
1268 pasync_ctx->async_data.writables -= writables;
1269 pasync_ctx->async_data.busy_entries += writables;
1270 }
1271
1272 doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
1273 doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
1274 doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
1275 doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
1276 << DB_DEF_PDU_CQPROC_SHIFT;
1277
1278 iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
1279 }
1280}
1281
1282static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
1283 struct beiscsi_conn *beiscsi_conn,
1284 struct i_t_dpdu_cqe *pdpdu_cqe)
1285{
1286 struct hwi_controller *phwi_ctrlr;
1287 struct hwi_async_pdu_context *pasync_ctx;
1288 struct async_pdu_handle *pasync_handle = NULL;
1289 unsigned int cq_index = -1;
1290
1291 phwi_ctrlr = phba->phwi_ctrlr;
1292 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1293
1294 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1295 pdpdu_cqe, &cq_index);
1296 BUG_ON(pasync_handle->is_header != 0);
1297 if (pasync_handle->consumed == 0)
1298 hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
1299 cq_index);
1300
1301 hwi_free_async_msg(phba, pasync_handle->cri);
1302 hwi_post_async_buffers(phba, pasync_handle->is_header);
1303}
1304
1305static unsigned int
1306hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1307 struct beiscsi_hba *phba,
1308 struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
1309{
1310 struct list_head *plist;
1311 struct async_pdu_handle *pasync_handle;
1312 void *phdr = NULL;
1313 unsigned int hdr_len = 0, buf_len = 0;
1314 unsigned int status, index = 0, offset = 0;
1315 void *pfirst_buffer = NULL;
1316 unsigned int num_buf = 0;
1317
1318 plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1319
1320 list_for_each_entry(pasync_handle, plist, link) {
1321 if (index == 0) {
1322 phdr = pasync_handle->pbuffer;
1323 hdr_len = pasync_handle->buffer_len;
1324 } else {
1325 buf_len = pasync_handle->buffer_len;
1326 if (!num_buf) {
1327 pfirst_buffer = pasync_handle->pbuffer;
1328 num_buf++;
1329 }
1330 memcpy(pfirst_buffer + offset,
1331 pasync_handle->pbuffer, buf_len);
1332 offset = buf_len;
1333 }
1334 index++;
1335 }
1336
1337 status = beiscsi_process_async_pdu(beiscsi_conn, phba,
Jayamohan Kallickal7da50872010-01-05 05:04:12 +05301338 (beiscsi_conn->beiscsi_conn_cid -
1339 phba->fw_config.iscsi_cid_start),
1340 phdr, hdr_len, pfirst_buffer,
1341 buf_len);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301342
1343 if (status == 0)
1344 hwi_free_async_msg(phba, cri);
1345 return 0;
1346}
1347
1348static unsigned int
1349hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
1350 struct beiscsi_hba *phba,
1351 struct async_pdu_handle *pasync_handle)
1352{
1353 struct hwi_async_pdu_context *pasync_ctx;
1354 struct hwi_controller *phwi_ctrlr;
1355 unsigned int bytes_needed = 0, status = 0;
1356 unsigned short cri = pasync_handle->cri;
1357 struct pdu_base *ppdu;
1358
1359 phwi_ctrlr = phba->phwi_ctrlr;
1360 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1361
1362 list_del(&pasync_handle->link);
1363 if (pasync_handle->is_header) {
1364 pasync_ctx->async_header.busy_entries--;
1365 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1366 hwi_free_async_msg(phba, cri);
1367 BUG();
1368 }
1369
1370 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1371 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
1372 pasync_ctx->async_entry[cri].wait_queue.hdr_len =
1373 (unsigned short)pasync_handle->buffer_len;
1374 list_add_tail(&pasync_handle->link,
1375 &pasync_ctx->async_entry[cri].wait_queue.list);
1376
1377 ppdu = pasync_handle->pbuffer;
1378 bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
1379 data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
1380 0xFFFF0000) | ((be16_to_cpu((ppdu->
1381 dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
1382 & PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));
1383
1384 if (status == 0) {
1385 pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
1386 bytes_needed;
1387
1388 if (bytes_needed == 0)
1389 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1390 pasync_ctx, cri);
1391 }
1392 } else {
1393 pasync_ctx->async_data.busy_entries--;
1394 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1395 list_add_tail(&pasync_handle->link,
1396 &pasync_ctx->async_entry[cri].wait_queue.
1397 list);
1398 pasync_ctx->async_entry[cri].wait_queue.
1399 bytes_received +=
1400 (unsigned short)pasync_handle->buffer_len;
1401
1402 if (pasync_ctx->async_entry[cri].wait_queue.
1403 bytes_received >=
1404 pasync_ctx->async_entry[cri].wait_queue.
1405 bytes_needed)
1406 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1407 pasync_ctx, cri);
1408 }
1409 }
1410 return status;
1411}
1412
1413static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
1414 struct beiscsi_hba *phba,
1415 struct i_t_dpdu_cqe *pdpdu_cqe)
1416{
1417 struct hwi_controller *phwi_ctrlr;
1418 struct hwi_async_pdu_context *pasync_ctx;
1419 struct async_pdu_handle *pasync_handle = NULL;
1420 unsigned int cq_index = -1;
1421
1422 phwi_ctrlr = phba->phwi_ctrlr;
1423 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1424 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1425 pdpdu_cqe, &cq_index);
1426
1427 if (pasync_handle->consumed == 0)
1428 hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
1429 cq_index);
1430 hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
1431 hwi_post_async_buffers(phba, pasync_handle->is_header);
1432}
1433
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05301434
1435static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301436{
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301437 struct be_queue_info *cq;
1438 struct sol_cqe *sol;
1439 struct dmsg_cqe *dmsg;
1440 unsigned int num_processed = 0;
1441 unsigned int tot_nump = 0;
1442 struct beiscsi_conn *beiscsi_conn;
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05301443 struct sgl_handle *psgl_handle = NULL;
Jayamohan Kallickalc2462282010-01-05 05:05:34 +05301444 struct beiscsi_endpoint *beiscsi_ep;
1445 struct iscsi_endpoint *ep;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05301446 struct beiscsi_hba *phba;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301447
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05301448 cq = pbe_eq->cq;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301449 sol = queue_tail_node(cq);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05301450 phba = pbe_eq->phba;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301451
1452 while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
1453 CQE_VALID_MASK) {
1454 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
1455
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05301456 if (ring_mode) {
1457 psgl_handle = phba->sgl_hndl_array[((sol->
1458 dw[offsetof(struct amap_sol_cqe_ring,
1459 icd_index) / 32] & SOL_ICD_INDEX_MASK)
1460 >> 6)];
Jayamohan Kallickalc2462282010-01-05 05:05:34 +05301461 ep = phba->ep_array[psgl_handle->cid];
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05301462 } else {
Jayamohan Kallickalc2462282010-01-05 05:05:34 +05301463 ep = phba->ep_array[(u32) ((sol->
1464 dw[offsetof(struct amap_sol_cqe, cid) / 32] &
1465 SOL_CID_MASK) >> 6) -
Jayamohan Kallickal7da50872010-01-05 05:04:12 +05301466 phba->fw_config.iscsi_cid_start];
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05301467 }
Jayamohan Kallickalc2462282010-01-05 05:05:34 +05301468 beiscsi_ep = ep->dd_data;
1469 beiscsi_conn = beiscsi_ep->conn;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301470 if (num_processed >= 32) {
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05301471 hwi_ring_cq_db(phba, cq->id,
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301472 num_processed, 0, 0);
1473 tot_nump += num_processed;
1474 num_processed = 0;
1475 }
1476
1477 switch ((u32) sol->dw[offsetof(struct amap_sol_cqe, code) /
1478 32] & CQE_CODE_MASK) {
1479 case SOL_CMD_COMPLETE:
1480 hwi_complete_cmd(beiscsi_conn, phba, sol);
1481 break;
1482 case DRIVERMSG_NOTIFY:
1483 SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY \n");
1484 dmsg = (struct dmsg_cqe *)sol;
1485 hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
1486 break;
1487 case UNSOL_HDR_NOTIFY:
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05301488 SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_ NOTIFY\n");
1489 hwi_process_default_pdu_ring(beiscsi_conn, phba,
1490 (struct i_t_dpdu_cqe *)sol);
1491 break;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301492 case UNSOL_DATA_NOTIFY:
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05301493 SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n");
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301494 hwi_process_default_pdu_ring(beiscsi_conn, phba,
1495 (struct i_t_dpdu_cqe *)sol);
1496 break;
1497 case CXN_INVALIDATE_INDEX_NOTIFY:
1498 case CMD_INVALIDATED_NOTIFY:
1499 case CXN_INVALIDATE_NOTIFY:
1500 SE_DEBUG(DBG_LVL_1,
1501 "Ignoring CQ Error notification for cmd/cxn"
1502 "invalidate\n");
1503 break;
1504 case SOL_CMD_KILLED_DATA_DIGEST_ERR:
1505 case CMD_KILLED_INVALID_STATSN_RCVD:
1506 case CMD_KILLED_INVALID_R2T_RCVD:
1507 case CMD_CXN_KILLED_LUN_INVALID:
1508 case CMD_CXN_KILLED_ICD_INVALID:
1509 case CMD_CXN_KILLED_ITT_INVALID:
1510 case CMD_CXN_KILLED_SEQ_OUTOFORDER:
1511 case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05301512 if (ring_mode) {
1513 SE_DEBUG(DBG_LVL_1,
1514 "CQ Error notification for cmd.. "
1515 "code %d cid 0x%x\n",
1516 sol->dw[offsetof(struct amap_sol_cqe, code) /
1517 32] & CQE_CODE_MASK, psgl_handle->cid);
1518 } else {
1519 SE_DEBUG(DBG_LVL_1,
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301520 "CQ Error notification for cmd.. "
1521 "code %d cid 0x%x\n",
1522 sol->dw[offsetof(struct amap_sol_cqe, code) /
1523 32] & CQE_CODE_MASK,
1524 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1525 32] & SOL_CID_MASK));
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05301526 }
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301527 break;
1528 case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
1529 SE_DEBUG(DBG_LVL_1,
1530 "Digest error on def pdu ring, dropping..\n");
1531 hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
1532 (struct i_t_dpdu_cqe *) sol);
1533 break;
1534 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
1535 case CXN_KILLED_BURST_LEN_MISMATCH:
1536 case CXN_KILLED_AHS_RCVD:
1537 case CXN_KILLED_HDR_DIGEST_ERR:
1538 case CXN_KILLED_UNKNOWN_HDR:
1539 case CXN_KILLED_STALE_ITT_TTT_RCVD:
1540 case CXN_KILLED_INVALID_ITT_TTT_RCVD:
1541 case CXN_KILLED_TIMED_OUT:
1542 case CXN_KILLED_FIN_RCVD:
1543 case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
1544 case CXN_KILLED_BAD_WRB_INDEX_ERROR:
1545 case CXN_KILLED_OVER_RUN_RESIDUAL:
1546 case CXN_KILLED_UNDER_RUN_RESIDUAL:
1547 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05301548 if (ring_mode) {
1549 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
1550 "0x%x...\n",
1551 sol->dw[offsetof(struct amap_sol_cqe, code) /
1552 32] & CQE_CODE_MASK, psgl_handle->cid);
1553 } else {
1554 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301555 "0x%x...\n",
1556 sol->dw[offsetof(struct amap_sol_cqe, code) /
1557 32] & CQE_CODE_MASK,
Jayamohan Kallickal7da50872010-01-05 05:04:12 +05301558 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1559 32] & CQE_CID_MASK));
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05301560 }
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301561 iscsi_conn_failure(beiscsi_conn->conn,
1562 ISCSI_ERR_CONN_FAILED);
1563 break;
1564 case CXN_KILLED_RST_SENT:
1565 case CXN_KILLED_RST_RCVD:
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05301566 if (ring_mode) {
1567 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset"
1568 "received/sent on CID 0x%x...\n",
1569 sol->dw[offsetof(struct amap_sol_cqe, code) /
1570 32] & CQE_CODE_MASK, psgl_handle->cid);
1571 } else {
1572 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset"
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05301573 "received/sent on CID 0x%x...\n",
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301574 sol->dw[offsetof(struct amap_sol_cqe, code) /
1575 32] & CQE_CODE_MASK,
Jayamohan Kallickal7da50872010-01-05 05:04:12 +05301576 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1577 32] & CQE_CID_MASK));
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05301578 }
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301579 iscsi_conn_failure(beiscsi_conn->conn,
1580 ISCSI_ERR_CONN_FAILED);
1581 break;
1582 default:
1583 SE_DEBUG(DBG_LVL_1, "CQ Error Invalid code= %d "
1584 "received on CID 0x%x...\n",
1585 sol->dw[offsetof(struct amap_sol_cqe, code) /
1586 32] & CQE_CODE_MASK,
Jayamohan Kallickal7da50872010-01-05 05:04:12 +05301587 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1588 32] & CQE_CID_MASK));
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301589 break;
1590 }
1591
1592 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
1593 queue_tail_inc(cq);
1594 sol = queue_tail_node(cq);
1595 num_processed++;
1596 }
1597
1598 if (num_processed > 0) {
1599 tot_nump += num_processed;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05301600 hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301601 }
1602 return tot_nump;
1603}
1604
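/*
 * beiscsi_process_all_cqs - work item that handles deferred CQ processing
 * outside interrupt context.  The todo_mcc_cq/todo_cq flags are cleared
 * under isr_lock; when todo_cq was set, beiscsi_process_cq() is run on the
 * EQ object chosen for this adapter (the extra MCC vector when MSI-X is
 * enabled, otherwise be_eq[0]).
 */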
1605static void beiscsi_process_all_cqs(struct work_struct *work)
1606{
1607 unsigned long flags;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05301608 struct hwi_controller *phwi_ctrlr;
1609 struct hwi_context_memory *phwi_context;
1610 struct be_eq_obj *pbe_eq;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301611 struct beiscsi_hba *phba =
1612 container_of(work, struct beiscsi_hba, work_cqs);
1613
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05301614 phwi_ctrlr = phba->phwi_ctrlr;
1615 phwi_context = phwi_ctrlr->phwi_ctxt;
1616 if (phba->msix_enabled)
1617 pbe_eq = &phwi_context->be_eq[phba->num_cpus];
1618 else
1619 pbe_eq = &phwi_context->be_eq[0];
1620
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301621 if (phba->todo_mcc_cq) {
1622 spin_lock_irqsave(&phba->isr_lock, flags);
1623 phba->todo_mcc_cq = 0;
1624 spin_unlock_irqrestore(&phba->isr_lock, flags);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301625 }
1626
1627 if (phba->todo_cq) {
1628 spin_lock_irqsave(&phba->isr_lock, flags);
1629 phba->todo_cq = 0;
1630 spin_unlock_irqrestore(&phba->isr_lock, flags);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05301631 beiscsi_process_cq(pbe_eq);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301632 }
1633}
1634
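/*
 * be_iopoll - blk_iopoll callback.  Drains the CQ attached to this EQ;
 * if fewer entries than the budget were processed, polling is completed
 * and the event queue doorbell is rung to re-arm interrupts.
 */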
1635static int be_iopoll(struct blk_iopoll *iop, int budget)
1636{
	unsigned int ret;
1638 struct beiscsi_hba *phba;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05301639 struct be_eq_obj *pbe_eq;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301640
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05301641 pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
1642 ret = beiscsi_process_cq(pbe_eq);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301643 if (ret < budget) {
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05301644 phba = pbe_eq->phba;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301645 blk_iopoll_complete(iop);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05301646 SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id);
1647 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301648 }
1649 return ret;
1650}
1651
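/*
 * hwi_write_sgl - program the WRB and its SGL fragment for an I/O task.
 * The first two scatterlist elements are written inline into the WRB
 * (sge0/sge1); the complete scatterlist is then written as iscsi_sge
 * entries into the task's SGL fragment following the BHS descriptor,
 * with the final entry marked as last_sge.
 */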
1652static void
1653hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
1654 unsigned int num_sg, struct beiscsi_io_task *io_task)
1655{
1656 struct iscsi_sge *psgl;
1657 unsigned short sg_len, index;
1658 unsigned int sge_len = 0;
1659 unsigned long long addr;
1660 struct scatterlist *l_sg;
1661 unsigned int offset;
1662
1663 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1664 io_task->bhs_pa.u.a32.address_lo);
1665 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1666 io_task->bhs_pa.u.a32.address_hi);
1667
1668 l_sg = sg;
	for (index = 0; (index < num_sg) && (index < 2);
	     index++, sg = sg_next(sg)) {
1670 if (index == 0) {
1671 sg_len = sg_dma_len(sg);
1672 addr = (u64) sg_dma_address(sg);
1673 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1674 (addr & 0xFFFFFFFF));
1675 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1676 (addr >> 32));
1677 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1678 sg_len);
1679 sge_len = sg_len;
1680 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1681 1);
1682 } else {
1683 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1684 0);
1685 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
1686 pwrb, sge_len);
1687 sg_len = sg_dma_len(sg);
1688 addr = (u64) sg_dma_address(sg);
1689 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
1690 (addr & 0xFFFFFFFF));
1691 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
1692 (addr >> 32));
1693 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
1694 sg_len);
1695 }
1696 }
1697 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1698 memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
1699
1700 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
1701
1702 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1703 io_task->bhs_pa.u.a32.address_hi);
1704 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1705 io_task->bhs_pa.u.a32.address_lo);
1706
1707 if (num_sg == 2)
1708 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 1);
1709 sg = l_sg;
1710 psgl++;
1711 psgl++;
1712 offset = 0;
	for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
1714 sg_len = sg_dma_len(sg);
1715 addr = (u64) sg_dma_address(sg);
1716 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1717 (addr & 0xFFFFFFFF));
1718 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1719 (addr >> 32));
1720 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
1721 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
1722 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1723 offset += sg_len;
1724 }
1725 psgl--;
1726 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1727}
1728
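/*
 * hwi_write_buffer - set up the WRB and SGL for a non-I/O task (login,
 * NOP-Out and similar).  If the task carries data it is mapped with
 * pci_map_single() and described by sge0; otherwise only the BHS is
 * referenced and the data-segment present bit (dsp) is left clear.
 */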
1729static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
1730{
1731 struct iscsi_sge *psgl;
1732 unsigned long long addr;
1733 struct beiscsi_io_task *io_task = task->dd_data;
1734 struct beiscsi_conn *beiscsi_conn = io_task->conn;
1735 struct beiscsi_hba *phba = beiscsi_conn->phba;
1736
1737 io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
1738 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1739 io_task->bhs_pa.u.a32.address_lo);
1740 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1741 io_task->bhs_pa.u.a32.address_hi);
1742
1743 if (task->data) {
1744 if (task->data_count) {
1745 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
			addr = (u64) pci_map_single(phba->pcidev,
						    task->data,
						    task->data_count,
						    PCI_DMA_TODEVICE);
1749 } else {
1750 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1751 addr = 0;
1752 }
1753 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1754 (addr & 0xFFFFFFFF));
1755 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1756 (addr >> 32));
1757 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1758 task->data_count);
1759
1760 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
1761 } else {
1762 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1763 addr = 0;
1764 }
1765
1766 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1767
1768 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
1769
1770 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1771 io_task->bhs_pa.u.a32.address_hi);
1772 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1773 io_task->bhs_pa.u.a32.address_lo);
1774 if (task->data) {
1775 psgl++;
1776 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
1777 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
1778 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
1779 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
1780 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
1781 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1782
1783 psgl++;
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
			      (addr & 0xFFFFFFFF));
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
			      (addr >> 32));
1790 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
1791 }
1792 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1793}
1794
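/*
 * beiscsi_find_mem_req - compute, per memory region, how many bytes the
 * driver needs (WRBs and WRB handles, SGL handles and SGEs, default PDU
 * header/data buffers, rings and handles, async PDU context) and record
 * the totals in phba->mem_req[] for beiscsi_alloc_mem() to satisfy.
 */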
1795static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
1796{
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05301797 unsigned int num_cq_pages, num_async_pdu_buf_pages;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301798 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
1799 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
1800
1801 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
1802 sizeof(struct sol_cqe));
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301803 num_async_pdu_buf_pages =
1804 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1805 phba->params.defpdu_hdr_sz);
1806 num_async_pdu_buf_sgl_pages =
1807 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1808 sizeof(struct phys_addr));
1809 num_async_pdu_data_pages =
1810 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1811 phba->params.defpdu_data_sz);
1812 num_async_pdu_data_sgl_pages =
1813 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1814 sizeof(struct phys_addr));
1815
1816 phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
1817
1818 phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
1819 BE_ISCSI_PDU_HEADER_SIZE;
1820 phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
1821 sizeof(struct hwi_context_memory);
1822
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301823
1824 phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
1825 * (phba->params.wrbs_per_cxn)
1826 * phba->params.cxns_per_ctrl;
1827 wrb_sz_per_cxn = sizeof(struct wrb_handle) *
1828 (phba->params.wrbs_per_cxn);
1829 phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
1830 phba->params.cxns_per_ctrl);
1831
1832 phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
1833 phba->params.icds_per_ctrl;
1834 phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
1835 phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
1836
1837 phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
1838 num_async_pdu_buf_pages * PAGE_SIZE;
1839 phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
1840 num_async_pdu_data_pages * PAGE_SIZE;
1841 phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
1842 num_async_pdu_buf_sgl_pages * PAGE_SIZE;
1843 phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
1844 num_async_pdu_data_sgl_pages * PAGE_SIZE;
1845 phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
1846 phba->params.asyncpdus_per_ctrl *
1847 sizeof(struct async_pdu_handle);
1848 phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
1849 phba->params.asyncpdus_per_ctrl *
1850 sizeof(struct async_pdu_handle);
1851 phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
1852 sizeof(struct hwi_async_pdu_context) +
1853 (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
1854}
1855
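/*
 * beiscsi_alloc_mem - back every entry of phba->mem_req[] with DMA
 * coherent memory.  Each region is allocated in chunks of at most
 * be_max_phys_size KB; when an allocation fails the chunk size is reduced
 * (down to BE_MIN_MEM_SIZE), so a region may end up split across several
 * mem_array elements.  On failure everything allocated so far is freed.
 */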
1856static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
1857{
1858 struct be_mem_descriptor *mem_descr;
1859 dma_addr_t bus_add;
1860 struct mem_array *mem_arr, *mem_arr_orig;
1861 unsigned int i, j, alloc_size, curr_alloc_size;
1862
1863 phba->phwi_ctrlr = kmalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
1864 if (!phba->phwi_ctrlr)
1865 return -ENOMEM;
1866
1867 phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
1868 GFP_KERNEL);
1869 if (!phba->init_mem) {
1870 kfree(phba->phwi_ctrlr);
1871 return -ENOMEM;
1872 }
1873
1874 mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
1875 GFP_KERNEL);
1876 if (!mem_arr_orig) {
1877 kfree(phba->init_mem);
1878 kfree(phba->phwi_ctrlr);
1879 return -ENOMEM;
1880 }
1881
1882 mem_descr = phba->init_mem;
1883 for (i = 0; i < SE_MEM_MAX; i++) {
1884 j = 0;
1885 mem_arr = mem_arr_orig;
1886 alloc_size = phba->mem_req[i];
1887 memset(mem_arr, 0, sizeof(struct mem_array) *
1888 BEISCSI_MAX_FRAGS_INIT);
1889 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
1890 do {
1891 mem_arr->virtual_address = pci_alloc_consistent(
1892 phba->pcidev,
1893 curr_alloc_size,
1894 &bus_add);
1895 if (!mem_arr->virtual_address) {
1896 if (curr_alloc_size <= BE_MIN_MEM_SIZE)
1897 goto free_mem;
1898 if (curr_alloc_size -
1899 rounddown_pow_of_two(curr_alloc_size))
1900 curr_alloc_size = rounddown_pow_of_two
1901 (curr_alloc_size);
1902 else
1903 curr_alloc_size = curr_alloc_size / 2;
1904 } else {
1905 mem_arr->bus_address.u.
1906 a64.address = (__u64) bus_add;
1907 mem_arr->size = curr_alloc_size;
1908 alloc_size -= curr_alloc_size;
1909 curr_alloc_size = min(be_max_phys_size *
1910 1024, alloc_size);
1911 j++;
1912 mem_arr++;
1913 }
1914 } while (alloc_size);
1915 mem_descr->num_elements = j;
1916 mem_descr->size_in_bytes = phba->mem_req[i];
1917 mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
1918 GFP_KERNEL);
1919 if (!mem_descr->mem_array)
1920 goto free_mem;
1921
1922 memcpy(mem_descr->mem_array, mem_arr_orig,
1923 sizeof(struct mem_array) * j);
1924 mem_descr++;
1925 }
1926 kfree(mem_arr_orig);
1927 return 0;
1928free_mem:
1929 mem_descr->num_elements = j;
1930 while ((i) || (j)) {
1931 for (j = mem_descr->num_elements; j > 0; j--) {
1932 pci_free_consistent(phba->pcidev,
1933 mem_descr->mem_array[j - 1].size,
1934 mem_descr->mem_array[j - 1].
1935 virtual_address,
1936 mem_descr->mem_array[j - 1].
1937 bus_address.u.a64.address);
1938 }
1939 if (i) {
1940 i--;
1941 kfree(mem_descr->mem_array);
1942 mem_descr--;
1943 }
1944 }
1945 kfree(mem_arr_orig);
1946 kfree(phba->init_mem);
1947 kfree(phba->phwi_ctrlr);
1948 return -ENOMEM;
1949}
1950
1951static int beiscsi_get_memory(struct beiscsi_hba *phba)
1952{
1953 beiscsi_find_mem_req(phba);
1954 return beiscsi_alloc_mem(phba);
1955}
1956
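/*
 * iscsi_init_global_templates - pre-format the global Data-Out and
 * NOP-Out PDU header templates held in the ISCSI_MEM_GLOBAL_HEADER
 * region; the NOP-Out template is written with a reserved TTT of
 * 0xFFFFFFFF and the F bit set.
 */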
1957static void iscsi_init_global_templates(struct beiscsi_hba *phba)
1958{
1959 struct pdu_data_out *pdata_out;
1960 struct pdu_nop_out *pnop_out;
1961 struct be_mem_descriptor *mem_descr;
1962
1963 mem_descr = phba->init_mem;
1964 mem_descr += ISCSI_MEM_GLOBAL_HEADER;
1965 pdata_out =
1966 (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
1967 memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
1968
1969 AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
1970 IIOC_SCSI_DATA);
1971
1972 pnop_out =
1973 (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
1974 virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
1975
1976 memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
1977 AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
1978 AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
1979 AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
1980}
1981
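/*
 * beiscsi_init_wrb_handle - carve the HWI_MEM_WRBH and HWI_MEM_WRB
 * regions into per-connection WRB handle arrays and iscsi_wrb rings.
 * Every wrb_context gets wrbs_per_cxn handles, each pointing at its
 * backing iscsi_wrb; the walk moves to the next mem_array chunk as each
 * one is exhausted.
 */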
1982static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
1983{
1984 struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
1985 struct wrb_handle *pwrb_handle;
1986 struct hwi_controller *phwi_ctrlr;
1987 struct hwi_wrb_context *pwrb_context;
1988 struct iscsi_wrb *pwrb;
1989 unsigned int num_cxn_wrbh;
1990 unsigned int num_cxn_wrb, j, idx, index;
1991
1992 mem_descr_wrbh = phba->init_mem;
1993 mem_descr_wrbh += HWI_MEM_WRBH;
1994
1995 mem_descr_wrb = phba->init_mem;
1996 mem_descr_wrb += HWI_MEM_WRB;
1997
1998 idx = 0;
1999 pwrb_handle = mem_descr_wrbh->mem_array[idx].virtual_address;
2000 num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
2001 ((sizeof(struct wrb_handle)) *
2002 phba->params.wrbs_per_cxn));
2003 phwi_ctrlr = phba->phwi_ctrlr;
2004
2005 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2006 pwrb_context = &phwi_ctrlr->wrb_context[index];
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302007 pwrb_context->pwrb_handle_base =
2008 kzalloc(sizeof(struct wrb_handle *) *
2009 phba->params.wrbs_per_cxn, GFP_KERNEL);
2010 pwrb_context->pwrb_handle_basestd =
2011 kzalloc(sizeof(struct wrb_handle *) *
2012 phba->params.wrbs_per_cxn, GFP_KERNEL);
2013 if (num_cxn_wrbh) {
2014 pwrb_context->alloc_index = 0;
2015 pwrb_context->wrb_handles_available = 0;
2016 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2017 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2018 pwrb_context->pwrb_handle_basestd[j] =
2019 pwrb_handle;
2020 pwrb_context->wrb_handles_available++;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302021 pwrb_handle->wrb_index = j;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302022 pwrb_handle++;
2023 }
2024 pwrb_context->free_index = 0;
2025 num_cxn_wrbh--;
2026 } else {
2027 idx++;
2028 pwrb_handle =
2029 mem_descr_wrbh->mem_array[idx].virtual_address;
2030 num_cxn_wrbh =
2031 ((mem_descr_wrbh->mem_array[idx].size) /
2032 ((sizeof(struct wrb_handle)) *
2033 phba->params.wrbs_per_cxn));
			pwrb_context->alloc_index = 0;
			pwrb_context->wrb_handles_available = 0;
2035 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2036 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2037 pwrb_context->pwrb_handle_basestd[j] =
2038 pwrb_handle;
2039 pwrb_context->wrb_handles_available++;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302040 pwrb_handle->wrb_index = j;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302041 pwrb_handle++;
2042 }
2043 pwrb_context->free_index = 0;
2044 num_cxn_wrbh--;
2045 }
2046 }
2047 idx = 0;
2048 pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
	num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
		      (sizeof(struct iscsi_wrb) *
		       phba->params.wrbs_per_cxn);
2052
	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2054 pwrb_context = &phwi_ctrlr->wrb_context[index];
2055 if (num_cxn_wrb) {
2056 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2057 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2058 pwrb_handle->pwrb = pwrb;
2059 pwrb++;
2060 }
2061 num_cxn_wrb--;
2062 } else {
2063 idx++;
2064 pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
			num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
				      (sizeof(struct iscsi_wrb) *
				       phba->params.wrbs_per_cxn);
2068 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2069 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2070 pwrb_handle->pwrb = pwrb;
2071 pwrb++;
2072 }
2073 num_cxn_wrb--;
2074 }
2075 }
2076}
2077
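/*
 * hwi_init_async_pdu_ctx - initialize the default (unsolicited) PDU
 * context: header and data buffer pools, their rings and handle arrays,
 * and the per-entry free/busy lists used when async PDUs arrive from the
 * adapter.
 */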
2078static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2079{
2080 struct hwi_controller *phwi_ctrlr;
2081 struct hba_parameters *p = &phba->params;
2082 struct hwi_async_pdu_context *pasync_ctx;
2083 struct async_pdu_handle *pasync_header_h, *pasync_data_h;
2084 unsigned int index;
2085 struct be_mem_descriptor *mem_descr;
2086
2087 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2088 mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;
2089
2090 phwi_ctrlr = phba->phwi_ctrlr;
2091 phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
2092 mem_descr->mem_array[0].virtual_address;
2093 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
2094 memset(pasync_ctx, 0, sizeof(*pasync_ctx));
2095
2096 pasync_ctx->async_header.num_entries = p->asyncpdus_per_ctrl;
2097 pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
2098 pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
2099 pasync_ctx->async_data.num_entries = p->asyncpdus_per_ctrl;
2100
2101 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2102 mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
2103 if (mem_descr->mem_array[0].virtual_address) {
2104 SE_DEBUG(DBG_LVL_8,
2105 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF"
2106 "va=%p \n", mem_descr->mem_array[0].virtual_address);
2107 } else
2108 shost_printk(KERN_WARNING, phba->shost,
2109 "No Virtual address \n");
2110
2111 pasync_ctx->async_header.va_base =
2112 mem_descr->mem_array[0].virtual_address;
2113
2114 pasync_ctx->async_header.pa_base.u.a64.address =
2115 mem_descr->mem_array[0].bus_address.u.a64.address;
2116
2117 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2118 mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2119 if (mem_descr->mem_array[0].virtual_address) {
2120 SE_DEBUG(DBG_LVL_8,
2121 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING"
2122 "va=%p \n", mem_descr->mem_array[0].virtual_address);
2123 } else
2124 shost_printk(KERN_WARNING, phba->shost,
2125 "No Virtual address \n");
2126 pasync_ctx->async_header.ring_base =
2127 mem_descr->mem_array[0].virtual_address;
2128
2129 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2130 mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
2131 if (mem_descr->mem_array[0].virtual_address) {
2132 SE_DEBUG(DBG_LVL_8,
2133 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE"
2134 "va=%p \n", mem_descr->mem_array[0].virtual_address);
2135 } else
2136 shost_printk(KERN_WARNING, phba->shost,
2137 "No Virtual address \n");
2138
2139 pasync_ctx->async_header.handle_base =
2140 mem_descr->mem_array[0].virtual_address;
2141 pasync_ctx->async_header.writables = 0;
2142 INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
2143
2144 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2145 mem_descr += HWI_MEM_ASYNC_DATA_BUF;
2146 if (mem_descr->mem_array[0].virtual_address) {
2147 SE_DEBUG(DBG_LVL_8,
2148 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
2149 "va=%p \n", mem_descr->mem_array[0].virtual_address);
2150 } else
2151 shost_printk(KERN_WARNING, phba->shost,
2152 "No Virtual address \n");
2153 pasync_ctx->async_data.va_base =
2154 mem_descr->mem_array[0].virtual_address;
2155 pasync_ctx->async_data.pa_base.u.a64.address =
2156 mem_descr->mem_array[0].bus_address.u.a64.address;
2157
2158 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2159 mem_descr += HWI_MEM_ASYNC_DATA_RING;
2160 if (mem_descr->mem_array[0].virtual_address) {
2161 SE_DEBUG(DBG_LVL_8,
2162 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING"
2163 "va=%p \n", mem_descr->mem_array[0].virtual_address);
2164 } else
2165 shost_printk(KERN_WARNING, phba->shost,
2166 "No Virtual address \n");
2167
2168 pasync_ctx->async_data.ring_base =
2169 mem_descr->mem_array[0].virtual_address;
2170
2171 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2172 mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
2173 if (!mem_descr->mem_array[0].virtual_address)
2174 shost_printk(KERN_WARNING, phba->shost,
2175 "No Virtual address \n");
2176
2177 pasync_ctx->async_data.handle_base =
2178 mem_descr->mem_array[0].virtual_address;
2179 pasync_ctx->async_data.writables = 0;
2180 INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
2181
2182 pasync_header_h =
2183 (struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
2184 pasync_data_h =
2185 (struct async_pdu_handle *)pasync_ctx->async_data.handle_base;
2186
2187 for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
2188 pasync_header_h->cri = -1;
2189 pasync_header_h->index = (char)index;
2190 INIT_LIST_HEAD(&pasync_header_h->link);
2191 pasync_header_h->pbuffer =
2192 (void *)((unsigned long)
2193 (pasync_ctx->async_header.va_base) +
2194 (p->defpdu_hdr_sz * index));
2195
2196 pasync_header_h->pa.u.a64.address =
2197 pasync_ctx->async_header.pa_base.u.a64.address +
2198 (p->defpdu_hdr_sz * index);
2199
2200 list_add_tail(&pasync_header_h->link,
2201 &pasync_ctx->async_header.free_list);
2202 pasync_header_h++;
2203 pasync_ctx->async_header.free_entries++;
2204 pasync_ctx->async_header.writables++;
2205
2206 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
2207 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
2208 header_busy_list);
2209 pasync_data_h->cri = -1;
2210 pasync_data_h->index = (char)index;
2211 INIT_LIST_HEAD(&pasync_data_h->link);
2212 pasync_data_h->pbuffer =
2213 (void *)((unsigned long)
2214 (pasync_ctx->async_data.va_base) +
2215 (p->defpdu_data_sz * index));
2216
2217 pasync_data_h->pa.u.a64.address =
2218 pasync_ctx->async_data.pa_base.u.a64.address +
2219 (p->defpdu_data_sz * index);
2220
2221 list_add_tail(&pasync_data_h->link,
2222 &pasync_ctx->async_data.free_list);
2223 pasync_data_h++;
2224 pasync_ctx->async_data.free_entries++;
2225 pasync_ctx->async_data.writables++;
2226
2227 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
2228 }
2229
2230 pasync_ctx->async_header.host_write_ptr = 0;
2231 pasync_ctx->async_header.ep_read_ptr = -1;
2232 pasync_ctx->async_data.host_write_ptr = 0;
2233 pasync_ctx->async_data.ep_read_ptr = -1;
2234}
2235
2236static int
2237be_sgl_create_contiguous(void *virtual_address,
2238 u64 physical_address, u32 length,
2239 struct be_dma_mem *sgl)
2240{
2241 WARN_ON(!virtual_address);
2242 WARN_ON(!physical_address);
	WARN_ON(length == 0);
2244 WARN_ON(!sgl);
2245
2246 sgl->va = virtual_address;
2247 sgl->dma = physical_address;
2248 sgl->size = length;
2249
2250 return 0;
2251}
2252
2253static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
2254{
2255 memset(sgl, 0, sizeof(*sgl));
2256}
2257
2258static void
2259hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
2260 struct mem_array *pmem, struct be_dma_mem *sgl)
2261{
2262 if (sgl->va)
2263 be_sgl_destroy_contiguous(sgl);
2264
2265 be_sgl_create_contiguous(pmem->virtual_address,
2266 pmem->bus_address.u.a64.address,
2267 pmem->size, sgl);
2268}
2269
2270static void
2271hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
2272 struct mem_array *pmem, struct be_dma_mem *sgl)
2273{
2274 if (sgl->va)
2275 be_sgl_destroy_contiguous(sgl);
2276
2277 be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
2278 pmem->bus_address.u.a64.address,
2279 pmem->size, sgl);
2280}
2281
2282static int be_fill_queue(struct be_queue_info *q,
2283 u16 len, u16 entry_size, void *vaddress)
2284{
2285 struct be_dma_mem *mem = &q->dma_mem;
2286
2287 memset(q, 0, sizeof(*q));
2288 q->len = len;
2289 q->entry_size = entry_size;
2290 mem->size = len * entry_size;
2291 mem->va = vaddress;
2292 if (!mem->va)
2293 return -ENOMEM;
2294 memset(mem->va, 0, mem->size);
2295 return 0;
2296}
2297
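/*
 * beiscsi_create_eqs - allocate and create one event queue per CPU in
 * use, plus one extra EQ for MCC completions when MSI-X is enabled.
 * On any failure the EQ memory allocated so far is freed.
 */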
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302298static int beiscsi_create_eqs(struct beiscsi_hba *phba,
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302299 struct hwi_context_memory *phwi_context)
2300{
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302301 unsigned int i, num_eq_pages;
2302 int ret, eq_for_mcc;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302303 struct be_queue_info *eq;
2304 struct be_dma_mem *mem;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302305 void *eq_vaddress;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302306 dma_addr_t paddr;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302307
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302308 num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
2309 sizeof(struct be_eq_entry));
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302310
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302311 if (phba->msix_enabled)
2312 eq_for_mcc = 1;
2313 else
2314 eq_for_mcc = 0;
2315 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2316 eq = &phwi_context->be_eq[i].q;
2317 mem = &eq->dma_mem;
2318 phwi_context->be_eq[i].phba = phba;
2319 eq_vaddress = pci_alloc_consistent(phba->pcidev,
2320 num_eq_pages * PAGE_SIZE,
2321 &paddr);
2322 if (!eq_vaddress)
2323 goto create_eq_error;
2324
2325 mem->va = eq_vaddress;
2326 ret = be_fill_queue(eq, phba->params.num_eq_entries,
2327 sizeof(struct be_eq_entry), eq_vaddress);
2328 if (ret) {
2329 shost_printk(KERN_ERR, phba->shost,
2330 "be_fill_queue Failed for EQ \n");
2331 goto create_eq_error;
2332 }
2333
2334 mem->dma = paddr;
2335 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2336 phwi_context->cur_eqd);
2337 if (ret) {
2338 shost_printk(KERN_ERR, phba->shost,
2339 "beiscsi_cmd_eq_create"
2340 "Failedfor EQ \n");
2341 goto create_eq_error;
2342 }
2343 SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302344 }
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302345 return 0;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302346create_eq_error:
2347 for (i = 0; i < (phba->num_cpus + 1); i++) {
2348 eq = &phwi_context->be_eq[i].q;
2349 mem = &eq->dma_mem;
2350 if (mem->va)
2351 pci_free_consistent(phba->pcidev, num_eq_pages
2352 * PAGE_SIZE,
2353 mem->va, mem->dma);
2354 }
2355 return ret;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302356}
2357
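/*
 * beiscsi_create_cqs - allocate and create one iSCSI completion queue
 * per CPU, binding each CQ to the matching per-CPU event queue and
 * recording the pairing in the be_eq object used by the poll path.
 */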
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302358static int beiscsi_create_cqs(struct beiscsi_hba *phba,
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302359 struct hwi_context_memory *phwi_context)
2360{
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302361 unsigned int i, num_cq_pages;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302362 int ret;
2363 struct be_queue_info *cq, *eq;
2364 struct be_dma_mem *mem;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302365 struct be_eq_obj *pbe_eq;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302366 void *cq_vaddress;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302367 dma_addr_t paddr;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302368
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302369 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2370 sizeof(struct sol_cqe));
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302371
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302372 for (i = 0; i < phba->num_cpus; i++) {
2373 cq = &phwi_context->be_cq[i];
2374 eq = &phwi_context->be_eq[i].q;
2375 pbe_eq = &phwi_context->be_eq[i];
2376 pbe_eq->cq = cq;
2377 pbe_eq->phba = phba;
2378 mem = &cq->dma_mem;
2379 cq_vaddress = pci_alloc_consistent(phba->pcidev,
2380 num_cq_pages * PAGE_SIZE,
2381 &paddr);
2382 if (!cq_vaddress)
2383 goto create_cq_error;
Jayamohan Kallickal7da50872010-01-05 05:04:12 +05302384 ret = be_fill_queue(cq, phba->params.num_cq_entries,
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302385 sizeof(struct sol_cqe), cq_vaddress);
2386 if (ret) {
2387 shost_printk(KERN_ERR, phba->shost,
2388 "be_fill_queue Failed for ISCSI CQ \n");
2389 goto create_cq_error;
2390 }
2391
2392 mem->dma = paddr;
2393 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
2394 false, 0);
2395 if (ret) {
2396 shost_printk(KERN_ERR, phba->shost,
2397 "beiscsi_cmd_eq_create"
2398 "Failed for ISCSI CQ \n");
2399 goto create_cq_error;
2400 }
2401 SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n",
2402 cq->id, eq->id);
2403 SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302404 }
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302405 return 0;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302406
2407create_cq_error:
2408 for (i = 0; i < phba->num_cpus; i++) {
2409 cq = &phwi_context->be_cq[i];
2410 mem = &cq->dma_mem;
2411 if (mem->va)
2412 pci_free_consistent(phba->pcidev, num_cq_pages
2413 * PAGE_SIZE,
2414 mem->va, mem->dma);
2415 }
2416 return ret;
2417
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302418}
2419
2420static int
2421beiscsi_create_def_hdr(struct beiscsi_hba *phba,
2422 struct hwi_context_memory *phwi_context,
2423 struct hwi_controller *phwi_ctrlr,
2424 unsigned int def_pdu_ring_sz)
2425{
2426 unsigned int idx;
2427 int ret;
2428 struct be_queue_info *dq, *cq;
2429 struct be_dma_mem *mem;
2430 struct be_mem_descriptor *mem_descr;
2431 void *dq_vaddress;
2432
2433 idx = 0;
2434 dq = &phwi_context->be_def_hdrq;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302435 cq = &phwi_context->be_cq[0];
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302436 mem = &dq->dma_mem;
2437 mem_descr = phba->init_mem;
2438 mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2439 dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2440 ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
2441 sizeof(struct phys_addr),
2442 sizeof(struct phys_addr), dq_vaddress);
2443 if (ret) {
2444 shost_printk(KERN_ERR, phba->shost,
2445 "be_fill_queue Failed for DEF PDU HDR\n");
2446 return ret;
2447 }
2448 mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
2449 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
2450 def_pdu_ring_sz,
2451 phba->params.defpdu_hdr_sz);
2452 if (ret) {
2453 shost_printk(KERN_ERR, phba->shost,
2454 "be_cmd_create_default_pdu_queue Failed DEFHDR\n");
2455 return ret;
2456 }
2457 phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
2458 SE_DEBUG(DBG_LVL_8, "iscsi def pdu id is %d\n",
2459 phwi_context->be_def_hdrq.id);
2460 hwi_post_async_buffers(phba, 1);
2461 return 0;
2462}
2463
2464static int
2465beiscsi_create_def_data(struct beiscsi_hba *phba,
2466 struct hwi_context_memory *phwi_context,
2467 struct hwi_controller *phwi_ctrlr,
2468 unsigned int def_pdu_ring_sz)
2469{
2470 unsigned int idx;
2471 int ret;
2472 struct be_queue_info *dataq, *cq;
2473 struct be_dma_mem *mem;
2474 struct be_mem_descriptor *mem_descr;
2475 void *dq_vaddress;
2476
2477 idx = 0;
2478 dataq = &phwi_context->be_def_dataq;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302479 cq = &phwi_context->be_cq[0];
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302480 mem = &dataq->dma_mem;
2481 mem_descr = phba->init_mem;
2482 mem_descr += HWI_MEM_ASYNC_DATA_RING;
2483 dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2484 ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
2485 sizeof(struct phys_addr),
2486 sizeof(struct phys_addr), dq_vaddress);
2487 if (ret) {
2488 shost_printk(KERN_ERR, phba->shost,
2489 "be_fill_queue Failed for DEF PDU DATA\n");
2490 return ret;
2491 }
2492 mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
2493 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
2494 def_pdu_ring_sz,
2495 phba->params.defpdu_data_sz);
2496 if (ret) {
2497 shost_printk(KERN_ERR, phba->shost,
2498 "be_cmd_create_default_pdu_queue Failed"
2499 " for DEF PDU DATA\n");
2500 return ret;
2501 }
2502 phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
2503 SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n",
2504 phwi_context->be_def_dataq.id);
2505 hwi_post_async_buffers(phba, 0);
2506 SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED \n");
2507 return 0;
2508}
2509
2510static int
2511beiscsi_post_pages(struct beiscsi_hba *phba)
2512{
2513 struct be_mem_descriptor *mem_descr;
2514 struct mem_array *pm_arr;
2515 unsigned int page_offset, i;
2516 struct be_dma_mem sgl;
2517 int status;
2518
2519 mem_descr = phba->init_mem;
2520 mem_descr += HWI_MEM_SGE;
2521 pm_arr = mem_descr->mem_array;
2522
2523 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
2524 phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
2525 for (i = 0; i < mem_descr->num_elements; i++) {
2526 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
2527 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
2528 page_offset,
2529 (pm_arr->size / PAGE_SIZE));
2530 page_offset += pm_arr->size / PAGE_SIZE;
2531 if (status != 0) {
2532 shost_printk(KERN_ERR, phba->shost,
2533 "post sgl failed.\n");
2534 return status;
2535 }
2536 pm_arr++;
2537 }
2538 SE_DEBUG(DBG_LVL_8, "POSTED PAGES \n");
2539 return 0;
2540}
2541
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302542static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
2543{
2544 struct be_dma_mem *mem = &q->dma_mem;
2545 if (mem->va)
2546 pci_free_consistent(phba->pcidev, mem->size,
2547 mem->va, mem->dma);
2548}
2549
2550static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
2551 u16 len, u16 entry_size)
2552{
2553 struct be_dma_mem *mem = &q->dma_mem;
2554
2555 memset(q, 0, sizeof(*q));
2556 q->len = len;
2557 q->entry_size = entry_size;
2558 mem->size = len * entry_size;
2559 mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
2560 if (!mem->va)
2561 return -1;
2562 memset(mem->va, 0, mem->size);
2563 return 0;
2564}
2565
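/*
 * beiscsi_create_wrb_rings - split the HWI_MEM_WRB region into one WRB
 * ring per connection and issue be_cmd_wrbq_create() for each, storing
 * the created queue id in the matching wrb_context.
 */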
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302566static int
2567beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
2568 struct hwi_context_memory *phwi_context,
2569 struct hwi_controller *phwi_ctrlr)
2570{
2571 unsigned int wrb_mem_index, offset, size, num_wrb_rings;
2572 u64 pa_addr_lo;
2573 unsigned int idx, num, i;
2574 struct mem_array *pwrb_arr;
2575 void *wrb_vaddr;
2576 struct be_dma_mem sgl;
2577 struct be_mem_descriptor *mem_descr;
2578 int status;
2579
2580 idx = 0;
2581 mem_descr = phba->init_mem;
2582 mem_descr += HWI_MEM_WRB;
2583 pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
2584 GFP_KERNEL);
2585 if (!pwrb_arr) {
2586 shost_printk(KERN_ERR, phba->shost,
2587 "Memory alloc failed in create wrb ring.\n");
2588 return -ENOMEM;
2589 }
2590 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2591 pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
2592 num_wrb_rings = mem_descr->mem_array[idx].size /
2593 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
2594
2595 for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
2596 if (num_wrb_rings) {
2597 pwrb_arr[num].virtual_address = wrb_vaddr;
2598 pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
2599 pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2600 sizeof(struct iscsi_wrb);
2601 wrb_vaddr += pwrb_arr[num].size;
2602 pa_addr_lo += pwrb_arr[num].size;
2603 num_wrb_rings--;
2604 } else {
2605 idx++;
2606 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2607 pa_addr_lo = mem_descr->mem_array[idx].\
2608 bus_address.u.a64.address;
2609 num_wrb_rings = mem_descr->mem_array[idx].size /
2610 (phba->params.wrbs_per_cxn *
2611 sizeof(struct iscsi_wrb));
2612 pwrb_arr[num].virtual_address = wrb_vaddr;
2613 pwrb_arr[num].bus_address.u.a64.address\
2614 = pa_addr_lo;
2615 pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2616 sizeof(struct iscsi_wrb);
2617 wrb_vaddr += pwrb_arr[num].size;
2618 pa_addr_lo += pwrb_arr[num].size;
2619 num_wrb_rings--;
2620 }
2621 }
2622 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2623 wrb_mem_index = 0;
2624 offset = 0;
2625 size = 0;
2626
2627 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
2628 status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
2629 &phwi_context->be_wrbq[i]);
2630 if (status != 0) {
2631 shost_printk(KERN_ERR, phba->shost,
2632 "wrbq create failed.");
2633 return status;
2634 }
		phwi_ctrlr->wrb_context[i * 2].cid =
					phwi_context->be_wrbq[i].id;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302637 }
2638 kfree(pwrb_arr);
2639 return 0;
2640}
2641
2642static void free_wrb_handles(struct beiscsi_hba *phba)
2643{
2644 unsigned int index;
2645 struct hwi_controller *phwi_ctrlr;
2646 struct hwi_wrb_context *pwrb_context;
2647
2648 phwi_ctrlr = phba->phwi_ctrlr;
2649 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2650 pwrb_context = &phwi_ctrlr->wrb_context[index];
2651 kfree(pwrb_context->pwrb_handle_base);
2652 kfree(pwrb_context->pwrb_handle_basestd);
2653 }
2654}
2655
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302656static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
2657{
2658 struct be_queue_info *q;
2659 struct be_ctrl_info *ctrl = &phba->ctrl;
2660
2661 q = &phba->ctrl.mcc_obj.q;
2662 if (q->created)
2663 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
2664 be_queue_free(phba, q);
2665
2666 q = &phba->ctrl.mcc_obj.cq;
2667 if (q->created)
2668 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2669 be_queue_free(phba, q);
2670}
2671
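/*
 * hwi_cleanup - destroy the WRB queues, default PDU header/data queues,
 * posted SGL pages, CQs, EQs (including the MCC EQ when MSI-X is on)
 * and the MCC queues, and free the per-connection WRB handles.
 */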
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302672static void hwi_cleanup(struct beiscsi_hba *phba)
2673{
2674 struct be_queue_info *q;
2675 struct be_ctrl_info *ctrl = &phba->ctrl;
2676 struct hwi_controller *phwi_ctrlr;
2677 struct hwi_context_memory *phwi_context;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302678 int i, eq_num;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302679
2680 phwi_ctrlr = phba->phwi_ctrlr;
2681 phwi_context = phwi_ctrlr->phwi_ctxt;
2682 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2683 q = &phwi_context->be_wrbq[i];
2684 if (q->created)
2685 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
2686 }
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302687 free_wrb_handles(phba);
2688
2689 q = &phwi_context->be_def_hdrq;
2690 if (q->created)
2691 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
2692
2693 q = &phwi_context->be_def_dataq;
2694 if (q->created)
2695 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
2696
2697 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
2698
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302699 for (i = 0; i < (phba->num_cpus); i++) {
2700 q = &phwi_context->be_cq[i];
2701 if (q->created)
2702 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2703 }
2704 if (phba->msix_enabled)
2705 eq_num = 1;
2706 else
2707 eq_num = 0;
2708 for (i = 0; i < (phba->num_cpus + eq_num); i++) {
2709 q = &phwi_context->be_eq[i].q;
2710 if (q->created)
2711 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
2712 }
2713 be_mcc_queues_destroy(phba);
2714}
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302715
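/*
 * be_mcc_queues_create - allocate and create the MCC completion queue
 * (bound to the MCC event queue when MSI-X is enabled, otherwise EQ 0)
 * and the MCC queue itself, unwinding on failure.
 */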
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302716static int be_mcc_queues_create(struct beiscsi_hba *phba,
2717 struct hwi_context_memory *phwi_context)
2718{
2719 struct be_queue_info *q, *cq;
2720 struct be_ctrl_info *ctrl = &phba->ctrl;
2721
2722 /* Alloc MCC compl queue */
2723 cq = &phba->ctrl.mcc_obj.cq;
2724 if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
2725 sizeof(struct be_mcc_compl)))
2726 goto err;
2727 /* Ask BE to create MCC compl queue; */
2728 if (phba->msix_enabled) {
2729 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
2730 [phba->num_cpus].q, false, true, 0))
2731 goto mcc_cq_free;
2732 } else {
2733 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
2734 false, true, 0))
2735 goto mcc_cq_free;
2736 }
2737
2738 /* Alloc MCC queue */
2739 q = &phba->ctrl.mcc_obj.q;
2740 if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2741 goto mcc_cq_destroy;
2742
2743 /* Ask BE to create MCC queue */
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05302744 if (beiscsi_cmd_mccq_create(phba, q, cq))
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302745 goto mcc_q_free;
2746
2747 return 0;
2748
2749mcc_q_free:
2750 be_queue_free(phba, q);
2751mcc_cq_destroy:
2752 beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
2753mcc_cq_free:
2754 be_queue_free(phba, cq);
2755err:
2756 return -1;
2757}
2758
2759static int find_num_cpus(void)
2760{
2761 int num_cpus = 0;
2762
2763 num_cpus = num_online_cpus();
2764 if (num_cpus >= MAX_CPUS)
2765 num_cpus = MAX_CPUS - 1;
2766
2767 SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", num_cpus);
2768 return num_cpus;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302769}
2770
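/*
 * hwi_init_port - bring up the hardware interface: firmware init, EQ,
 * MCC and CQ creation, firmware compatibility check (which also selects
 * ring_mode), default PDU header/data rings, SGL page posting and the
 * per-connection WRB rings.  Any failure unwinds through hwi_cleanup().
 */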
2771static int hwi_init_port(struct beiscsi_hba *phba)
2772{
2773 struct hwi_controller *phwi_ctrlr;
2774 struct hwi_context_memory *phwi_context;
2775 unsigned int def_pdu_ring_sz;
2776 struct be_ctrl_info *ctrl = &phba->ctrl;
2777 int status;
2778
2779 def_pdu_ring_sz =
2780 phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
2781 phwi_ctrlr = phba->phwi_ctrlr;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302782 phwi_context = phwi_ctrlr->phwi_ctxt;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302783 phwi_context->max_eqd = 0;
2784 phwi_context->min_eqd = 0;
2785 phwi_context->cur_eqd = 64;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302786 be_cmd_fw_initialize(&phba->ctrl);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302787
2788 status = beiscsi_create_eqs(phba, phwi_context);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302789 if (status != 0) {
2790 shost_printk(KERN_ERR, phba->shost, "EQ not created \n");
2791 goto error;
2792 }
2793
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302794 status = be_mcc_queues_create(phba, phwi_context);
2795 if (status != 0)
2796 goto error;
2797
2798 status = mgmt_check_supported_fw(ctrl, phba);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302799 if (status != 0) {
2800 shost_printk(KERN_ERR, phba->shost,
2801 "Unsupported fw version \n");
2802 goto error;
2803 }
2804
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05302805 if (phba->fw_config.iscsi_features == 0x1)
2806 ring_mode = 1;
2807 else
2808 ring_mode = 0;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302809
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302810 status = beiscsi_create_cqs(phba, phwi_context);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302811 if (status != 0) {
2812 shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
2813 goto error;
2814 }
2815
2816 status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
2817 def_pdu_ring_sz);
2818 if (status != 0) {
2819 shost_printk(KERN_ERR, phba->shost,
2820 "Default Header not created\n");
2821 goto error;
2822 }
2823
2824 status = beiscsi_create_def_data(phba, phwi_context,
2825 phwi_ctrlr, def_pdu_ring_sz);
2826 if (status != 0) {
2827 shost_printk(KERN_ERR, phba->shost,
2828 "Default Data not created\n");
2829 goto error;
2830 }
2831
2832 status = beiscsi_post_pages(phba);
2833 if (status != 0) {
2834 shost_printk(KERN_ERR, phba->shost, "Post SGL Pages Failed\n");
2835 goto error;
2836 }
2837
2838 status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
2839 if (status != 0) {
2840 shost_printk(KERN_ERR, phba->shost,
2841 "WRB Rings not created\n");
2842 goto error;
2843 }
2844
2845 SE_DEBUG(DBG_LVL_8, "hwi_init_port success\n");
2846 return 0;
2847
2848error:
	shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed\n");
2850 hwi_cleanup(phba);
2851 return -ENOMEM;
2852}
2853
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302854static int hwi_init_controller(struct beiscsi_hba *phba)
2855{
2856 struct hwi_controller *phwi_ctrlr;
2857
2858 phwi_ctrlr = phba->phwi_ctrlr;
2859 if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
2860 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
2861 init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
2862 SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p \n",
2863 phwi_ctrlr->phwi_ctxt);
2864 } else {
2865 shost_printk(KERN_ERR, phba->shost,
2866 "HWI_MEM_ADDN_CONTEXT is more than one element."
2867 "Failing to load\n");
2868 return -ENOMEM;
2869 }
2870
2871 iscsi_init_global_templates(phba);
2872 beiscsi_init_wrb_handle(phba);
2873 hwi_init_async_pdu_ctx(phba);
2874 if (hwi_init_port(phba) != 0) {
2875 shost_printk(KERN_ERR, phba->shost,
2876 "hwi_init_controller failed\n");
2877 return -ENOMEM;
2878 }
2879 return 0;
2880}
2881
2882static void beiscsi_free_mem(struct beiscsi_hba *phba)
2883{
2884 struct be_mem_descriptor *mem_descr;
2885 int i, j;
2886
2887 mem_descr = phba->init_mem;
2888 i = 0;
2889 j = 0;
2890 for (i = 0; i < SE_MEM_MAX; i++) {
2891 for (j = mem_descr->num_elements; j > 0; j--) {
2892 pci_free_consistent(phba->pcidev,
2893 mem_descr->mem_array[j - 1].size,
2894 mem_descr->mem_array[j - 1].virtual_address,
2895 mem_descr->mem_array[j - 1].bus_address.
2896 u.a64.address);
2897 }
2898 kfree(mem_descr->mem_array);
2899 mem_descr++;
2900 }
2901 kfree(phba->init_mem);
2902 kfree(phba->phwi_ctrlr);
2903}
2904
2905static int beiscsi_init_controller(struct beiscsi_hba *phba)
2906{
2907 int ret = -ENOMEM;
2908
2909 ret = beiscsi_get_memory(phba);
2910 if (ret < 0) {
2911 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe -"
2912 "Failed in beiscsi_alloc_memory \n");
2913 return ret;
2914 }
2915
2916 ret = hwi_init_controller(phba);
2917 if (ret)
2918 goto free_init;
2919 SE_DEBUG(DBG_LVL_8, "Return success from beiscsi_init_controller");
2920 return 0;
2921
2922free_init:
2923 beiscsi_free_mem(phba);
2924 return -ENOMEM;
2925}
2926
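/*
 * beiscsi_init_sgl_handle - build the I/O and error-handling SGL handle
 * pools from HWI_MEM_SGLH (plus the cid-indexed sgl_hndl_array in
 * ring_mode) and attach each handle to its iscsi_sge fragment in
 * HWI_MEM_SGE; sgl_index values start at the firmware's iscsi_icd_start.
 */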
2927static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
2928{
2929 struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
2930 struct sgl_handle *psgl_handle;
2931 struct iscsi_sge *pfrag;
2932 unsigned int arr_index, i, idx;
2933
2934 phba->io_sgl_hndl_avbl = 0;
2935 phba->eh_sgl_hndl_avbl = 0;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05302936
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05302937 if (ring_mode) {
2938 phba->sgl_hndl_array = kzalloc(sizeof(struct sgl_handle *) *
2939 phba->params.icds_per_ctrl,
2940 GFP_KERNEL);
2941 if (!phba->sgl_hndl_array) {
2942 shost_printk(KERN_ERR, phba->shost,
2943 "Mem Alloc Failed. Failing to load\n");
2944 return -ENOMEM;
2945 }
2946 }
2947
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302948 mem_descr_sglh = phba->init_mem;
2949 mem_descr_sglh += HWI_MEM_SGLH;
2950 if (1 == mem_descr_sglh->num_elements) {
2951 phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
2952 phba->params.ios_per_ctrl,
2953 GFP_KERNEL);
2954 if (!phba->io_sgl_hndl_base) {
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05302955 if (ring_mode)
2956 kfree(phba->sgl_hndl_array);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05302957 shost_printk(KERN_ERR, phba->shost,
2958 "Mem Alloc Failed. Failing to load\n");
2959 return -ENOMEM;
2960 }
2961 phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
2962 (phba->params.icds_per_ctrl -
2963 phba->params.ios_per_ctrl),
2964 GFP_KERNEL);
2965 if (!phba->eh_sgl_hndl_base) {
2966 kfree(phba->io_sgl_hndl_base);
2967 shost_printk(KERN_ERR, phba->shost,
2968 "Mem Alloc Failed. Failing to load\n");
2969 return -ENOMEM;
2970 }
2971 } else {
2972 shost_printk(KERN_ERR, phba->shost,
2973 "HWI_MEM_SGLH is more than one element."
2974 "Failing to load\n");
2975 return -ENOMEM;
2976 }
2977
2978 arr_index = 0;
2979 idx = 0;
2980 while (idx < mem_descr_sglh->num_elements) {
2981 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
2982
2983 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
2984 sizeof(struct sgl_handle)); i++) {
2985 if (arr_index < phba->params.ios_per_ctrl) {
2986 phba->io_sgl_hndl_base[arr_index] = psgl_handle;
2987 phba->io_sgl_hndl_avbl++;
2988 arr_index++;
2989 } else {
2990 phba->eh_sgl_hndl_base[arr_index -
2991 phba->params.ios_per_ctrl] =
2992 psgl_handle;
2993 arr_index++;
2994 phba->eh_sgl_hndl_avbl++;
2995 }
2996 psgl_handle++;
2997 }
2998 idx++;
2999 }
3000 SE_DEBUG(DBG_LVL_8,
3001 "phba->io_sgl_hndl_avbl=%d"
3002 "phba->eh_sgl_hndl_avbl=%d \n",
3003 phba->io_sgl_hndl_avbl,
3004 phba->eh_sgl_hndl_avbl);
3005 mem_descr_sg = phba->init_mem;
3006 mem_descr_sg += HWI_MEM_SGE;
3007 SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d \n",
3008 mem_descr_sg->num_elements);
3009 arr_index = 0;
3010 idx = 0;
3011 while (idx < mem_descr_sg->num_elements) {
3012 pfrag = mem_descr_sg->mem_array[idx].virtual_address;
3013
3014 for (i = 0;
3015 i < (mem_descr_sg->mem_array[idx].size) /
3016 (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
3017 i++) {
3018 if (arr_index < phba->params.ios_per_ctrl)
3019 psgl_handle = phba->io_sgl_hndl_base[arr_index];
3020 else
3021 psgl_handle = phba->eh_sgl_hndl_base[arr_index -
3022 phba->params.ios_per_ctrl];
3023 psgl_handle->pfrag = pfrag;
3024 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
3025 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
3026 pfrag += phba->params.num_sge_per_io;
3027 psgl_handle->sgl_index =
Jayamohan Kallickal7da50872010-01-05 05:04:12 +05303028 phba->fw_config.iscsi_icd_start + arr_index++;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303029 }
3030 idx++;
3031 }
3032 phba->io_sgl_free_index = 0;
3033 phba->io_sgl_alloc_index = 0;
3034 phba->eh_sgl_free_index = 0;
3035 phba->eh_sgl_alloc_index = 0;
3036 return 0;
3037}
3038
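/*
 * hba_setup_cid_tbls - allocate the free-CID array and the CID-indexed
 * endpoint lookup table.  CIDs are handed out starting at the firmware's
 * iscsi_cid_start, stepping by two, with cxns_per_ctrl entries available.
 */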
3039static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3040{
3041 int i, new_cid;
3042
Jayamohan Kallickalc2462282010-01-05 05:05:34 +05303043 phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303044 GFP_KERNEL);
3045 if (!phba->cid_array) {
3046 shost_printk(KERN_ERR, phba->shost,
3047 "Failed to allocate memory in "
3048 "hba_setup_cid_tbls\n");
3049 return -ENOMEM;
3050 }
Jayamohan Kallickalc2462282010-01-05 05:05:34 +05303051 phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303052 phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
3053 if (!phba->ep_array) {
3054 shost_printk(KERN_ERR, phba->shost,
3055 "Failed to allocate memory in "
3056 "hba_setup_cid_tbls \n");
3057 kfree(phba->cid_array);
3058 return -ENOMEM;
3059 }
Jayamohan Kallickal7da50872010-01-05 05:04:12 +05303060 new_cid = phba->fw_config.iscsi_cid_start;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303061 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3062 phba->cid_array[i] = new_cid;
3063 new_cid += 2;
3064 }
3065 phba->avlbl_cids = phba->params.cxns_per_ctrl;
3066 return 0;
3067}
3068
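/*
 * hwi_enable_intr - turn on the host interrupt bit in the membar
 * interrupt control register and, when it was previously off, ring each
 * event queue doorbell with rearm set so pending events get delivered.
 */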
3069static unsigned char hwi_enable_intr(struct beiscsi_hba *phba)
3070{
3071 struct be_ctrl_info *ctrl = &phba->ctrl;
3072 struct hwi_controller *phwi_ctrlr;
3073 struct hwi_context_memory *phwi_context;
3074 struct be_queue_info *eq;
3075 u8 __iomem *addr;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303076 u32 reg, i;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303077 u32 enabled;
3078
3079 phwi_ctrlr = phba->phwi_ctrlr;
3080 phwi_context = phwi_ctrlr->phwi_ctxt;
3081
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303082 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
3083 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
3084 reg = ioread32(addr);
3085 SE_DEBUG(DBG_LVL_8, "reg =x%08x \n", reg);
3086
3087 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3088 if (!enabled) {
3089 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3090 SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p \n", reg, addr);
3091 iowrite32(reg, addr);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303092 for (i = 0; i <= phba->num_cpus; i++) {
3093 eq = &phwi_context->be_eq[i].q;
3094 SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
3095 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3096 }
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303097 } else
3098 shost_printk(KERN_WARNING, phba->shost,
3099 "In hwi_enable_intr, Not Enabled \n");
3100 return true;
3101}
3102
3103static void hwi_disable_intr(struct beiscsi_hba *phba)
3104{
3105 struct be_ctrl_info *ctrl = &phba->ctrl;
3106
3107 u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
3108 u32 reg = ioread32(addr);
3109
3110 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3111 if (enabled) {
3112 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3113 iowrite32(reg, addr);
3114 } else
3115 shost_printk(KERN_WARNING, phba->shost,
3116 "In hwi_disable_intr, Already Disabled \n");
3117}
3118
3119static int beiscsi_init_port(struct beiscsi_hba *phba)
3120{
3121 int ret;
3122
3123 ret = beiscsi_init_controller(phba);
3124 if (ret < 0) {
3125 shost_printk(KERN_ERR, phba->shost,
3126 			     "beiscsi_dev_probe - Failed in "
3127 			     "beiscsi_init_controller\n");
3128 return ret;
3129 }
3130 ret = beiscsi_init_sgl_handle(phba);
3131 if (ret < 0) {
3132 shost_printk(KERN_ERR, phba->shost,
3133 			     "beiscsi_dev_probe - Failed in "
3134 			     "beiscsi_init_sgl_handle\n");
3135 goto do_cleanup_ctrlr;
3136 }
3137
3138 if (hba_setup_cid_tbls(phba)) {
3139 shost_printk(KERN_ERR, phba->shost,
3140 "Failed in hba_setup_cid_tbls\n");
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05303141 if (ring_mode)
3142 kfree(phba->sgl_hndl_array);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303143 kfree(phba->io_sgl_hndl_base);
3144 kfree(phba->eh_sgl_hndl_base);
3145 goto do_cleanup_ctrlr;
3146 }
3147
3148 return ret;
3149
3150do_cleanup_ctrlr:
3151 hwi_cleanup(phba);
3152 return ret;
3153}
3154
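/**
 * hwi_purge_eq - drain stale entries from every event queue
 * @phba: The Hba instance
 *
 * Walks each event queue (including the extra EQ used when MSI-X is
 * enabled) and clears any entries still marked valid so that no
 * stale events remain after cleanup.
 */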
3155static void hwi_purge_eq(struct beiscsi_hba *phba)
3156{
3157 struct hwi_controller *phwi_ctrlr;
3158 struct hwi_context_memory *phwi_context;
3159 struct be_queue_info *eq;
3160 struct be_eq_entry *eqe = NULL;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303161 int i, eq_msix;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303162
3163 phwi_ctrlr = phba->phwi_ctrlr;
3164 phwi_context = phwi_ctrlr->phwi_ctxt;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303165 if (phba->msix_enabled)
3166 eq_msix = 1;
3167 else
3168 eq_msix = 0;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303169
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303170 for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
3171 eq = &phwi_context->be_eq[i].q;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303172 eqe = queue_tail_node(eq);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303173
3174 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
3175 & EQE_VALID_MASK) {
3176 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
3177 queue_tail_inc(eq);
3178 eqe = queue_tail_node(eq);
3179 }
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303180 }
3181}
3182
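/**
 * beiscsi_clean_port - tear down the iSCSI port
 * @phba: The Hba instance
 *
 * Requests a firmware cleanup of the connection chute, tears down
 * the hardware context, purges the event queues and frees the SGL
 * handle arrays and the CID/endpoint tables.
 */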
3183static void beiscsi_clean_port(struct beiscsi_hba *phba)
3184{
3185 unsigned char mgmt_status;
3186
3187 mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
3188 if (mgmt_status)
3189 shost_printk(KERN_WARNING, phba->shost,
3190 "mgmt_epfw_cleanup FAILED \n");
3191 hwi_cleanup(phba);
3192 hwi_purge_eq(phba);
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05303193 if (ring_mode)
3194 kfree(phba->sgl_hndl_array);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303195 kfree(phba->io_sgl_hndl_base);
3196 kfree(phba->eh_sgl_hndl_base);
3197 kfree(phba->cid_array);
3198 kfree(phba->ep_array);
3199}
3200
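/**
 * beiscsi_offload_connection - hand a logged-in connection to the adapter
 * @beiscsi_conn: The beiscsi connection
 * @params: negotiated iSCSI parameters for this connection
 *
 * Builds a target context update WRB carrying the negotiated
 * parameters (burst lengths, digest enables, ERL, ExpStatSN) and the
 * pad buffer address, then rings the TXULP0 doorbell so the adapter
 * takes over the connection.
 */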
3201void
3202beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
3203 struct beiscsi_offload_params *params)
3204{
3205 struct wrb_handle *pwrb_handle;
3206 struct iscsi_target_context_update_wrb *pwrb = NULL;
3207 struct be_mem_descriptor *mem_descr;
3208 struct beiscsi_hba *phba = beiscsi_conn->phba;
3209 u32 doorbell = 0;
3210
3211 /*
3212 * We can always use 0 here because it is reserved by libiscsi for
3213 * login/startup related tasks.
3214 */
Jayamohan Kallickal7da50872010-01-05 05:04:12 +05303215 pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
Jayamohan Kallickald5431482010-01-05 05:06:21 +05303216 phba->fw_config.iscsi_cid_start));
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303217 pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
3218 memset(pwrb, 0, sizeof(*pwrb));
3219 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3220 max_burst_length, pwrb, params->dw[offsetof
3221 (struct amap_beiscsi_offload_params,
3222 max_burst_length) / 32]);
3223 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3224 max_send_data_segment_length, pwrb,
3225 params->dw[offsetof(struct amap_beiscsi_offload_params,
3226 max_send_data_segment_length) / 32]);
3227 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3228 first_burst_length,
3229 pwrb,
3230 params->dw[offsetof(struct amap_beiscsi_offload_params,
3231 first_burst_length) / 32]);
3232
3233 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
3234 (params->dw[offsetof(struct amap_beiscsi_offload_params,
3235 erl) / 32] & OFFLD_PARAMS_ERL));
3236 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
3237 (params->dw[offsetof(struct amap_beiscsi_offload_params,
3238 dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
3239 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
3240 (params->dw[offsetof(struct amap_beiscsi_offload_params,
3241 hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
3242 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
3243 (params->dw[offsetof(struct amap_beiscsi_offload_params,
3244 ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
3245 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
3246 (params->dw[offsetof(struct amap_beiscsi_offload_params,
3247 imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
3248 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
3249 pwrb,
3250 (params->dw[offsetof(struct amap_beiscsi_offload_params,
3251 exp_statsn) / 32] + 1));
3252 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
3253 0x7);
3254 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
3255 pwrb, pwrb_handle->wrb_index);
3256 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
3257 pwrb, pwrb_handle->nxt_wrb_index);
3258 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3259 session_state, pwrb, 0);
3260 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
3261 pwrb, 1);
3262 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
3263 pwrb, 0);
3264 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
3265 0);
3266
3267 mem_descr = phba->init_mem;
3268 mem_descr += ISCSI_MEM_GLOBAL_HEADER;
3269
3270 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3271 pad_buffer_addr_hi, pwrb,
3272 mem_descr->mem_array[0].bus_address.u.a32.address_hi);
3273 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3274 pad_buffer_addr_lo, pwrb,
3275 mem_descr->mem_array[0].bus_address.u.a32.address_lo);
3276
3277 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));
3278
3279 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05303280 if (!ring_mode)
3281 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303282 << DB_DEF_PDU_WRB_INDEX_SHIFT;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303283 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3284
3285 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3286}
3287
3288static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
3289 int *index, int *age)
3290{
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303291 *index = (int)itt;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303292 if (age)
3293 *age = conn->session->age;
3294}
3295
3296/**
3297 * beiscsi_alloc_pdu - allocates pdu and related resources
3298 * @task: libiscsi task
3299 * @opcode: opcode of pdu for task
3300 *
3301 * This is called with the session lock held. It will allocate
3302 * the wrb and sgl if needed for the command. And it will prep
3303 * the pdu's itt. beiscsi_parse_pdu will later translate
3304 * the pdu itt to the libiscsi task itt.
3305 */
3306static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
3307{
3308 struct beiscsi_io_task *io_task = task->dd_data;
3309 struct iscsi_conn *conn = task->conn;
3310 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3311 struct beiscsi_hba *phba = beiscsi_conn->phba;
3312 struct hwi_wrb_context *pwrb_context;
3313 struct hwi_controller *phwi_ctrlr;
3314 itt_t itt;
Jayamohan Kallickal2afc95b2009-09-22 08:22:26 +05303315 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3316 dma_addr_t paddr;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303317
Jayamohan Kallickal2afc95b2009-09-22 08:22:26 +05303318 io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
3319 GFP_KERNEL, &paddr);
Jayamohan Kallickal2afc95b2009-09-22 08:22:26 +05303320 if (!io_task->cmd_bhs)
3321 return -ENOMEM;
Jayamohan Kallickal2afc95b2009-09-22 08:22:26 +05303322 io_task->bhs_pa.u.a64.address = paddr;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303323 io_task->libiscsi_itt = (itt_t)task->itt;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303324 io_task->pwrb_handle = alloc_wrb_handle(phba,
Jayamohan Kallickal7da50872010-01-05 05:04:12 +05303325 beiscsi_conn->beiscsi_conn_cid -
Jayamohan Kallickald5431482010-01-05 05:06:21 +05303326 phba->fw_config.iscsi_cid_start
3327 );
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303328 io_task->conn = beiscsi_conn;
3329
3330 task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
3331 task->hdr_max = sizeof(struct be_cmd_bhs);
3332
3333 if (task->sc) {
3334 spin_lock(&phba->io_sgl_lock);
3335 io_task->psgl_handle = alloc_io_sgl_handle(phba);
3336 spin_unlock(&phba->io_sgl_lock);
Jayamohan Kallickal2afc95b2009-09-22 08:22:26 +05303337 if (!io_task->psgl_handle)
3338 goto free_hndls;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303339 } else {
3340 io_task->scsi_cmnd = NULL;
3341 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
3342 if (!beiscsi_conn->login_in_progress) {
3343 spin_lock(&phba->mgmt_sgl_lock);
3344 io_task->psgl_handle = (struct sgl_handle *)
3345 alloc_mgmt_sgl_handle(phba);
3346 spin_unlock(&phba->mgmt_sgl_lock);
Jayamohan Kallickal2afc95b2009-09-22 08:22:26 +05303347 if (!io_task->psgl_handle)
3348 goto free_hndls;
3349
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303350 beiscsi_conn->login_in_progress = 1;
3351 beiscsi_conn->plogin_sgl_handle =
3352 io_task->psgl_handle;
3353 } else {
3354 io_task->psgl_handle =
3355 beiscsi_conn->plogin_sgl_handle;
3356 }
3357 } else {
3358 spin_lock(&phba->mgmt_sgl_lock);
3359 io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
3360 spin_unlock(&phba->mgmt_sgl_lock);
Jayamohan Kallickal2afc95b2009-09-22 08:22:26 +05303361 if (!io_task->psgl_handle)
3362 goto free_hndls;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303363 }
3364 }
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303365 itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
3366 wrb_index << 16) | (unsigned int)
3367 (io_task->psgl_handle->sgl_index));
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05303368 if (ring_mode) {
3369 phba->sgl_hndl_array[io_task->psgl_handle->sgl_index -
Jayamohan Kallickal7da50872010-01-05 05:04:12 +05303370 phba->fw_config.iscsi_icd_start] =
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05303371 io_task->psgl_handle;
3372 io_task->psgl_handle->task = task;
Jayamohan Kallickal7da50872010-01-05 05:04:12 +05303373 io_task->psgl_handle->cid = beiscsi_conn->beiscsi_conn_cid -
3374 phba->fw_config.iscsi_cid_start;
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05303375 } else
3376 io_task->pwrb_handle->pio_handle = task;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303377
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303378 io_task->cmd_bhs->iscsi_hdr.itt = itt;
3379 return 0;
Jayamohan Kallickal2afc95b2009-09-22 08:22:26 +05303380
3381free_hndls:
3382 phwi_ctrlr = phba->phwi_ctrlr;
Jayamohan Kallickal7da50872010-01-05 05:04:12 +05303383 pwrb_context = &phwi_ctrlr->wrb_context[
3384 beiscsi_conn->beiscsi_conn_cid -
3385 phba->fw_config.iscsi_cid_start];
Jayamohan Kallickal2afc95b2009-09-22 08:22:26 +05303386 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3387 io_task->pwrb_handle = NULL;
3388 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3389 io_task->bhs_pa.u.a64.address);
3390 SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed \n");
3391 return -ENOMEM;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303392}
3393
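/**
 * beiscsi_cleanup_task - release per-task resources
 * @task: libiscsi task being cleaned up
 *
 * Returns the WRB handle to its per-connection context, frees the
 * command BHS back to the session DMA pool and releases the I/O or
 * management SGL handle; the login SGL handle is kept for reuse
 * while login is in progress.
 */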
3394static void beiscsi_cleanup_task(struct iscsi_task *task)
3395{
3396 struct beiscsi_io_task *io_task = task->dd_data;
3397 struct iscsi_conn *conn = task->conn;
3398 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3399 struct beiscsi_hba *phba = beiscsi_conn->phba;
Jayamohan Kallickal2afc95b2009-09-22 08:22:26 +05303400 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303401 struct hwi_wrb_context *pwrb_context;
3402 struct hwi_controller *phwi_ctrlr;
3403
3404 phwi_ctrlr = phba->phwi_ctrlr;
Jayamohan Kallickal7da50872010-01-05 05:04:12 +05303405 pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
3406 - phba->fw_config.iscsi_cid_start];
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303407 if (io_task->pwrb_handle) {
3408 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3409 io_task->pwrb_handle = NULL;
3410 }
3411
Jayamohan Kallickal2afc95b2009-09-22 08:22:26 +05303412 if (io_task->cmd_bhs) {
3413 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3414 io_task->bhs_pa.u.a64.address);
3415 }
3416
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303417 if (task->sc) {
3418 if (io_task->psgl_handle) {
3419 spin_lock(&phba->io_sgl_lock);
3420 free_io_sgl_handle(phba, io_task->psgl_handle);
3421 spin_unlock(&phba->io_sgl_lock);
3422 io_task->psgl_handle = NULL;
3423 }
3424 } else {
3425 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN)
3426 return;
3427 if (io_task->psgl_handle) {
3428 spin_lock(&phba->mgmt_sgl_lock);
3429 free_mgmt_sgl_handle(phba, io_task->psgl_handle);
3430 spin_unlock(&phba->mgmt_sgl_lock);
3431 io_task->psgl_handle = NULL;
3432 }
3433 }
3434}
3435
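/**
 * beiscsi_iotask - post a SCSI command WRB to the adapter
 * @task: libiscsi task carrying the SCSI command
 * @sg: scatterlist for the data buffer
 * @num_sg: number of mapped scatterlist entries
 * @xferlen: total transfer length in bytes
 * @writedir: non-zero for a write, zero for a read
 *
 * Fills in the iSCSI WRB for the command, prepares the immediate
 * DATA-OUT header for writes, writes the SGL into the WRB and rings
 * the TXULP0 doorbell to submit the work request.
 */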
3436static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
3437 unsigned int num_sg, unsigned int xferlen,
3438 unsigned int writedir)
3439{
3440
3441 struct beiscsi_io_task *io_task = task->dd_data;
3442 struct iscsi_conn *conn = task->conn;
3443 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3444 struct beiscsi_hba *phba = beiscsi_conn->phba;
3445 struct iscsi_wrb *pwrb = NULL;
3446 unsigned int doorbell = 0;
3447
3448 pwrb = io_task->pwrb_handle->pwrb;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303449 io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
3450 io_task->bhs_len = sizeof(struct be_cmd_bhs);
3451
3452 if (writedir) {
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303453 memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
3454 AMAP_SET_BITS(struct amap_pdu_data_out, itt,
3455 &io_task->cmd_bhs->iscsi_data_pdu,
3456 (unsigned int)io_task->cmd_bhs->iscsi_hdr.itt);
3457 AMAP_SET_BITS(struct amap_pdu_data_out, opcode,
3458 &io_task->cmd_bhs->iscsi_data_pdu,
3459 ISCSI_OPCODE_SCSI_DATA_OUT);
3460 AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
3461 &io_task->cmd_bhs->iscsi_data_pdu, 1);
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05303462 if (ring_mode)
3463 io_task->psgl_handle->type = INI_WR_CMD;
3464 else
3465 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303466 INI_WR_CMD);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303467 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303468 } else {
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05303469 if (ring_mode)
3470 io_task->psgl_handle->type = INI_RD_CMD;
3471 else
3472 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303473 INI_RD_CMD);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303474 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
3475 }
3476 memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
3477 dw[offsetof(struct amap_pdu_data_out, lun) / 32],
3478 io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));
3479
3480 AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
3481 cpu_to_be16((unsigned short)io_task->cmd_bhs->iscsi_hdr.
3482 lun[0]));
3483 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
3484 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
3485 io_task->pwrb_handle->wrb_index);
3486 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
3487 be32_to_cpu(task->cmdsn));
3488 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
3489 io_task->psgl_handle->sgl_index);
3490
3491 hwi_write_sgl(pwrb, sg, num_sg, io_task);
3492
3493 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
3494 io_task->pwrb_handle->nxt_wrb_index);
3495 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
3496
3497 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05303498 if (!ring_mode)
3499 doorbell |= (io_task->pwrb_handle->wrb_index &
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303500 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
3501 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3502
3503 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3504 return 0;
3505}
3506
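/**
 * beiscsi_mtask - post a management pdu to the adapter
 * @task: libiscsi task (login, nop-out, text, tmf or logout)
 *
 * Sets up the WRB according to the pdu opcode; for a task management
 * function the referenced command's ICD is also invalidated through
 * the management path. The WRB is then posted via the TXULP0
 * doorbell.
 */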
3507static int beiscsi_mtask(struct iscsi_task *task)
3508{
3509 struct beiscsi_io_task *aborted_io_task, *io_task = task->dd_data;
3510 struct iscsi_conn *conn = task->conn;
3511 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3512 struct beiscsi_hba *phba = beiscsi_conn->phba;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303513 struct iscsi_session *session;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303514 struct iscsi_wrb *pwrb = NULL;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303515 struct hwi_controller *phwi_ctrlr;
3516 struct hwi_wrb_context *pwrb_context;
3517 struct wrb_handle *pwrb_handle;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303518 unsigned int doorbell = 0;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303519 unsigned int i, cid;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303520 struct iscsi_task *aborted_task;
3521
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303522 cid = beiscsi_conn->beiscsi_conn_cid;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303523 pwrb = io_task->pwrb_handle->pwrb;
3524 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
3525 be32_to_cpu(task->cmdsn));
3526 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
3527 io_task->pwrb_handle->wrb_index);
3528 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
3529 io_task->psgl_handle->sgl_index);
3530
3531 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
3532 case ISCSI_OP_LOGIN:
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05303533 if (ring_mode)
3534 io_task->psgl_handle->type = TGT_DM_CMD;
3535 else
3536 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303537 TGT_DM_CMD);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303538 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3539 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
3540 hwi_write_buffer(pwrb, task);
3541 break;
3542 case ISCSI_OP_NOOP_OUT:
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05303543 if (ring_mode)
3544 io_task->psgl_handle->type = INI_RD_CMD;
3545 else
3546 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303547 INI_RD_CMD);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303548 hwi_write_buffer(pwrb, task);
3549 break;
3550 case ISCSI_OP_TEXT:
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05303551 if (ring_mode)
3552 io_task->psgl_handle->type = INI_WR_CMD;
3553 else
3554 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303555 INI_WR_CMD);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303556 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
3557 hwi_write_buffer(pwrb, task);
3558 break;
3559 case ISCSI_OP_SCSI_TMFUNC:
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303560 session = conn->session;
3561 i = ((struct iscsi_tm *)task->hdr)->rtt;
3562 phwi_ctrlr = phba->phwi_ctrlr;
Jayamohan Kallickal7da50872010-01-05 05:04:12 +05303563 pwrb_context = &phwi_ctrlr->wrb_context[cid -
3564 phba->fw_config.iscsi_cid_start];
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303565 pwrb_handle = pwrb_context->pwrb_handle_basestd[be32_to_cpu(i)
3566 >> 16];
3567 aborted_task = pwrb_handle->pio_handle;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303568 if (!aborted_task)
3569 return 0;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303570
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303571 aborted_io_task = aborted_task->dd_data;
3572 if (!aborted_io_task->scsi_cmnd)
3573 return 0;
3574
3575 mgmt_invalidate_icds(phba,
3576 aborted_io_task->psgl_handle->sgl_index,
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303577 cid);
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05303578 if (ring_mode)
3579 io_task->psgl_handle->type = INI_TMF_CMD;
3580 else
3581 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303582 INI_TMF_CMD);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303583 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3584 hwi_write_buffer(pwrb, task);
3585 break;
3586 case ISCSI_OP_LOGOUT:
3587 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05303588 if (ring_mode)
3589 io_task->psgl_handle->type = HWH_TYPE_LOGOUT;
3590 else
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303591 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3592 HWH_TYPE_LOGOUT);
3593 hwi_write_buffer(pwrb, task);
3594 break;
3595
3596 default:
3597 		SE_DEBUG(DBG_LVL_1, "opcode = %d not supported\n",
3598 task->hdr->opcode & ISCSI_OPCODE_MASK);
3599 return -EINVAL;
3600 }
3601
3602 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
3603 be32_to_cpu(task->data_count));
3604 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
3605 io_task->pwrb_handle->nxt_wrb_index);
3606 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
3607
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303608 doorbell |= cid & DB_WRB_POST_CID_MASK;
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05303609 if (!ring_mode)
3610 doorbell |= (io_task->pwrb_handle->wrb_index &
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303611 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
3612 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3613 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3614 return 0;
3615}
3616
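/**
 * beiscsi_task_xmit - libiscsi xmit_task entry point
 * @task: libiscsi task to transmit
 *
 * Management pdus are routed to beiscsi_mtask(). SCSI commands are
 * DMA mapped and handed to beiscsi_iotask() along with the
 * scatterlist, transfer length and data direction.
 */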
3617static int beiscsi_task_xmit(struct iscsi_task *task)
3618{
3619 struct iscsi_conn *conn = task->conn;
3620 struct beiscsi_io_task *io_task = task->dd_data;
3621 struct scsi_cmnd *sc = task->sc;
3622 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3623 struct scatterlist *sg;
3624 int num_sg;
3625 unsigned int writedir = 0, xferlen = 0;
3626
3627 SE_DEBUG(DBG_LVL_4, "\n cid=%d In beiscsi_task_xmit task=%p conn=%p \t"
3628 "beiscsi_conn=%p \n", beiscsi_conn->beiscsi_conn_cid,
3629 task, conn, beiscsi_conn);
3630 if (!sc)
3631 return beiscsi_mtask(task);
3632
3633 io_task->scsi_cmnd = sc;
3634 num_sg = scsi_dma_map(sc);
3635 if (num_sg < 0) {
3636 		SE_DEBUG(DBG_LVL_1, "scsi_dma_map Failed\n");
3637 return num_sg;
3638 }
3639 SE_DEBUG(DBG_LVL_4, "xferlen=0x%08x scmd=%p num_sg=%d sernum=%lu\n",
3640 (scsi_bufflen(sc)), sc, num_sg, sc->serial_number);
3641 xferlen = scsi_bufflen(sc);
3642 sg = scsi_sglist(sc);
3643 if (sc->sc_data_direction == DMA_TO_DEVICE) {
3644 writedir = 1;
3645 SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x \n",
3646 task->imm_count);
3647 } else
3648 writedir = 0;
3649 return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
3650}
3651
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303652
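/**
 * beiscsi_remove - PCI remove / driver unload handler
 * @pcidev: PCI device being removed
 *
 * Disables adapter interrupts, frees the MSI-X or legacy IRQ,
 * destroys the work queue, disables blk_iopoll on every event queue,
 * cleans up the port and releases host and mailbox memory.
 */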
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303653static void beiscsi_remove(struct pci_dev *pcidev)
3654{
3655 struct beiscsi_hba *phba = NULL;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303656 struct hwi_controller *phwi_ctrlr;
3657 struct hwi_context_memory *phwi_context;
3658 struct be_eq_obj *pbe_eq;
3659 unsigned int i, msix_vec;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303660
3661 phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
3662 if (!phba) {
3663 dev_err(&pcidev->dev, "beiscsi_remove called with no phba \n");
3664 return;
3665 }
3666
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303667 phwi_ctrlr = phba->phwi_ctrlr;
3668 phwi_context = phwi_ctrlr->phwi_ctxt;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303669 hwi_disable_intr(phba);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303670 if (phba->msix_enabled) {
3671 for (i = 0; i <= phba->num_cpus; i++) {
3672 msix_vec = phba->msix_entries[i].vector;
3673 free_irq(msix_vec, &phwi_context->be_eq[i]);
3674 }
3675 } else
3676 if (phba->pcidev->irq)
3677 free_irq(phba->pcidev->irq, phba);
3678 pci_disable_msix(phba->pcidev);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303679 destroy_workqueue(phba->wq);
3680 if (blk_iopoll_enabled)
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303681 for (i = 0; i < phba->num_cpus; i++) {
3682 pbe_eq = &phwi_context->be_eq[i];
3683 blk_iopoll_disable(&pbe_eq->iopoll);
3684 }
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303685
3686 beiscsi_clean_port(phba);
3687 beiscsi_free_mem(phba);
3688 beiscsi_unmap_pci_function(phba);
3689 pci_free_consistent(phba->pcidev,
3690 phba->ctrl.mbox_mem_alloced.size,
3691 phba->ctrl.mbox_mem_alloced.va,
3692 phba->ctrl.mbox_mem_alloced.dma);
3693 iscsi_host_remove(phba->shost);
3694 pci_dev_put(phba->pcidev);
3695 iscsi_host_free(phba->shost);
3696}
3697
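/**
 * beiscsi_msix_enable - attempt to switch the adapter to MSI-X
 * @phba: The Hba instance
 *
 * Requests num_cpus + 1 MSI-X vectors and marks MSI-X as enabled
 * only when the allocation succeeds; otherwise the driver keeps
 * using the legacy interrupt.
 */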
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303698static void beiscsi_msix_enable(struct beiscsi_hba *phba)
3699{
3700 int i, status;
3701
3702 for (i = 0; i <= phba->num_cpus; i++)
3703 phba->msix_entries[i].entry = i;
3704
3705 status = pci_enable_msix(phba->pcidev, phba->msix_entries,
3706 (phba->num_cpus + 1));
3707 if (!status)
3708 phba->msix_enabled = true;
3709
3710 return;
3711}
3712
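/**
 * beiscsi_dev_probe - PCI probe entry point
 * @pcidev: PCI device being probed
 * @id: matching entry from beiscsi_pci_id_table
 *
 * Enables the PCI device, allocates the host, optionally enables
 * MSI-X, reads the firmware configuration, initializes the port,
 * creates the per-host work queue, sets up blk_iopoll and IRQs and
 * finally enables adapter interrupts, unwinding in reverse order on
 * any failure.
 */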
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303713static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3714 const struct pci_device_id *id)
3715{
3716 struct beiscsi_hba *phba = NULL;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303717 struct hwi_controller *phwi_ctrlr;
3718 struct hwi_context_memory *phwi_context;
3719 struct be_eq_obj *pbe_eq;
3720 int ret, msix_vec, num_cpus, i;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303721
3722 ret = beiscsi_enable_pci(pcidev);
3723 if (ret < 0) {
3724 		dev_err(&pcidev->dev, "beiscsi_dev_probe - "
3725 			"Failed to enable pci device\n");
3726 return ret;
3727 }
3728
3729 phba = beiscsi_hba_alloc(pcidev);
3730 if (!phba) {
3731 dev_err(&pcidev->dev, "beiscsi_dev_probe-"
3732 " Failed in beiscsi_hba_alloc \n");
3733 goto disable_pci;
3734 }
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303735 SE_DEBUG(DBG_LVL_8, " phba = %p \n", phba);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303736
3737 pci_set_drvdata(pcidev, phba);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303738 if (enable_msix)
3739 num_cpus = find_num_cpus();
3740 else
3741 num_cpus = 1;
3742 phba->num_cpus = num_cpus;
3743 SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", phba->num_cpus);
3744
3745 if (enable_msix)
3746 beiscsi_msix_enable(phba);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303747 ret = be_ctrl_init(phba, pcidev);
3748 if (ret) {
3749 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3750 "Failed in be_ctrl_init\n");
3751 goto hba_free;
3752 }
3753
3754 spin_lock_init(&phba->io_sgl_lock);
3755 spin_lock_init(&phba->mgmt_sgl_lock);
3756 spin_lock_init(&phba->isr_lock);
Jayamohan Kallickal7da50872010-01-05 05:04:12 +05303757 ret = mgmt_get_fw_config(&phba->ctrl, phba);
3758 if (ret != 0) {
3759 shost_printk(KERN_ERR, phba->shost,
3760 "Error getting fw config\n");
3761 goto free_port;
3762 }
3763 phba->shost->max_id = phba->fw_config.iscsi_cid_count;
3764 phba->shost->can_queue = phba->params.ios_per_ctrl;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303765 beiscsi_get_params(phba);
3766 ret = beiscsi_init_port(phba);
3767 if (ret < 0) {
3768 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3769 "Failed in beiscsi_init_port\n");
3770 goto free_port;
3771 }
3772
3773 snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
3774 phba->shost->host_no);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303775 phba->wq = create_workqueue(phba->wq_name);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303776 if (!phba->wq) {
3777 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3778 "Failed to allocate work queue\n");
3779 goto free_twq;
3780 }
3781
3782 INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);
3783
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303784 phwi_ctrlr = phba->phwi_ctrlr;
3785 phwi_context = phwi_ctrlr->phwi_ctxt;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303786 if (blk_iopoll_enabled) {
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303787 for (i = 0; i < phba->num_cpus; i++) {
3788 pbe_eq = &phwi_context->be_eq[i];
3789 blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
3790 be_iopoll);
3791 blk_iopoll_enable(&pbe_eq->iopoll);
3792 }
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303793 }
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303794 ret = beiscsi_init_irqs(phba);
3795 if (ret < 0) {
3796 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3797 "Failed to beiscsi_init_irqs\n");
3798 goto free_blkenbld;
3799 }
3800 ret = hwi_enable_intr(phba);
3801 if (ret < 0) {
3802 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3803 "Failed to hwi_enable_intr\n");
3804 goto free_ctrlr;
3805 }
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303806 SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED \n\n\n");
3807 return 0;
3808
3809free_ctrlr:
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303810 if (phba->msix_enabled) {
3811 for (i = 0; i <= phba->num_cpus; i++) {
3812 msix_vec = phba->msix_entries[i].vector;
3813 free_irq(msix_vec, &phwi_context->be_eq[i]);
3814 }
3815 } else
3816 if (phba->pcidev->irq)
3817 free_irq(phba->pcidev->irq, phba);
3818 pci_disable_msix(phba->pcidev);
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303819free_blkenbld:
3820 destroy_workqueue(phba->wq);
3821 if (blk_iopoll_enabled)
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303822 for (i = 0; i < phba->num_cpus; i++) {
3823 pbe_eq = &phwi_context->be_eq[i];
3824 blk_iopoll_disable(&pbe_eq->iopoll);
3825 }
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303826free_twq:
3827 beiscsi_clean_port(phba);
3828 beiscsi_free_mem(phba);
3829free_port:
3830 pci_free_consistent(phba->pcidev,
3831 phba->ctrl.mbox_mem_alloced.size,
3832 phba->ctrl.mbox_mem_alloced.va,
3833 phba->ctrl.mbox_mem_alloced.dma);
3834 beiscsi_unmap_pci_function(phba);
3835hba_free:
3836 iscsi_host_remove(phba->shost);
3837 pci_dev_put(phba->pcidev);
3838 iscsi_host_free(phba->shost);
3839disable_pci:
3840 pci_disable_device(pcidev);
3841 return ret;
3842}
3843
3844struct iscsi_transport beiscsi_iscsi_transport = {
3845 .owner = THIS_MODULE,
3846 .name = DRV_NAME,
3847 .caps = CAP_RECOVERY_L0 | CAP_HDRDGST |
3848 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
3849 .param_mask = ISCSI_MAX_RECV_DLENGTH |
3850 ISCSI_MAX_XMIT_DLENGTH |
3851 ISCSI_HDRDGST_EN |
3852 ISCSI_DATADGST_EN |
3853 ISCSI_INITIAL_R2T_EN |
3854 ISCSI_MAX_R2T |
3855 ISCSI_IMM_DATA_EN |
3856 ISCSI_FIRST_BURST |
3857 ISCSI_MAX_BURST |
3858 ISCSI_PDU_INORDER_EN |
3859 ISCSI_DATASEQ_INORDER_EN |
3860 ISCSI_ERL |
3861 ISCSI_CONN_PORT |
3862 ISCSI_CONN_ADDRESS |
3863 ISCSI_EXP_STATSN |
3864 ISCSI_PERSISTENT_PORT |
3865 ISCSI_PERSISTENT_ADDRESS |
3866 ISCSI_TARGET_NAME | ISCSI_TPGT |
3867 ISCSI_USERNAME | ISCSI_PASSWORD |
3868 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
3869 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
Jayamohan Kallickal7da50872010-01-05 05:04:12 +05303870 ISCSI_LU_RESET_TMO |
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303871 ISCSI_PING_TMO | ISCSI_RECV_TMO |
3872 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
3873 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
3874 ISCSI_HOST_INITIATOR_NAME,
3875 .create_session = beiscsi_session_create,
3876 .destroy_session = beiscsi_session_destroy,
3877 .create_conn = beiscsi_conn_create,
3878 .bind_conn = beiscsi_conn_bind,
3879 .destroy_conn = iscsi_conn_teardown,
3880 .set_param = beiscsi_set_param,
3881 .get_conn_param = beiscsi_conn_get_param,
3882 .get_session_param = iscsi_session_get_param,
3883 .get_host_param = beiscsi_get_host_param,
3884 .start_conn = beiscsi_conn_start,
3885 .stop_conn = beiscsi_conn_stop,
3886 .send_pdu = iscsi_conn_send_pdu,
3887 .xmit_task = beiscsi_task_xmit,
3888 .cleanup_task = beiscsi_cleanup_task,
3889 .alloc_pdu = beiscsi_alloc_pdu,
3890 .parse_pdu_itt = beiscsi_parse_pdu,
3891 .get_stats = beiscsi_conn_get_stats,
3892 .ep_connect = beiscsi_ep_connect,
3893 .ep_poll = beiscsi_ep_poll,
3894 .ep_disconnect = beiscsi_ep_disconnect,
3895 .session_recovery_timedout = iscsi_session_recovery_timedout,
3896};
3897
3898static struct pci_driver beiscsi_pci_driver = {
3899 .name = DRV_NAME,
3900 .probe = beiscsi_dev_probe,
3901 .remove = beiscsi_remove,
3902 .id_table = beiscsi_pci_id_table
3903};
3904
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +05303905
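/**
 * beiscsi_module_init - module entry point
 *
 * Registers the beiscsi transport with libiscsi and then registers
 * the PCI driver; the transport is unregistered again if PCI
 * registration fails.
 */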
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303906static int __init beiscsi_module_init(void)
3907{
3908 int ret;
3909
3910 beiscsi_scsi_transport =
3911 iscsi_register_transport(&beiscsi_iscsi_transport);
3912 if (!beiscsi_scsi_transport) {
3913 SE_DEBUG(DBG_LVL_1,
3914 			 "beiscsi_module_init - Unable to register beiscsi "
3915 			 "transport.\n");
3916 		return -ENOMEM;
3917 }
3918 SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p \n",
3919 &beiscsi_iscsi_transport);
3920
3921 ret = pci_register_driver(&beiscsi_pci_driver);
3922 if (ret) {
3923 SE_DEBUG(DBG_LVL_1,
3924 			 "beiscsi_module_init - Unable to register "
3925 			 "beiscsi pci driver.\n");
3926 goto unregister_iscsi_transport;
3927 }
Jayamohan Kallickal35e66012009-10-23 11:53:49 +05303928 ring_mode = 0;
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303929 return 0;
3930
3931unregister_iscsi_transport:
3932 iscsi_unregister_transport(&beiscsi_iscsi_transport);
3933 return ret;
3934}
3935
3936static void __exit beiscsi_module_exit(void)
3937{
3938 pci_unregister_driver(&beiscsi_pci_driver);
3939 iscsi_unregister_transport(&beiscsi_iscsi_transport);
3940}
3941
3942module_init(beiscsi_module_init);
3943module_exit(beiscsi_module_exit);