blob: 4f19030c1e3e32ed711e252b426025b898dd96ab [file] [log] [blame]
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05301/**
Jayamohan Kallickald2eeb1a2010-01-23 05:35:15 +05302 * Copyright (C) 2005 - 2010 ServerEngines
Jayamohan Kallickal6733b392009-09-05 07:36:35 +05303 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17
18#include "be.h"
19#include "be_mgmt.h"
20#include "be_main.h"
21
Jayamohan Kallickal756d29c2010-01-05 05:10:46 +053022void be_mcc_notify(struct beiscsi_hba *phba)
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +053023{
24 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
25 u32 val = 0;
26
27 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
28 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
29 iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
30}
31
Jayamohan Kallickal756d29c2010-01-05 05:10:46 +053032unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
33{
34 unsigned int tag = 0;
Jayamohan Kallickal756d29c2010-01-05 05:10:46 +053035
Jayamohan Kallickal756d29c2010-01-05 05:10:46 +053036 if (phba->ctrl.mcc_tag_available) {
37 tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
38 phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
39 phba->ctrl.mcc_numtag[tag] = 0;
Jayamohan Kallickal756d29c2010-01-05 05:10:46 +053040 }
41 if (tag) {
42 phba->ctrl.mcc_tag_available--;
43 if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
44 phba->ctrl.mcc_alloc_index = 0;
45 else
46 phba->ctrl.mcc_alloc_index++;
47 }
48 return tag;
49}
50
51void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag)
52{
53 spin_lock(&ctrl->mbox_lock);
54 tag = tag & 0x000000FF;
55 ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
56 if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
57 ctrl->mcc_free_index = 0;
58 else
59 ctrl->mcc_free_index++;
60 ctrl->mcc_tag_available++;
61 spin_unlock(&ctrl->mbox_lock);
62}
63
64bool is_link_state_evt(u32 trailer)
65{
66 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
67 ASYNC_TRAILER_EVENT_CODE_MASK) ==
68 ASYNC_EVENT_CODE_LINK_STATE);
69}
70
Jayamohan Kallickal6733b392009-09-05 07:36:35 +053071static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
72{
73 if (compl->flags != 0) {
74 compl->flags = le32_to_cpu(compl->flags);
75 WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
76 return true;
77 } else
78 return false;
79}
80
/* Mark a completion entry as consumed (flags==0 means "not new" to
 * be_mcc_compl_is_new()), so the slot can be reused by hardware.
 */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}
85
86static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
87 struct be_mcc_compl *compl)
88{
89 u16 compl_status, extd_status;
90
91 be_dws_le_to_cpu(compl, 4);
92
93 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
94 CQE_STATUS_COMPL_MASK;
95 if (compl_status != MCC_STATUS_SUCCESS) {
96 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
97 CQE_STATUS_EXTD_MASK;
98 dev_err(&ctrl->pdev->dev,
99 "error in cmd completion: status(compl/extd)=%d/%d\n",
100 compl_status, extd_status);
101 return -1;
102 }
103 return 0;
104}
105
/*
 * be_mcc_compl_process_isr - record an MCC completion and wake the waiter.
 * @ctrl:  adapter control block
 * @compl: completion entry taken off the MCC CQ (little-endian from HW)
 *
 * Runs in completion context: decodes the status words, packs them into
 * ctrl->mcc_numtag[tag] and wakes whoever sleeps on ctrl->mcc_wait[tag].
 * Always returns 0.
 */
int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
				    struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;
	unsigned short tag;

	/* completion is DMAed little-endian; convert in place */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
					CQE_STATUS_COMPL_MASK;
	/* The ctrl.mcc_numtag[tag] is filled with
	 * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
	 * [7:0] = compl_status
	 */
	tag = (compl->tag0 & 0x000000FF);	/* low byte of tag0 is the tag */
	extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
					CQE_STATUS_EXTD_MASK;

	ctrl->mcc_numtag[tag] = 0x80000000;	/* set the valid bit */
	ctrl->mcc_numtag[tag] |= (compl->tag0 & 0x00FF0000);	/* wrb index */
	ctrl->mcc_numtag[tag] |= (extd_status & 0x000000FF) << 8;
	ctrl->mcc_numtag[tag] |= (compl_status & 0x000000FF);
	wake_up_interruptible(&ctrl->mcc_wait[tag]);
	return 0;
}
131
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530132static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba)
133{
134 struct be_queue_info *mcc_cq = &phba->ctrl.mcc_obj.cq;
135 struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
136
137 if (be_mcc_compl_is_new(compl)) {
138 queue_tail_inc(mcc_cq);
139 return compl;
140 }
141 return NULL;
142}
143
/* iscsi_host_for_each_session() callback: fail one session with
 * ISCSI_ERR_CONN_FAILED so the transport starts recovery.
 */
static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
{
	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
}
148
Jayamohan Kallickal756d29c2010-01-05 05:10:46 +0530149void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530150 struct be_async_event_link_state *evt)
151{
152 switch (evt->port_link_status) {
153 case ASYNC_EVENT_LINK_DOWN:
Jayamohan Kallickal457ff3b2010-07-22 04:16:00 +0530154 SE_DEBUG(DBG_LVL_1, "Link Down on Physical Port %d\n",
155 evt->physical_port);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530156 phba->state |= BE_ADAPTER_LINK_DOWN;
Jayamohan Kallickalda7408c2010-01-05 05:11:23 +0530157 iscsi_host_for_each_session(phba->shost,
158 be2iscsi_fail_session);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530159 break;
160 case ASYNC_EVENT_LINK_UP:
161 phba->state = BE_ADAPTER_UP;
Jayamohan Kallickal457ff3b2010-07-22 04:16:00 +0530162 SE_DEBUG(DBG_LVL_1, "Link UP on Physical Port %d\n",
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530163 evt->physical_port);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530164 break;
165 default:
166 SE_DEBUG(DBG_LVL_1, "Unexpected Async Notification %d on"
Jayamohan Kallickal457ff3b2010-07-22 04:16:00 +0530167 "Physical Port %d\n",
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530168 evt->port_link_status,
169 evt->physical_port);
170 }
171}
172
173static void beiscsi_cq_notify(struct beiscsi_hba *phba, u16 qid, bool arm,
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530174 u16 num_popped)
175{
176 u32 val = 0;
177 val |= qid & DB_CQ_RING_ID_MASK;
178 if (arm)
179 val |= 1 << DB_CQ_REARM_SHIFT;
180 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530181 iowrite32(val, phba->db_va + DB_CQ_OFFSET);
182}
183
184
Jayamohan Kallickal35e66012009-10-23 11:53:49 +0530185int beiscsi_process_mcc(struct beiscsi_hba *phba)
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530186{
187 struct be_mcc_compl *compl;
188 int num = 0, status = 0;
189 struct be_ctrl_info *ctrl = &phba->ctrl;
190
191 spin_lock_bh(&phba->ctrl.mcc_cq_lock);
192 while ((compl = be_mcc_compl_get(phba))) {
193 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
194 /* Interpret flags as an async trailer */
Jayamohan Kallickal78b9fb62009-11-25 01:41:37 +0530195 if (is_link_state_evt(compl->flags))
196 /* Interpret compl as a async link evt */
197 beiscsi_async_link_state_process(phba,
198 (struct be_async_event_link_state *) compl);
199 else
200 SE_DEBUG(DBG_LVL_1,
201 " Unsupported Async Event, flags"
Jayamohan Kallickal457ff3b2010-07-22 04:16:00 +0530202 " = 0x%08x\n", compl->flags);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530203
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530204 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
205 status = be_mcc_compl_process(ctrl, compl);
206 atomic_dec(&phba->ctrl.mcc_obj.q.used);
207 }
208 be_mcc_compl_use(compl);
209 num++;
210 }
211
212 if (num)
213 beiscsi_cq_notify(phba, phba->ctrl.mcc_obj.cq.id, true, num);
214
215 spin_unlock_bh(&phba->ctrl.mcc_cq_lock);
216 return status;
217}
218
219/* Wait till no more pending mcc requests are present */
220static int be_mcc_wait_compl(struct beiscsi_hba *phba)
221{
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530222 int i, status;
223 for (i = 0; i < mcc_timeout; i++) {
Jayamohan Kallickal35e66012009-10-23 11:53:49 +0530224 status = beiscsi_process_mcc(phba);
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530225 if (status)
226 return status;
227
228 if (atomic_read(&phba->ctrl.mcc_obj.q.used) == 0)
229 break;
230 udelay(100);
231 }
232 if (i == mcc_timeout) {
233 dev_err(&phba->pcidev->dev, "mccq poll timed out\n");
234 return -1;
235 }
236 return 0;
237}
238
/* Notify MCC requests and wait for completion */
int be_mcc_notify_wait(struct beiscsi_hba *phba)
{
	/* ring the MCC doorbell, then poll until the queue drains */
	be_mcc_notify(phba);
	return be_mcc_wait_compl(phba);
}
245
246static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
247{
248#define long_delay 2000
249 void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
250 int cnt = 0, wait = 5; /* in usecs */
251 u32 ready;
252
253 do {
254 ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
255 if (ready)
256 break;
257
258 if (cnt > 6000000) {
259 dev_err(&ctrl->pdev->dev, "mbox_db poll timed out\n");
260 return -1;
261 }
262
263 if (cnt > 50) {
264 wait = long_delay;
265 mdelay(long_delay / 1000);
266 } else
267 udelay(wait);
268 cnt += wait;
269 } while (true);
270 return 0;
271}
272
273int be_mbox_notify(struct be_ctrl_info *ctrl)
274{
275 int status;
276 u32 val = 0;
277 void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
278 struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
279 struct be_mcc_mailbox *mbox = mbox_mem->va;
280 struct be_mcc_compl *compl = &mbox->compl;
281
282 val &= ~MPU_MAILBOX_DB_RDY_MASK;
283 val |= MPU_MAILBOX_DB_HI_MASK;
284 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
285 iowrite32(val, db);
286
287 status = be_mbox_db_ready_wait(ctrl);
288 if (status != 0) {
Jayamohan Kallickal457ff3b2010-07-22 04:16:00 +0530289 SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed\n");
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530290 return status;
291 }
292 val = 0;
293 val &= ~MPU_MAILBOX_DB_RDY_MASK;
294 val &= ~MPU_MAILBOX_DB_HI_MASK;
295 val |= (u32) (mbox_mem->dma >> 4) << 2;
296 iowrite32(val, db);
297
298 status = be_mbox_db_ready_wait(ctrl);
299 if (status != 0) {
Jayamohan Kallickal457ff3b2010-07-22 04:16:00 +0530300 SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed\n");
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530301 return status;
302 }
303 if (be_mcc_compl_is_new(compl)) {
304 status = be_mcc_compl_process(ctrl, &mbox->compl);
305 be_mcc_compl_use(compl);
306 if (status) {
Jayamohan Kallickal457ff3b2010-07-22 04:16:00 +0530307 SE_DEBUG(DBG_LVL_1, "After be_mcc_compl_process\n");
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530308 return status;
309 }
310 } else {
311 dev_err(&ctrl->pdev->dev, "invalid mailbox completion\n");
312 return -1;
313 }
314 return 0;
315}
316
/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 *
 * Same flow as be_mbox_notify() but takes the hba and uses its pci dev
 * for error reporting.  Returns 0 on success, the poll/decode status
 * otherwise.
 */
static int be_mbox_notify_wait(struct beiscsi_hba *phba)
{
	int status;
	u32 val = 0;
	void __iomem *db = phba->ctrl.db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &phba->ctrl.mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(ctrl);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(ctrl);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(ctrl, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&phba->pcidev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}
362
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530363void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
364 bool embedded, u8 sge_cnt)
365{
366 if (embedded)
367 wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
368 else
369 wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
370 MCC_WRB_SGE_CNT_SHIFT;
371 wrb->payload_length = payload_len;
372 be_dws_cpu_to_le(wrb, 8);
373}
374
375void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
376 u8 subsystem, u8 opcode, int cmd_len)
377{
378 req_hdr->opcode = opcode;
379 req_hdr->subsystem = subsystem;
380 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
381}
382
383static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
384 struct be_dma_mem *mem)
385{
386 int i, buf_pages;
387 u64 dma = (u64) mem->dma;
388
389 buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
390 for (i = 0; i < buf_pages; i++) {
391 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
392 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
393 dma += PAGE_SIZE_4K;
394 }
395}
396
397static u32 eq_delay_to_mult(u32 usec_delay)
398{
399#define MAX_INTR_RATE 651042
400 const u32 round = 10;
401 u32 multiplier;
402
403 if (usec_delay == 0)
404 multiplier = 0;
405 else {
406 u32 interrupt_rate = 1000000 / usec_delay;
407 if (interrupt_rate == 0)
408 multiplier = 1023;
409 else {
410 multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
411 multiplier /= interrupt_rate;
412 multiplier = (multiplier + round / 2) / round;
413 multiplier = min(multiplier, (u32) 1023);
414 }
415 }
416 return multiplier;
417}
418
/* Return the WRB embedded in the bootstrap mailbox memory. */
struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
{
	return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}
423
/*
 * Take the WRB at the head of the MCC queue, zeroed and with the queue
 * head position recorded in tag0 bits 23:16 (matching the wrb field
 * decoded by be_mcc_compl_process_isr()).  BUGs if the queue is full.
 */
struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	struct be_mcc_wrb *wrb;

	BUG_ON(atomic_read(&mccq->used) >= mccq->len);
	wrb = queue_head_node(mccq);
	memset(wrb, 0, sizeof(*wrb));
	/* low byte of the head index, shifted into tag0 bits 23:16 */
	wrb->tag0 = (mccq->head & 0x000000FF) << 16;
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	return wrb;
}
437
438
/*
 * beiscsi_cmd_eq_create - create an event queue on the adapter.
 * @ctrl:     adapter control block
 * @eq:       queue descriptor; eq->id and eq->created are filled on success
 * @eq_delay: interrupt coalescing delay in usecs, encoded via
 *            eq_delay_to_mult()
 *
 * Issued through the bootstrap mailbox under mbox_lock.  Note that req
 * and resp alias the same embedded payload: resp is only meaningful
 * after be_mbox_notify() succeeds.
 */
int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
			  struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_eq_create\n");
	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, func, req->context,
						PCI_FUNC(ctrl->pdev->devfn));
	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
					__ilog2_u32(eq->len / 256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
					eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);
	return status;
}
479
480int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
481{
482 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
483 int status;
484 u8 *endian_check;
485
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530486 SE_DEBUG(DBG_LVL_8, "In be_cmd_fw_initialize\n");
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530487 spin_lock(&ctrl->mbox_lock);
488 memset(wrb, 0, sizeof(*wrb));
489
490 endian_check = (u8 *) wrb;
491 *endian_check++ = 0xFF;
492 *endian_check++ = 0x12;
493 *endian_check++ = 0x34;
494 *endian_check++ = 0xFF;
495 *endian_check++ = 0xFF;
496 *endian_check++ = 0x56;
497 *endian_check++ = 0x78;
498 *endian_check++ = 0xFF;
499 be_dws_cpu_to_le(wrb, sizeof(*wrb));
500
501 status = be_mbox_notify(ctrl);
502 if (status)
Jayamohan Kallickal457ff3b2010-07-22 04:16:00 +0530503 SE_DEBUG(DBG_LVL_1, "be_cmd_fw_initialize Failed\n");
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530504
505 spin_unlock(&ctrl->mbox_lock);
506 return status;
507}
508
509int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
510 struct be_queue_info *cq, struct be_queue_info *eq,
511 bool sol_evts, bool no_delay, int coalesce_wm)
512{
513 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
514 struct be_cmd_req_cq_create *req = embedded_payload(wrb);
515 struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
516 struct be_dma_mem *q_mem = &cq->dma_mem;
517 void *ctxt = &req->context;
518 int status;
519
Jayamohan Kallickal457ff3b2010-07-22 04:16:00 +0530520 SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_cq_create\n");
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530521 spin_lock(&ctrl->mbox_lock);
522 memset(wrb, 0, sizeof(*wrb));
523
524 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
525
526 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
527 OPCODE_COMMON_CQ_CREATE, sizeof(*req));
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530528 if (!q_mem->va)
529 SE_DEBUG(DBG_LVL_1, "uninitialized q_mem->va\n");
530
531 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
532
533 AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
534 AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
535 AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
536 __ilog2_u32(cq->len / 256));
537 AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
538 AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
539 AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
540 AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
541 AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
542 AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
543 PCI_FUNC(ctrl->pdev->devfn));
544 be_dws_cpu_to_le(ctxt, sizeof(req->context));
545
546 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
547
548 status = be_mbox_notify(ctrl);
549 if (!status) {
550 cq->id = le16_to_cpu(resp->cq_id);
551 cq->created = true;
552 } else
Jayamohan Kallickal457ff3b2010-07-22 04:16:00 +0530553 SE_DEBUG(DBG_LVL_1, "In be_cmd_cq_create, status=ox%08x\n",
Jayamohan Kallickal6733b392009-09-05 07:36:35 +0530554 status);
555 spin_unlock(&ctrl->mbox_lock);
556
557 return status;
558}
559
560static u32 be_encoded_q_len(int q_len)
561{
562 u32 len_encoded = fls(q_len); /* log2(len) + 1 */
563 if (len_encoded == 16)
564 len_encoded = 0;
565 return len_encoded;
566}
Jayamohan Kallickalbfead3b2009-10-23 11:52:33 +0530567
/*
 * beiscsi_cmd_mccq_create - create the MCC queue bound to a CQ.
 * @phba: adapter instance
 * @mccq: queue descriptor; mccq->id / mccq->created filled on success
 * @cq:   completion queue the MCC queue completes into
 *
 * Issued through the bootstrap mailbox under mbox_lock, using
 * be_mbox_notify_wait() rather than be_mbox_notify().
 */
int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	struct be_ctrl_info *ctrl;
	void *ctxt;
	int status;

	spin_lock(&phba->ctrl.mbox_lock);
	ctrl = &phba->ctrl;
	wrb = wrb_from_mbox(&ctrl->mbox_mem);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

	AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt,
		      PCI_FUNC(phba->pcidev->devfn));
	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
		be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(phba);
	if (!status) {
		/* resp aliases the same embedded payload as req */
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	spin_unlock(&phba->ctrl.mbox_lock);

	return status;
}
613
/*
 * beiscsi_cmd_q_destroy - destroy an adapter queue (or SGL config).
 * @ctrl:       adapter control block
 * @q:          queue to destroy; may be NULL for QTYPE_SGL, where no
 *              queue id is sent
 * @queue_type: one of the QTYPE_* values; any other value BUGs
 *
 * Maps the queue type to the matching subsystem/opcode pair and issues
 * the destroy through the bootstrap mailbox under mbox_lock.
 */
int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
			  int queue_type)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
	u8 subsys = 0, opcode = 0;
	int status;

	SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_q_destroy\n");
	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	case QTYPE_WRBQ:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
		break;
	case QTYPE_DPDUQ:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY;
		break;
	case QTYPE_SGL:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
		break;
	default:
		/* unknown type: drop the lock before BUG-ing */
		spin_unlock(&ctrl->mbox_lock);
		BUG();
		return -1;	/* unreachable; silences compilers */
	}
	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	/* SGL removal is not tied to a single queue id */
	if (queue_type != QTYPE_SGL)
		req->id = cpu_to_le16(q->id);

	status = be_mbox_notify(ctrl);

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
666
/*
 * be_cmd_create_default_pdu_queue - create the default PDU (header/data)
 * queue.
 * @ctrl:       adapter control block
 * @cq:         completion queue that receives notifications for this queue
 * @dq:         queue descriptor; dq->id / dq->created filled on success
 * @length:     ring length in bytes (converted to an entry count below)
 * @entry_size: size of each default buffer
 *
 * Issued through the bootstrap mailbox under mbox_lock.
 */
int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
				    struct be_queue_info *cq,
				    struct be_queue_info *dq, int length,
				    int entry_size)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_defq_create_req *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &dq->dma_mem;
	void *ctxt = &req->context;
	int status;

	SE_DEBUG(DBG_LVL_8, "In be_cmd_create_default_pdu_queue\n");
	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid, ctxt, 0);
	AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid_valid, ctxt,
		      1);
	AMAP_SET_BITS(struct amap_be_default_pdu_context, pci_func_id, ctxt,
		      PCI_FUNC(ctrl->pdev->devfn));
	AMAP_SET_BITS(struct amap_be_default_pdu_context, ring_size, ctxt,
		      be_encoded_q_len(length / sizeof(struct phys_addr)));
	AMAP_SET_BITS(struct amap_be_default_pdu_context, default_buffer_size,
		      ctxt, entry_size);
	AMAP_SET_BITS(struct amap_be_default_pdu_context, cq_id_recv, ctxt,
		      cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		/* resp aliases the same embedded payload as req */
		struct be_defq_create_resp *resp = embedded_payload(wrb);

		dq->id = le16_to_cpu(resp->id);
		dq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);

	return status;
}
715
/*
 * be_cmd_wrbq_create - create a WRB queue on the adapter.
 * @ctrl:  adapter control block
 * @q_mem: DMA memory backing the ring
 * @wrbq:  queue descriptor; wrbq->id / wrbq->created filled on success
 *
 * Issued through the bootstrap mailbox under mbox_lock.  req and resp
 * alias the same embedded payload; resp->cid is only valid after
 * be_mbox_notify() succeeds.
 */
int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
		       struct be_queue_info *wrbq)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_wrbq_create_req *req = embedded_payload(wrb);
	struct be_wrbq_create_resp *resp = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
		OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		wrbq->id = le16_to_cpu(resp->cid);
		wrbq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);
	return status;
}
742
/*
 * be_cmd_iscsi_post_sgl_pages - post SGL page addresses to the adapter.
 * @ctrl:        adapter control block
 * @q_mem:       DMA memory whose pages are posted; q_mem->dma is advanced
 *               past each posted chunk (caller's descriptor is mutated)
 * @page_offset: starting page index in the adapter's SGL table
 * @num_pages:   pages to post; 0xff is treated as a special request —
 *               the loop runs once and req->num_pages is forced back to
 *               0xff before posting (presumably a firmware sentinel;
 *               confirm against the FW spec)
 *
 * Splits the posting across as many mailbox commands as the per-request
 * page array requires.  On any command failure, the partially posted
 * SGL config is removed via beiscsi_cmd_q_destroy(..., QTYPE_SGL) and
 * the failing status is returned.
 */
int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
				struct be_dma_mem *q_mem,
				u32 page_offset, u32 num_pages)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_post_sgl_pages_req *req = embedded_payload(wrb);
	int status;
	unsigned int curr_pages;
	u32 internal_page_offset = 0;
	u32 temp_num_pages = num_pages;

	if (num_pages == 0xff)
		num_pages = 1;

	spin_lock(&ctrl->mbox_lock);
	do {
		memset(wrb, 0, sizeof(*wrb));
		be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
				   OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES,
				   sizeof(*req));
		/* cap this request at the page slots the struct can hold */
		curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req,
						pages);
		req->num_pages = min(num_pages, curr_pages);
		req->page_offset = page_offset;
		be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
		q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
		internal_page_offset += req->num_pages;
		page_offset += req->num_pages;
		num_pages -= req->num_pages;

		/* re-apply the 0xff sentinel before handing to firmware */
		if (temp_num_pages == 0xff)
			req->num_pages = temp_num_pages;

		status = be_mbox_notify(ctrl);
		if (status) {
			SE_DEBUG(DBG_LVL_1,
				 "FW CMD to map iscsi frags failed.\n");
			goto error;
		}
	} while (num_pages > 0);
error:
	spin_unlock(&ctrl->mbox_lock);
	if (status != 0)
		beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
	return status;
}