/**
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_mgmt.h"
#include "be_main.h"

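/* Ring the MCC doorbell: tell the controller one new entry was posted
 * on the MCC WRB queue.
 */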
static void be_mcc_notify(struct beiscsi_hba *phba)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	u32 val = 0;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
	iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
}

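/* A completion entry is new if its flags word is non-zero; every new
 * entry is expected to carry the valid bit.
 */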
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	if (compl->flags != 0) {
		compl->flags = le32_to_cpu(compl->flags);
		WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else
		return false;
}

static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

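/* Byte-swap a completion entry to CPU order and check its status;
 * anything other than MCC_STATUS_SUCCESS is logged and returned as
 * an error.
 */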
static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
				struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;

	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
					CQE_STATUS_COMPL_MASK;
	if (compl_status != MCC_STATUS_SUCCESS) {
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
						CQE_STATUS_EXTD_MASK;
		dev_err(&ctrl->pdev->dev,
			"error in cmd completion: status(compl/extd)=%d/%d\n",
			compl_status, extd_status);
		return -1;
	}
	return 0;
}

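/* Decode the async-event trailer and test for the link-state event code. */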
static inline bool is_link_state_evt(u32 trailer)
{
	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		  ASYNC_TRAILER_EVENT_CODE_MASK) ==
		  ASYNC_EVENT_CODE_LINK_STATE);
}

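/* Return the completion at the MCC CQ tail if it is new, else NULL. */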
static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba)
{
	struct be_queue_info *mcc_cq = &phba->ctrl.mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
{
	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
}

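/* Track the adapter state from async link events: mark the link down
 * on ASYNC_EVENT_LINK_DOWN, and on ASYNC_EVENT_LINK_UP mark the
 * adapter up and fail every session on the host so the iSCSI
 * transport can recover them.
 */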
static void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
		struct be_async_event_link_state *evt)
{
	switch (evt->port_link_status) {
	case ASYNC_EVENT_LINK_DOWN:
		SE_DEBUG(DBG_LVL_1, "Link Down on Physical Port %d\n",
			 evt->physical_port);
		phba->state |= BE_ADAPTER_LINK_DOWN;
		break;
	case ASYNC_EVENT_LINK_UP:
		phba->state = BE_ADAPTER_UP;
		SE_DEBUG(DBG_LVL_1, "Link UP on Physical Port %d\n",
			 evt->physical_port);
		iscsi_host_for_each_session(phba->shost,
					    be2iscsi_fail_session);
		break;
	default:
		SE_DEBUG(DBG_LVL_1, "Unexpected Async Notification %d on "
			 "Physical Port %d\n",
			 evt->port_link_status,
			 evt->physical_port);
	}
}

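/* Ring the CQ doorbell: acknowledge num_popped processed entries and
 * optionally re-arm the queue.
 */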
static void beiscsi_cq_notify(struct beiscsi_hba *phba, u16 qid, bool arm,
			      u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
}

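/* Drain the MCC completion queue: dispatch async link-state events,
 * process command completions, then acknowledge all consumed entries
 * in one doorbell write.
 */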
int be_process_mcc(struct beiscsi_hba *phba)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	spin_lock_bh(&phba->ctrl.mcc_cq_lock);
	while ((compl = be_mcc_compl_get(phba))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			BUG_ON(!is_link_state_evt(compl->flags));

			/* Interpret compl as an async link event */
			beiscsi_async_link_state_process(phba,
				(struct be_async_event_link_state *) compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			status = be_mcc_compl_process(ctrl, compl);
			atomic_dec(&phba->ctrl.mcc_obj.q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	if (num)
		beiscsi_cq_notify(phba, phba->ctrl.mcc_obj.cq.id, true, num);

	spin_unlock_bh(&phba->ctrl.mcc_cq_lock);
	return status;
}

/* Wait till no pending mcc requests remain */
static int be_mcc_wait_compl(struct beiscsi_hba *phba)
{
#define mcc_timeout		120000 /* 12s timeout: 120000 * 100us */
	int i, status;
	for (i = 0; i < mcc_timeout; i++) {
		status = be_process_mcc(phba);
		if (status)
			return status;

		if (atomic_read(&phba->ctrl.mcc_obj.q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&phba->pcidev->dev, "mccq poll timed out\n");
		return -1;
	}
	return 0;
}

/* Notify MCC requests and wait for completion */
int be_mcc_notify_wait(struct beiscsi_hba *phba)
{
	be_mcc_notify(phba);
	return be_mcc_wait_compl(phba);
}

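/* Poll the mailbox doorbell ready bit, backing off from 5us busy-waits
 * to 2ms delays once the first 50us have elapsed; give up after
 * roughly 6 seconds.
 */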
static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
{
#define long_delay 2000
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	int cnt = 0, wait = 5;	/* in usecs */
	u32 ready;

	do {
		ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (cnt > 6000000) {
			dev_err(&ctrl->pdev->dev, "mbox_db poll timed out\n");
			return -1;
		}

		if (cnt > 50) {
			wait = long_delay;
			mdelay(long_delay / 1000);
		} else
			udelay(wait);
		cnt += wait;
	} while (true);
	return 0;
}

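/* Post a command through the bootstrap mailbox: write the high then
 * the low half of the mailbox DMA address to the doorbell, waiting for
 * the ready bit after each write, then process the resulting
 * completion entry.
 */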
int be_mbox_notify(struct be_ctrl_info *ctrl)
{
	int status;
	u32 val = 0;
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val |= MPU_MAILBOX_DB_HI_MASK;
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(ctrl);
	if (status != 0) {
		SE_DEBUG(DBG_LVL_1, "be_mbox_db_ready_wait failed on first write\n");
		return status;
	}
	val = 0;
	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val &= ~MPU_MAILBOX_DB_HI_MASK;
	val |= (u32) (mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(ctrl);
	if (status != 0) {
		SE_DEBUG(DBG_LVL_1, "be_mbox_db_ready_wait failed on second write\n");
		return status;
	}
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(ctrl, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status) {
			SE_DEBUG(DBG_LVL_1, "be_mcc_compl_process failed\n");
			return status;
		}
	} else {
		dev_err(&ctrl->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps and poll
 * on the mbox doorbell till a command completion (or a timeout) occurs.
 */
static int be_mbox_notify_wait(struct beiscsi_hba *phba)
{
	int status;
	u32 val = 0;
	void __iomem *db = phba->ctrl.db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &phba->ctrl.mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(ctrl);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(ctrl);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(ctrl, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&phba->pcidev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}

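/* Fill in the generic WRB header: mark the payload as either embedded
 * or carried in sge_cnt scatter-gather entries, then convert the
 * header to little-endian.
 */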
void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
			bool embedded, u8 sge_cnt)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
				 MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	be_dws_cpu_to_le(wrb, 8);
}

void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
			u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
}

static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
				      struct be_dma_mem *mem)
{
	int i, buf_pages;
	u64 dma = (u64) mem->dma;

	buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

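/* Convert a desired EQ interrupt delay in microseconds into the
 * delay-multiplier encoding used by the EQ context: 0 means no delay,
 * and the result is capped at 1023.
 */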
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE 651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;
		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			multiplier = (multiplier + round / 2) / round;
			multiplier = min(multiplier, (u32) 1023);
		}
	}
	return multiplier;
}

struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
{
	return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}

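/* Take the WRB at the MCC queue head, advance the head, and return the
 * zeroed WRB; BUG if the queue is already full.
 */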
struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	struct be_mcc_wrb *wrb;

	BUG_ON(atomic_read(&mccq->used) >= mccq->len);
	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
			  struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_eq_create\n");
	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, func, req->context,
		      PCI_FUNC(ctrl->pdev->devfn));
	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
		      __ilog2_u32(eq->len / 256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
		      eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);
	return status;
}

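/* Send the FW initialization command through the mailbox; the fixed
 * byte pattern written below serves as an endianness signature for the
 * firmware (hence the name endian_check).
 */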
int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	int status;
	u8 *endian_check;

	SE_DEBUG(DBG_LVL_8, "In be_cmd_fw_initialize\n");
	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	endian_check = (u8 *) wrb;
	*endian_check++ = 0xFF;
	*endian_check++ = 0x12;
	*endian_check++ = 0x34;
	*endian_check++ = 0xFF;
	*endian_check++ = 0xFF;
	*endian_check++ = 0x56;
	*endian_check++ = 0x78;
	*endian_check++ = 0xFF;
	be_dws_cpu_to_le(wrb, sizeof(*wrb));

	status = be_mbox_notify(ctrl);
	if (status)
		SE_DEBUG(DBG_LVL_1, "be_cmd_fw_initialize failed\n");

	spin_unlock(&ctrl->mbox_lock);
	return status;
}

int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
			  struct be_queue_info *cq, struct be_queue_info *eq,
			  bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_cq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt = &req->context;
	int status;

	SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_cq_create\n");
	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_CQ_CREATE, sizeof(*req));
	if (!q_mem->va)
		SE_DEBUG(DBG_LVL_1, "uninitialized q_mem->va\n");

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
	AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
	AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
		      __ilog2_u32(cq->len / 256));
	AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
	AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
	AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
		      PCI_FUNC(ctrl->pdev->devfn));
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	} else
		SE_DEBUG(DBG_LVL_1,
			 "beiscsi_cmd_cq_create failed, status=0x%08x\n",
			 status);
	spin_unlock(&ctrl->mbox_lock);

	return status;
}

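/* Encode a queue length for a ring-context field: log2(len) + 1, with
 * 16 encoded as 0.
 */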
static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len);	/* log2(len) + 1 */
	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}

int be_cmd_mccq_create(struct beiscsi_hba *phba,
		       struct be_queue_info *mccq,
		       struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	struct be_ctrl_info *ctrl;
	void *ctxt;
	int status;

	spin_lock(&phba->ctrl.mbox_lock);
	ctrl = &phba->ctrl;
	wrb = wrb_from_mbox(&ctrl->mbox_mem);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_MCC_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

	AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt,
		      PCI_FUNC(phba->pcidev->devfn));
	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
		      be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(phba);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	spin_unlock(&phba->ctrl.mbox_lock);

	return status;
}

int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
			  int queue_type)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
	u8 subsys = 0, opcode = 0;
	int status;

	SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_q_destroy\n");
	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	case QTYPE_WRBQ:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
		break;
	case QTYPE_DPDUQ:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY;
		break;
	case QTYPE_SGL:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
		break;
	default:
		spin_unlock(&ctrl->mbox_lock);
		BUG();
		return -1;
	}
	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	if (queue_type != QTYPE_SGL)
		req->id = cpu_to_le16(q->id);

	status = be_mbox_notify(ctrl);

	spin_unlock(&ctrl->mbox_lock);
	return status;
}

int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
				    struct be_queue_info *cq,
				    struct be_queue_info *dq, int length,
				    int entry_size)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_defq_create_req *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &dq->dma_mem;
	void *ctxt = &req->context;
	int status;

	SE_DEBUG(DBG_LVL_8, "In be_cmd_create_default_pdu_queue\n");
	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid, ctxt, 0);
	AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid_valid, ctxt,
		      1);
	AMAP_SET_BITS(struct amap_be_default_pdu_context, pci_func_id, ctxt,
		      PCI_FUNC(ctrl->pdev->devfn));
	AMAP_SET_BITS(struct amap_be_default_pdu_context, ring_size, ctxt,
		      be_encoded_q_len(length / sizeof(struct phys_addr)));
	AMAP_SET_BITS(struct amap_be_default_pdu_context, default_buffer_size,
		      ctxt, entry_size);
	AMAP_SET_BITS(struct amap_be_default_pdu_context, cq_id_recv, ctxt,
		      cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		struct be_defq_create_resp *resp = embedded_payload(wrb);

		dq->id = le16_to_cpu(resp->id);
		dq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);

	return status;
}

int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
		       struct be_queue_info *wrbq)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_wrbq_create_req *req = embedded_payload(wrb);
	struct be_wrbq_create_resp *resp = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		wrbq->id = le16_to_cpu(resp->cid);
		wrbq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);
	return status;
}

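/* Post the SGL pages backing q_mem to the firmware, chunking the
 * request by however many page addresses fit in one command; on
 * failure the already-posted pages are removed via
 * beiscsi_cmd_q_destroy(QTYPE_SGL).
 */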
int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
				struct be_dma_mem *q_mem,
				u32 page_offset, u32 num_pages)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_post_sgl_pages_req *req = embedded_payload(wrb);
	int status;
	unsigned int curr_pages;
	u32 internal_page_offset = 0;
	u32 temp_num_pages = num_pages;

	if (num_pages == 0xff)
		num_pages = 1;

	spin_lock(&ctrl->mbox_lock);
	do {
		memset(wrb, 0, sizeof(*wrb));
		be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
				   OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES,
				   sizeof(*req));
		curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req,
						pages);
		req->num_pages = min(num_pages, curr_pages);
		req->page_offset = page_offset;
		be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
		q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
		internal_page_offset += req->num_pages;
		page_offset += req->num_pages;
		num_pages -= req->num_pages;

		if (temp_num_pages == 0xff)
			req->num_pages = temp_num_pages;

		status = be_mbox_notify(ctrl);
		if (status) {
			SE_DEBUG(DBG_LVL_1,
				 "FW CMD to map iscsi frags failed.\n");
			goto error;
		}
	} while (num_pages > 0);
error:
	spin_unlock(&ctrl->mbox_lock);
	if (status != 0)
		beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
	return status;
}