/*
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
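
/* Ring the MCC doorbell: post the MCC queue id along with a num_posted
 * count of 1, which presumably prompts the firmware to fetch the newly
 * queued WRB */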
void be_mcc_notify(struct be_ctrl_info *ctrl)
{
	struct be_queue_info *mccq = &ctrl->mcc_obj.q;
	u32 val = 0;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
	iowrite32(val, ctrl->db + DB_MCCQ_OFFSET);
}

/* To check if the valid bit is set, check the entire word as we don't know
 * the endianness of the data (an old entry is host endian while a new
 * entry is little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_cq_entry *compl)
{
	if (compl->flags != 0) {
		compl->flags = le32_to_cpu(compl->flags);
		BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else {
		return false;
	}
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_cq_entry *compl)
{
	compl->flags = 0;
}
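
/* Process one MCC completion: returns 0 on MCC_STATUS_SUCCESS, else
 * logs the completion and extended status codes and returns -1 */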
static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
				struct be_mcc_cq_entry *compl)
{
	u16 compl_status, extd_status;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;
	if (compl_status != MCC_STATUS_SUCCESS) {
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
				CQE_STATUS_EXTD_MASK;
		printk(KERN_WARNING DRV_NAME
			" error in cmd completion: status(compl/extd)=%d/%d\n",
			compl_status, extd_status);
		return -1;
	}
	return 0;
}
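
/* Return the entry at the tail of the MCC CQ if it is a new completion,
 * advancing the tail; returns NULL if nothing new is pending */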
static struct be_mcc_cq_entry *be_mcc_compl_get(struct be_ctrl_info *ctrl)
{
	struct be_queue_info *mcc_cq = &ctrl->mcc_obj.cq;
	struct be_mcc_cq_entry *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}
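
/* Drain all new completions from the MCC CQ under mcc_cq_lock (BHs
 * disabled), process the non-async ones, and re-arm the CQ with the
 * number of entries consumed */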
void be_process_mcc(struct be_ctrl_info *ctrl)
{
	struct be_mcc_cq_entry *compl;
	int num = 0;

	spin_lock_bh(&ctrl->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(ctrl))) {
		if (!(compl->flags & CQE_FLAGS_ASYNC_MASK)) {
			be_mcc_compl_process(ctrl, compl);
			atomic_dec(&ctrl->mcc_obj.q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}
	if (num)
		be_cq_notify(ctrl, ctrl->mcc_obj.cq.id, true, num);
	spin_unlock_bh(&ctrl->mcc_cq_lock);
}
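
/* Busy-wait for the mailbox doorbell's ready bit, polling with
 * progressively longer udelay()s for a total of roughly 200ms;
 * returns -1 on timeout */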
static int be_mbox_db_ready_wait(void __iomem *db)
{
	int cnt = 0, wait = 5;
	u32 ready;

	do {
		ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (cnt > 200000) {
			printk(KERN_WARNING DRV_NAME
				": mbox_db poll timed out\n");
			return -1;
		}

		if (cnt > 50)
			wait = 200;
		cnt += wait;
		udelay(wait);
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * and poll on the mbox doorbell till a command completion (or a timeout)
 * occurs
 */
static int be_mbox_db_ring(struct be_ctrl_info *ctrl)
{
	int status;
	u32 val = 0;
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_cq_entry *cqe = &mbox->cqe;

	memset(cqe, 0, sizeof(*cqe));

	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(db);
	if (status != 0)
		return status;

	val = 0;
	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val &= ~MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(cqe)) {
		status = be_mcc_compl_process(ctrl, &mbox->cqe);
		be_mcc_compl_use(cqe);
		if (status)
			return status;
	} else {
		printk(KERN_WARNING DRV_NAME ": invalid mailbox completion\n");
		return -1;
	}
	return 0;
}
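
/* Read the current POST stage from the MPU semaphore register;
 * returns -1 if the register's error bit is set */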
static int be_POST_stage_get(struct be_ctrl_info *ctrl, u16 *stage)
{
	u32 sem = ioread32(ctrl->csr + MPU_EP_SEMAPHORE_OFFSET);

	*stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
	if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
		return -1;
	else
		return 0;
}
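
/* Poll (about 5 seconds worth of 1ms delays) till the semaphore reports
 * the given POST stage; returns -1 on a POST error or on timeout */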
static int be_POST_stage_poll(struct be_ctrl_info *ctrl, u16 poll_stage)
{
	u16 stage, cnt, error;

	for (cnt = 0; cnt < 5000; cnt++) {
		error = be_POST_stage_get(ctrl, &stage);
		if (error)
			return -1;

		if (stage == poll_stage)
			break;
		udelay(1000);
	}
	if (stage != poll_stage)
		return -1;
	return 0;
}
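
/* Drive POST: if the ARM firmware is already ready do nothing; else
 * request a BE reset, wait for the AWAITING_HOST_RDY stage again, then
 * signal HOST_RDY and poll till the firmware reports ARMFW_RDY */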
int be_cmd_POST(struct be_ctrl_info *ctrl)
{
	u16 stage, error;

	error = be_POST_stage_get(ctrl, &stage);
	if (error)
		goto err;

	if (stage == POST_STAGE_ARMFW_RDY)
		return 0;

	if (stage != POST_STAGE_AWAITING_HOST_RDY)
		goto err;

	/* On awaiting host rdy, reset and again poll on awaiting host rdy */
	iowrite32(POST_STAGE_BE_RESET, ctrl->csr + MPU_EP_SEMAPHORE_OFFSET);
	error = be_POST_stage_poll(ctrl, POST_STAGE_AWAITING_HOST_RDY);
	if (error)
		goto err;

	/* Now kickoff POST and poll on armfw ready */
	iowrite32(POST_STAGE_HOST_RDY, ctrl->csr + MPU_EP_SEMAPHORE_OFFSET);
	error = be_POST_stage_poll(ctrl, POST_STAGE_ARMFW_RDY);
	if (error)
		goto err;

	return 0;
err:
	printk(KERN_WARNING DRV_NAME ": ERROR, stage=%d\n", stage);
	return -1;
}

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

/* Don't touch the hdr after it's prepared */
static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
				bool embedded, u8 sge_cnt)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
				MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	be_dws_cpu_to_le(wrb, 20);
}

/* Don't touch the hdr after it's prepared */
static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
}
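
/* Fill a command's page-address array with the 4K pages spanned by the
 * queue's DMA memory, capped at max_pages */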
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
				struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

/* Converts interrupt delay in microseconds to multiplier value */
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE 651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;
		/* Max delay, corresponding to the lowest interrupt rate */
		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			/* Round the multiplier to the closest value. */
			multiplier = (multiplier + round/2) / round;
			multiplier = min(multiplier, (u32)1023);
		}
	}
	return multiplier;
}

static inline struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
{
	return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}
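
/* Allocate a WRB at the head of the MCC queue; returns NULL when the
 * queue is full. The used count is dropped again in be_process_mcc()
 * once the matching completion is consumed */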
static inline struct be_mcc_wrb *wrb_from_mcc(struct be_queue_info *mccq)
{
	struct be_mcc_wrb *wrb = NULL;

	if (atomic_read(&mccq->used) < mccq->len) {
		wrb = queue_head_node(mccq);
		queue_head_inc(mccq);
		atomic_inc(&mccq->used);
		memset(wrb, 0, sizeof(*wrb));
	}
	return wrb;
}
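
/* Create an event queue with 4-byte EQEs via a mailbox command and
 * record the EQ id handed back by the firmware */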
int be_cmd_eq_create(struct be_ctrl_info *ctrl,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, func, req->context,
			ctrl->pci_func);
	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4 byte eqe */
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);
	return status;
}
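
/* Query either the permanent (factory) MAC address or the address
 * currently programmed on the given interface */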
int be_cmd_mac_addr_query(struct be_ctrl_info *ctrl, u8 *mac_addr,
			u8 type, bool permanent, u32 if_handle)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_mac_query *req = embedded_payload(wrb);
	struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));

	req->type = type;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16)if_handle);
		req->permanent = 0;
	}

	status = be_mbox_db_ring(ctrl);
	if (!status)
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
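
/* Add a MAC address to an interface's filter; the firmware returns a
 * pmac_id with which the address can later be removed */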
int be_cmd_pmac_add(struct be_ctrl_info *ctrl, u8 *mac_addr,
		u32 if_id, u32 *pmac_id)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_pmac_add *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
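
/* Remove a MAC address previously added with be_cmd_pmac_add() */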
int be_cmd_pmac_del(struct be_ctrl_info *ctrl, u32 if_id, u32 pmac_id)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_pmac_del *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mbox_db_ring(ctrl);
	spin_unlock(&ctrl->mbox_lock);

	return status;
}
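
/* Create a completion queue bound to the given event queue; the CQ is
 * created armed and eventable */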
int be_cmd_cq_create(struct be_ctrl_info *ctrl,
		struct be_queue_info *cq, struct be_queue_info *eq,
		bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_cq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt = &req->context;
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
	AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
	AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
			__ilog2_u32(cq->len/256));
	AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
	AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
	AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, func, ctxt, ctrl->pci_func);
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);

	return status;
}
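
/* Encode a queue length as log2(len) + 1 (via fls); an encoding of 16
 * apparently wraps to 0 in the hardware ring_size field */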
static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */

	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}
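
/* Create the MCC queue on which asynchronous commands are posted,
 * bound to the given completion queue */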
int be_cmd_mccq_create(struct be_ctrl_info *ctrl,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_mcc_create *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt = &req->context;
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

	AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, ctrl->pci_func);
	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
		be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);

	return status;
}
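
/* Create a standard-type ethernet TX ring on ULP1 and bind its send
 * completions to the given CQ */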
int be_cmd_txq_create(struct be_ctrl_info *ctrl,
			struct be_queue_info *txq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eth_tx_create *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &txq->dma_mem;
	void *ctxt = &req->context;
	int status;
	u32 len_encoded;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
		sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;

	len_encoded = fls(txq->len); /* log2(len) + 1 */
	if (len_encoded == 16)
		len_encoded = 0;
	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt, len_encoded);
	AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt,
			ctrl->pci_func);
	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		txq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);

	return status;
}
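
/* Create an ethernet RX ring (always 2 pages) bound to the given CQ,
 * interface and RSS setting */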
int be_cmd_rxq_create(struct be_ctrl_info *ctrl,
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u16 max_frame_size, u32 if_id, u32 rss)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eth_rx_create *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
		sizeof(*req));

	req->cq_id = cpu_to_le16(cq_id);
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(max_frame_size);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);

	return status;
}

/* Generic destroyer function for all types of queues */
int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
		int queue_type)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
	u8 subsys = 0, opcode = 0;
	int status;

	spin_lock(&ctrl->mbox_lock);

	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		printk(KERN_WARNING DRV_NAME ": bad Q type in Q destroy cmd\n");
		status = -1;
		goto err;
	}
	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	req->id = cpu_to_le16(q->id);

	status = be_mbox_db_ring(ctrl);
err:
	spin_unlock(&ctrl->mbox_lock);

	return status;
}

/* Create an rx filtering policy configuration on an i/f */
int be_cmd_if_create(struct be_ctrl_info *ctrl, u32 flags, u8 *mac,
		bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_if_create *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

	req->capability_flags = cpu_to_le32(flags);
	req->enable_flags = cpu_to_le32(flags);
	if (!pmac_invalid)
		memcpy(req->mac_addr, mac, ETH_ALEN);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
		*if_handle = le32_to_cpu(resp->interface_id);
		if (!pmac_invalid)
			*pmac_id = le32_to_cpu(resp->pmac_id);
	}

	spin_unlock(&ctrl->mbox_lock);
	return status;
}

int be_cmd_if_destroy(struct be_ctrl_info *ctrl, u32 interface_id)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_if_destroy *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));

	req->interface_id = cpu_to_le32(interface_id);
	status = be_mbox_db_ring(ctrl);

	spin_unlock(&ctrl->mbox_lock);

	return status;
}

/* Get stats is a non-embedded command: the request is not embedded inside
 * the WRB but is a separate dma memory block
 */
int be_cmd_get_stats(struct be_ctrl_info *ctrl, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_get_stats *req = nonemb_cmd->va;
	struct be_sge *sge = nonembedded_sgl(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	memset(req, 0, sizeof(*req));

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_GET_STATISTICS, sizeof(*req));
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_get_stats *resp = nonemb_cmd->va;
		be_dws_le_to_cpu(&resp->hw_stats, sizeof(resp->hw_stats));
	}

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
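
/* Query mac speed/duplex/fault; on command failure the caller sees
 * PHY_LINK_SPEED_ZERO */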
int be_cmd_link_status_query(struct be_ctrl_info *ctrl,
			struct be_link_info *link)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_link_status *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
		link->speed = resp->mac_speed;
		link->duplex = resp->mac_duplex;
		link->fault = resp->mac_fault;
	} else {
		link->speed = PHY_LINK_SPEED_ZERO;
	}

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
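
/* Copy the firmware version string (up to FW_VER_LEN bytes) into fw_ver */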
int be_cmd_get_fw_ver(struct be_ctrl_info *ctrl, char *fw_ver)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_get_fw_version *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
		strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
	}

	spin_unlock(&ctrl->mbox_lock);
	return status;
}

/* Set the EQ delay interval of an EQ to the specified value */
int be_cmd_modify_eqd(struct be_ctrl_info *ctrl, u32 eq_id, u32 eqd)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_modify_eq_delay *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));

	req->num_eq = cpu_to_le32(1);
	req->delay[0].eq_id = cpu_to_le32(eq_id);
	req->delay[0].phase = 0;
	req->delay[0].delay_multiplier = cpu_to_le32(eqd);

	status = be_mbox_db_ring(ctrl);

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
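
/* Configure an interface's VLAN filter: either a table of up to "num"
 * tags, or vlan-promiscuous mode in which the table is ignored */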
int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id, u16 *vtag_array,
			u32 num, bool untagged, bool promiscuous)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_vlan_config *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

	req->interface_id = if_id;
	req->promiscuous = promiscuous;
	req->untagged = untagged;
	req->num_vlan = num;
	if (!promiscuous) {
		memcpy(req->normal_vlan, vtag_array,
			req->num_vlan * sizeof(vtag_array[0]));
	}

	status = be_mbox_db_ring(ctrl);

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
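
/* Enable or disable promiscuous mode on one of the two physical ports */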
int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, u8 port_num, bool en)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_promiscuous_config *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_PROMISCUOUS, sizeof(*req));

	if (port_num)
		req->port1_promiscuous = en;
	else
		req->port0_promiscuous = en;

	status = be_mbox_db_ring(ctrl);

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
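
/* Program the multicast filter with "num" addresses, or put the
 * interface in multicast-promiscuous mode */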
int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id, u8 *mac_table,
			u32 num, bool promiscuous)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_mcast_mac_config *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));

	req->interface_id = if_id;
	req->promiscuous = promiscuous;
	if (!promiscuous) {
		req->num_mac = cpu_to_le16(num);
		if (num)
			memcpy(req->mac, mac_table, ETH_ALEN * num);
	}

	status = be_mbox_db_ring(ctrl);

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
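
/* Set the pause-frame (flow control) behaviour for TX and RX */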
int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_set_flow_control *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);

	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));

	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

	status = be_mbox_db_ring(ctrl);

	spin_unlock(&ctrl->mbox_lock);
	return status;
}

int be_cmd_get_flow_control(struct be_ctrl_info *ctrl, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_get_flow_control *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);

	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);
		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
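
/* Ask the firmware which physical port this PCI function is attached to */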
int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_query_fw_cfg *req = embedded_payload(wrb);
	int status;

	spin_lock(&ctrl->mbox_lock);

	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));

	status = be_mbox_db_ring(ctrl);
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
		*port_num = le32_to_cpu(resp->phys_port);
	}

	spin_unlock(&ctrl->mbox_lock);
	return status;
}