blob: 8fa9a709d9fe4d4abcbd817f0c1856ace9bd3eaa [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparde294aedc2010-02-19 13:54:58 +00002 * Copyright (C) 2005 - 2010 ServerEngines
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17
18#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000019#include "be_cmds.h"
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020
Sathya Perla8788fdc2009-07-27 22:52:03 +000021static void be_mcc_notify(struct be_adapter *adapter)
Sathya Perla5fb379e2009-06-18 00:02:59 +000022{
Sathya Perla8788fdc2009-07-27 22:52:03 +000023 struct be_queue_info *mccq = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +000024 u32 val = 0;
25
26 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
27 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +000028
29 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +000030 iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
Sathya Perla5fb379e2009-06-18 00:02:59 +000031}
32
33/* To check if valid bit is set, check the entire word as we don't know
34 * the endianness of the data (old entry is host endian while a new entry is
35 * little endian) */
Sathya Perlaefd2e402009-07-27 22:53:10 +000036static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
Sathya Perla5fb379e2009-06-18 00:02:59 +000037{
38 if (compl->flags != 0) {
39 compl->flags = le32_to_cpu(compl->flags);
40 BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
41 return true;
42 } else {
43 return false;
44 }
45}
46
/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	/* Mark the entry consumed; be_mcc_compl_is_new() will now see it
	 * as old until hw rewrites it */
	compl->flags = 0;
}
52
/* Process one MCC completion entry: signal any waiter for a flash write,
 * post-process statistics responses, and warn on unexpected errors.
 * Returns the base completion status extracted from the CQE.
 */
static int be_mcc_compl_process(struct be_adapter *adapter,
	struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;

	/* Flash writes are waited on via a completion object; wake the
	 * waiter regardless of success/failure so it never hangs */
	if ((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) &&
		(compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
		adapter->flash_status = compl_status;
		complete(&adapter->flash_compl);
	}

	if (compl_status == MCC_STATUS_SUCCESS) {
		if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
			/* Stats response lands in the pre-allocated DMA
			 * buffer; swap it to host endian before use */
			struct be_cmd_resp_get_stats *resp =
						adapter->stats_cmd.va;
			be_dws_le_to_cpu(&resp->hw_stats,
						sizeof(resp->hw_stats));
			netdev_stats_update(adapter);
			adapter->stats_ioctl_sent = false;
		}
	} else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) &&
		   (compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) {
		/* NOT_SUPPORTED and failed MAC queries are expected in some
		 * configurations; everything else is worth a warning */
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
				CQE_STATUS_EXTD_MASK;
		dev_warn(&adapter->pdev->dev,
		"Error in cmd completion - opcode %d, compl %d, extd %d\n",
			compl->tag0, compl_status, extd_status);
	}
	return compl_status;
}
90
Sathya Perlaa8f447bd2009-06-18 00:10:27 +000091/* Link state evt is a string of bytes; no need for endian swapping */
Sathya Perla8788fdc2009-07-27 22:52:03 +000092static void be_async_link_state_process(struct be_adapter *adapter,
Sathya Perlaa8f447bd2009-06-18 00:10:27 +000093 struct be_async_event_link_state *evt)
94{
Sathya Perla8788fdc2009-07-27 22:52:03 +000095 be_link_status_update(adapter,
96 evt->port_link_status == ASYNC_EVENT_LINK_UP);
Sathya Perlaa8f447bd2009-06-18 00:10:27 +000097}
98
Somnath Koturcc4ce022010-10-21 07:11:14 -070099/* Grp5 CoS Priority evt */
100static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
101 struct be_async_event_grp5_cos_priority *evt)
102{
103 if (evt->valid) {
104 adapter->vlan_prio_bmap = evt->available_priority_bmap;
Ajit Khaparde60964dd2011-02-11 13:37:25 +0000105 adapter->recommended_prio &= ~VLAN_PRIO_MASK;
Somnath Koturcc4ce022010-10-21 07:11:14 -0700106 adapter->recommended_prio =
107 evt->reco_default_priority << VLAN_PRIO_SHIFT;
108 }
109}
110
111/* Grp5 QOS Speed evt */
112static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
113 struct be_async_event_grp5_qos_link_speed *evt)
114{
115 if (evt->physical_port == adapter->port_num) {
116 /* qos_link_speed is in units of 10 Mbps */
117 adapter->link_speed = evt->qos_link_speed * 10;
118 }
119}
120
121static void be_async_grp5_evt_process(struct be_adapter *adapter,
122 u32 trailer, struct be_mcc_compl *evt)
123{
124 u8 event_type = 0;
125
126 event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
127 ASYNC_TRAILER_EVENT_TYPE_MASK;
128
129 switch (event_type) {
130 case ASYNC_EVENT_COS_PRIORITY:
131 be_async_grp5_cos_priority_process(adapter,
132 (struct be_async_event_grp5_cos_priority *)evt);
133 break;
134 case ASYNC_EVENT_QOS_SPEED:
135 be_async_grp5_qos_speed_process(adapter,
136 (struct be_async_event_grp5_qos_link_speed *)evt);
137 break;
138 default:
139 dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
140 break;
141 }
142}
143
Sathya Perlaa8f447bd2009-06-18 00:10:27 +0000144static inline bool is_link_state_evt(u32 trailer)
145{
Eric Dumazet807540b2010-09-23 05:40:09 +0000146 return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
Sathya Perlaa8f447bd2009-06-18 00:10:27 +0000147 ASYNC_TRAILER_EVENT_CODE_MASK) ==
Eric Dumazet807540b2010-09-23 05:40:09 +0000148 ASYNC_EVENT_CODE_LINK_STATE;
Sathya Perlaa8f447bd2009-06-18 00:10:27 +0000149}
Sathya Perla5fb379e2009-06-18 00:02:59 +0000150
Somnath Koturcc4ce022010-10-21 07:11:14 -0700151static inline bool is_grp5_evt(u32 trailer)
152{
153 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
154 ASYNC_TRAILER_EVENT_CODE_MASK) ==
155 ASYNC_EVENT_CODE_GRP_5);
156}
157
Sathya Perlaefd2e402009-07-27 22:53:10 +0000158static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
Sathya Perla5fb379e2009-06-18 00:02:59 +0000159{
Sathya Perla8788fdc2009-07-27 22:52:03 +0000160 struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
Sathya Perlaefd2e402009-07-27 22:53:10 +0000161 struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
Sathya Perla5fb379e2009-06-18 00:02:59 +0000162
163 if (be_mcc_compl_is_new(compl)) {
164 queue_tail_inc(mcc_cq);
165 return compl;
166 }
167 return NULL;
168}
169
/* Re-arm the MCC CQ so async event processing resumes. */
void be_async_mcc_enable(struct be_adapter *adapter)
{
	/* Serialize against the CQ processing path */
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	/* Subsequent notifies will keep re-arming the CQ */
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}
179
/* Stop re-arming the MCC CQ; async event notifications cease. */
void be_async_mcc_disable(struct be_adapter *adapter)
{
	adapter->mcc_obj.rearm_cq = false;
}
184
/* Drain the MCC completion queue: dispatch async events and process
 * command completions. Returns the number of entries consumed; the status
 * of the last processed command completion is stored in *status.
 */
int be_process_mcc(struct be_adapter *adapter, int *status)
{
	struct be_mcc_compl *compl;
	int num = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock_bh(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(compl->flags))
				be_async_link_state_process(adapter,
				(struct be_async_event_link_state *) compl);
			else if (is_grp5_evt(compl->flags))
				be_async_grp5_evt_process(adapter,
				compl->flags, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
				/* A command completed; a WRB slot frees up */
				*status = be_mcc_compl_process(adapter, compl);
				atomic_dec(&mcc_obj->q.used);
		}
		/* Clear the valid word so the entry can be reused */
		be_mcc_compl_use(compl);
		num++;
	}

	spin_unlock_bh(&adapter->mcc_cq_lock);
	return num;
}
212
Sathya Perla6ac7b682009-06-18 00:05:54 +0000213/* Wait till no more pending mcc requests are present */
Sathya Perlab31c50a2009-09-17 10:30:13 -0700214static int be_mcc_wait_compl(struct be_adapter *adapter)
Sathya Perla6ac7b682009-06-18 00:05:54 +0000215{
Sathya Perlab31c50a2009-09-17 10:30:13 -0700216#define mcc_timeout 120000 /* 12s timeout */
Sathya Perlaf31e50a2010-03-02 03:56:39 -0800217 int i, num, status = 0;
218 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
Sathya Perlab31c50a2009-09-17 10:30:13 -0700219
Sathya Perlaf31e50a2010-03-02 03:56:39 -0800220 for (i = 0; i < mcc_timeout; i++) {
221 num = be_process_mcc(adapter, &status);
222 if (num)
223 be_cq_notify(adapter, mcc_obj->cq.id,
224 mcc_obj->rearm_cq, num);
225
226 if (atomic_read(&mcc_obj->q.used) == 0)
Sathya Perla6ac7b682009-06-18 00:05:54 +0000227 break;
228 udelay(100);
229 }
Sathya Perlab31c50a2009-09-17 10:30:13 -0700230 if (i == mcc_timeout) {
Sathya Perla5f0b8492009-07-27 22:52:56 +0000231 dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
Sathya Perlab31c50a2009-09-17 10:30:13 -0700232 return -1;
233 }
Sathya Perlaf31e50a2010-03-02 03:56:39 -0800234 return status;
Sathya Perla6ac7b682009-06-18 00:05:54 +0000235}
236
/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	/* Ring the MCCQ doorbell, then poll until the queue drains */
	be_mcc_notify(adapter);
	return be_mcc_wait_compl(adapter);
}
243
/* Poll the mailbox doorbell until its ready bit is set.
 * Returns 0 when ready, -1 on a surprise PCI removal (all-ones read)
 * or after ~4s of polling (dumping UE state in that case).
 */
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	do {
		ready = ioread32(db);
		/* All-ones means the device fell off the bus */
		if (ready == 0xffffffff) {
			dev_err(&adapter->pdev->dev,
				"pci slot disconnected\n");
			return -1;
		}

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
			be_detect_dump_ue(adapter);
			return -1;
		}

		/* Sleep ~1ms between polls rather than busy-waiting */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(1));
		msecs++;
	} while (true);

	return 0;
}
274
/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* Step 1: write the high half of the mailbox DMA address */
	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* Step 2: write the low half; this kicks off command execution */
	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}
324
Sathya Perla8788fdc2009-07-27 22:52:03 +0000325static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700326{
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000327 u32 sem;
328
329 if (lancer_chip(adapter))
330 sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
331 else
332 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700333
334 *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
335 if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
336 return -1;
337 else
338 return 0;
339}
340
Sathya Perla8788fdc2009-07-27 22:52:03 +0000341int be_cmd_POST(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700342{
Sathya Perla43a04fdc2009-10-14 20:21:17 +0000343 u16 stage;
344 int status, timeout = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700345
Sathya Perla43a04fdc2009-10-14 20:21:17 +0000346 do {
347 status = be_POST_stage_get(adapter, &stage);
348 if (status) {
349 dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
350 stage);
351 return -1;
352 } else if (stage != POST_STAGE_ARMFW_RDY) {
353 set_current_state(TASK_INTERRUPTIBLE);
354 schedule_timeout(2 * HZ);
355 timeout += 2;
356 } else {
357 return 0;
358 }
Sathya Perlad938a702010-05-26 00:33:43 -0700359 } while (timeout < 40);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700360
Sathya Perla43a04fdc2009-10-14 20:21:17 +0000361 dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
362 return -1;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700363}
364
/* Payload area of an embedded (non-SGL) WRB */
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}
369
/* First scatter-gather element of a non-embedded WRB */
static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}
374
/* Don't touch the hdr after it's prepared */
static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
				bool embedded, u8 sge_cnt, u32 opcode)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
				MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	/* tag0 is echoed back in the completion (see be_mcc_compl_process) */
	wrb->tag0 = opcode;
	/* Swap the first 8 dwords of the WRB to little endian for hw */
	be_dws_cpu_to_le(wrb, 8);
}
388
/* Don't touch the hdr after it's prepared */
static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	/* request_length excludes the header itself */
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;
}
398
399static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
400 struct be_dma_mem *mem)
401{
402 int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
403 u64 dma = (u64)mem->dma;
404
405 for (i = 0; i < buf_pages; i++) {
406 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
407 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
408 dma += PAGE_SIZE_4K;
409 }
410}
411
412/* Converts interrupt delay in microseconds to multiplier value */
413static u32 eq_delay_to_mult(u32 usec_delay)
414{
415#define MAX_INTR_RATE 651042
416 const u32 round = 10;
417 u32 multiplier;
418
419 if (usec_delay == 0)
420 multiplier = 0;
421 else {
422 u32 interrupt_rate = 1000000 / usec_delay;
423 /* Max delay, corresponding to the lowest interrupt rate */
424 if (interrupt_rate == 0)
425 multiplier = 1023;
426 else {
427 multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
428 multiplier /= interrupt_rate;
429 /* Round the multiplier to the closest value.*/
430 multiplier = (multiplier + round/2) / round;
431 multiplier = min(multiplier, (u32)1023);
432 }
433 }
434 return multiplier;
435}
436
/* Return the (zeroed) WRB embedded in the mailbox DMA buffer.
 * NOTE(review): callers appear to hold mbox_lock around use — confirm.
 */
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}
445
Sathya Perlab31c50a2009-09-17 10:30:13 -0700446static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
Sathya Perla5fb379e2009-06-18 00:02:59 +0000447{
Sathya Perlab31c50a2009-09-17 10:30:13 -0700448 struct be_queue_info *mccq = &adapter->mcc_obj.q;
449 struct be_mcc_wrb *wrb;
450
Sathya Perla713d03942009-11-22 22:02:45 +0000451 if (atomic_read(&mccq->used) >= mccq->len) {
452 dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
453 return NULL;
454 }
455
Sathya Perlab31c50a2009-09-17 10:30:13 -0700456 wrb = queue_head_node(mccq);
457 queue_head_inc(mccq);
458 atomic_inc(&mccq->used);
459 memset(wrb, 0, sizeof(*wrb));
Sathya Perla5fb379e2009-06-18 00:02:59 +0000460 return wrb;
461}
462
/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	/* Magic init pattern expected by fw; bytes must match exactly */
	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
489
/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	/* Skip hw access entirely after an EEH (PCI) error */
	if (adapter->eeh_err)
		return -EIO;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	/* Magic shutdown pattern expected by fw; bytes must match exactly */
	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
/* Create an event queue in fw and record its id; uses mbox.
 * On success eq->id and eq->created are updated.
 */
int be_cmd_eq_create(struct be_adapter *adapter,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe*/
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	/* Context must be little endian for hw */
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
561
/* Uses mbox */
/* Query a MAC address (permanent factory MAC, or the one currently
 * programmed on if_handle) into mac_addr.
 */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			u8 type, bool permanent, u32 if_handle)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_MAC_QUERY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));

	req->type = type;
	if (permanent) {
		req->permanent = 1;
	} else {
		/* if_id only matters for the currently-active MAC */
		req->if_id = cpu_to_le16((u16) if_handle);
		req->permanent = 0;
	}

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
599
/* Uses synchronous MCCQ */
/* Program mac_addr onto interface if_id; fw returns a pmac_id handle
 * used later to delete the entry. Returns 0 or a negative/fw status.
 */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		u32 if_id, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_ADD);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

	req->hdr.domain = domain;
	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
637
/* Uses synchronous MCCQ */
/* Remove the MAC entry identified by pmac_id from interface if_id. */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_DEL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));

	req->hdr.domain = dom;
	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
670
/* Uses Mbox */
/* Create a completion queue bound to event queue 'eq'; context layout
 * differs between Lancer and earlier (BE) chips. On success cq->id and
 * cq->created are updated.
 */
int be_cmd_cq_create(struct be_adapter *adapter,
		struct be_queue_info *cq, struct be_queue_info *eq,
		bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_CQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (lancer_chip(adapter)) {
		/* Lancer uses the v1 request and its own context layout */
		req->hdr.version = 1;
		req->page_size = 1; /* 1 for 4K */
		AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt,
								coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
								no_delay);
		AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
						__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
								ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
								ctxt, eq->id);
		AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
	} else {
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
								coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
								ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
						__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, solevent,
								ctxt, sol_evts);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
		AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
	}

	/* Context must be little endian for hw */
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}
741
742static u32 be_encoded_q_len(int q_len)
743{
744 u32 len_encoded = fls(q_len); /* log2(len) + 1 */
745 if (len_encoded == 16)
746 len_encoded = 0;
747 return len_encoded;
748}
749
/* Create the MCC (management command) queue on top of the given CQ.
 * Issued over the bootstrap mailbox since the MCC queue itself does not
 * exist yet.  Returns 0 on success; -1 if the mbox lock was interrupted,
 * otherwise the firmware completion status.
 */
int be_cmd_mccq_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	/* The mbox WRB is embedded in the adapter; request payload is inline */
	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MCC_CREATE_EXT);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	/* Lancer chips use a v1 request and a different context layout */
	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
		req->cq_id = cpu_to_le16(cq->id);

		AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
						be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
								ctxt, cq->id);
		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
								 ctxt, 1);

	} else {
		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
						be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
	}

	/* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
	req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
	/* Context must be converted to LE dwords before handing to hw */
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&adapter->mbox_lock);

	return status;
}
809
/* Create an ETH TX ring bound to the given completion queue.
 * Uses the bootstrap mailbox.  On success fills in txq->id and marks the
 * queue created.  Returns 0 or a firmware/lock error.
 */
int be_cmd_txq_create(struct be_adapter *adapter,
			struct be_queue_info *txq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_tx_create *req;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_TX_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
		sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;	/* TX always goes via ULP1 */
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;

	/* Program the ring context: encoded size, valid bit, send CQ id */
	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
		be_encoded_q_len(txq->len));
	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		txq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}
857
/* Create an ETH RX ring attached to CQ @cq_id.
 * Uses mbox.  @frag_size must be a power of two (encoded as log2 below);
 * @rss selects RSS queue membership and *rss_id receives the assigned RSS id.
 */
int be_cmd_rxq_create(struct be_adapter *adapter,
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_RX_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
		sizeof(*req));

	req->cq_id = cpu_to_le16(cq_id);
	req->frag_size = fls(frag_size) - 1;	/* hw wants log2(frag_size) */
	req->num_pages = 2;	/* NOTE(review): fixed 2-page ring per hw spec, presumably */
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(max_frame_size);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
		*rss_id = resp->rss_id;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}
900
/* Generic destroyer function for all types of queues
 * Uses Mbox
 *
 * @queue_type selects the subsystem/opcode pair; an unknown type is a
 * driver bug and triggers BUG().  Returns -EIO after an EEH error,
 * -1 if the mbox lock was interrupted, else the firmware status.
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		int queue_type)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;
	int status;

	/* Don't touch hardware after a PCI EEH (error-recovery) event */
	if (adapter->eeh_err)
		return -EIO;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	/* Map queue type to the matching destroy command */
	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		BUG();
	}

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);

	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);

	return status;
}
957
/* Create an rx filtering policy configuration on an i/f
 * Uses mbox
 *
 * @cap_flags/@en_flags: interface capability/enable bitmaps.
 * @mac: MAC to program unless @pmac_invalid is set.
 * On success returns the new interface handle in *if_handle and, when a
 * MAC was programmed, its pmac id in *pmac_id.
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
		u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_create *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_INTERFACE_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

	req->hdr.domain = domain;	/* PCI function domain (SR-IOV) */
	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);
	req->pmac_invalid = pmac_invalid;
	if (!pmac_invalid)
		memcpy(req->mac_addr, mac, ETH_ALEN);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
		*if_handle = le32_to_cpu(resp->interface_id);
		if (!pmac_invalid)
			*pmac_id = le32_to_cpu(resp->pmac_id);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
999
Sathya Perlab31c50a2009-09-17 10:30:13 -07001000/* Uses mbox */
Ajit Khaparde658681f2011-02-11 13:34:46 +00001001int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001002{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001003 struct be_mcc_wrb *wrb;
1004 struct be_cmd_req_if_destroy *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001005 int status;
1006
Sathya Perlacf588472010-02-14 21:22:01 +00001007 if (adapter->eeh_err)
1008 return -EIO;
1009
Ivan Vecera29849612010-12-14 05:43:19 +00001010 if (mutex_lock_interruptible(&adapter->mbox_lock))
1011 return -1;
Sathya Perlab31c50a2009-09-17 10:30:13 -07001012
1013 wrb = wrb_from_mbox(adapter);
1014 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001015
Ajit Khaparded744b442009-12-03 06:12:06 +00001016 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1017 OPCODE_COMMON_NTWK_INTERFACE_DESTROY);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001018
1019 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1020 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
1021
Ajit Khaparde658681f2011-02-11 13:34:46 +00001022 req->hdr.domain = domain;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001023 req->interface_id = cpu_to_le32(interface_id);
Sathya Perlab31c50a2009-09-17 10:30:13 -07001024
1025 status = be_mbox_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001026
Ivan Vecera29849612010-12-14 05:43:19 +00001027 mutex_unlock(&adapter->mbox_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001028
1029 return status;
1030}
1031
/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC
 *
 * Fires the request and returns immediately; the response lands in
 * @nonemb_cmd and is picked up by the MCC completion path.
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_stats *req;
	struct be_sge *sge;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		/* MCC queue full; caller may retry later */
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_ETH_GET_STATISTICS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_GET_STATISTICS, sizeof(*req));
	/* Point the scatter-gather entry at the external DMA buffer */
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	be_mcc_notify(adapter);
	adapter->stats_ioctl_sent = true;	/* completion path clears this */

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1069
/* Uses synchronous mcc
 *
 * Queries physical link state.  *link_up is set true (with *mac_speed and
 * *link_speed filled in) only when firmware reports a non-zero mac speed;
 * it is preset to false so a failed command reports link down.
 */
int be_cmd_link_status_query(struct be_adapter *adapter,
			bool *link_up, u8 *mac_speed, u16 *link_speed)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	*link_up = false;	/* default: link down unless fw says otherwise */

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
		if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
			*link_up = true;
			*link_speed = le16_to_cpu(resp->link_speed);
			*mac_speed = resp->mac_speed;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1109
Sathya Perlab31c50a2009-09-17 10:30:13 -07001110/* Uses Mbox */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001111int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001112{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001113 struct be_mcc_wrb *wrb;
1114 struct be_cmd_req_get_fw_version *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001115 int status;
1116
Ivan Vecera29849612010-12-14 05:43:19 +00001117 if (mutex_lock_interruptible(&adapter->mbox_lock))
1118 return -1;
Sathya Perlab31c50a2009-09-17 10:30:13 -07001119
1120 wrb = wrb_from_mbox(adapter);
1121 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001122
Ajit Khaparded744b442009-12-03 06:12:06 +00001123 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1124 OPCODE_COMMON_GET_FW_VERSION);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001125
1126 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1127 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));
1128
Sathya Perlab31c50a2009-09-17 10:30:13 -07001129 status = be_mbox_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001130 if (!status) {
1131 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1132 strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
1133 }
1134
Ivan Vecera29849612010-12-14 05:43:19 +00001135 mutex_unlock(&adapter->mbox_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001136 return status;
1137}
1138
/* set the EQ delay interval of an EQ to specified value
 * Uses async mcc
 *
 * Fire-and-forget: the command is posted to the MCC queue and this
 * function does not wait for its completion.
 */
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_modify_eq_delay *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MODIFY_EQ_DELAY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));

	/* One EQ per request; delay_multiplier programs the interrupt delay */
	req->num_eq = cpu_to_le32(1);
	req->delay[0].eq_id = cpu_to_le32(eq_id);
	req->delay[0].phase = 0;
	req->delay[0].delay_multiplier = cpu_to_le32(eqd);

	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1174
/* Uses sycnhronous mcc
 *
 * Program the VLAN filter table for interface @if_id: up to @num tags from
 * @vtag_array, plus untagged/promiscuous policy.  In promiscuous mode the
 * tag array is ignored.
 */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
			u32 num, bool untagged, bool promiscuous)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_VLAN_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

	req->interface_id = if_id;
	req->promiscuous = promiscuous;
	req->untagged = untagged;
	req->num_vlan = num;
	if (!promiscuous) {
		/* Copy only the tags actually in use */
		memcpy(req->normal_vlan, vtag_array,
			req->num_vlan * sizeof(vtag_array[0]));
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1213
/* Uses MCC for this command as it may be called in BH context
 * Uses synchronous mcc
 *
 * Enable/disable promiscuous mode on physical port @port_num (0 or 1).
 */
int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_promiscuous_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_PROMISCUOUS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_PROMISCUOUS, sizeof(*req));

	/* In FW versions X.102.149/X.101.487 and later,
	 * the port setting associated only with the
	 * issuing pci function will take effect
	 */
	if (port_num)
		req->port1_promiscuous = en;
	else
		req->port0_promiscuous = en;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1252
/*
 * Uses MCC for this command as it may be called in BH context
 * (mc == NULL) => multicast promiscous
 *
 * Programs the multicast address list of @netdev onto interface @if_id.
 * The request is non-embedded: it lives in the caller-supplied DMA
 * buffer @mem, referenced by a single SGE.
 */
int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
		struct net_device *netdev, struct be_dma_mem *mem)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcast_mac_config *req = mem->va;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	sge = nonembedded_sgl(wrb);
	memset(req, 0, sizeof(*req));	/* DMA buffer may hold stale data */

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_COMMON_NTWK_MULTICAST_SET);
	sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
	sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(mem->size);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));

	req->interface_id = if_id;
	if (netdev) {
		int i;
		struct netdev_hw_addr *ha;

		req->num_mac = cpu_to_le16(netdev_mc_count(netdev));

		i = 0;
		netdev_for_each_mc_addr(ha, netdev)
			memcpy(req->mac[i++].byte, ha->addr, ETH_ALEN);
	} else {
		/* No device list supplied => multicast promiscuous */
		req->promiscuous = 1;
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1304
Sathya Perlab31c50a2009-09-17 10:30:13 -07001305/* Uses synchrounous mcc */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001306int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001307{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001308 struct be_mcc_wrb *wrb;
1309 struct be_cmd_req_set_flow_control *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001310 int status;
1311
Sathya Perlab31c50a2009-09-17 10:30:13 -07001312 spin_lock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001313
Sathya Perlab31c50a2009-09-17 10:30:13 -07001314 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001315 if (!wrb) {
1316 status = -EBUSY;
1317 goto err;
1318 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001319 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001320
Ajit Khaparded744b442009-12-03 06:12:06 +00001321 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1322 OPCODE_COMMON_SET_FLOW_CONTROL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001323
1324 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1325 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));
1326
1327 req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1328 req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1329
Sathya Perlab31c50a2009-09-17 10:30:13 -07001330 status = be_mcc_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001331
Sathya Perla713d03942009-11-22 22:02:45 +00001332err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07001333 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001334 return status;
1335}
1336
/* Uses sycn mcc
 *
 * Read back the current TX/RX flow-control settings into *tx_fc/*rx_fc.
 * Output parameters are written only on success.
 */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_FLOW_CONTROL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);
		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1371
/* Uses mbox
 *
 * Query firmware configuration: physical port number, function mode and
 * function capability bitmaps.  Outputs are written only on success.
 */
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
		u32 *mode, u32 *caps)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_query_fw_cfg *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODEE_COMMON_QUERY_FIRMWARE_CONFIG == 0 ? 0 :
		OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
		*port_num = le32_to_cpu(resp->phys_port);
		*mode = le32_to_cpu(resp->function_mode);
		*caps = le32_to_cpu(resp->function_caps);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
sarveshwarb14074ea2009-08-05 13:05:24 -07001403
Sathya Perlab31c50a2009-09-17 10:30:13 -07001404/* Uses mbox */
sarveshwarb14074ea2009-08-05 13:05:24 -07001405int be_cmd_reset_function(struct be_adapter *adapter)
1406{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001407 struct be_mcc_wrb *wrb;
1408 struct be_cmd_req_hdr *req;
sarveshwarb14074ea2009-08-05 13:05:24 -07001409 int status;
1410
Ivan Vecera29849612010-12-14 05:43:19 +00001411 if (mutex_lock_interruptible(&adapter->mbox_lock))
1412 return -1;
sarveshwarb14074ea2009-08-05 13:05:24 -07001413
Sathya Perlab31c50a2009-09-17 10:30:13 -07001414 wrb = wrb_from_mbox(adapter);
1415 req = embedded_payload(wrb);
sarveshwarb14074ea2009-08-05 13:05:24 -07001416
Ajit Khaparded744b442009-12-03 06:12:06 +00001417 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1418 OPCODE_COMMON_FUNCTION_RESET);
sarveshwarb14074ea2009-08-05 13:05:24 -07001419
1420 be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
1421 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
1422
Sathya Perlab31c50a2009-09-17 10:30:13 -07001423 status = be_mbox_notify_wait(adapter);
sarveshwarb14074ea2009-08-05 13:05:24 -07001424
Ivan Vecera29849612010-12-14 05:43:19 +00001425 mutex_unlock(&adapter->mbox_lock);
sarveshwarb14074ea2009-08-05 13:05:24 -07001426 return status;
1427}
Ajit Khaparde84517482009-09-04 03:12:16 +00001428
Sathya Perla3abcded2010-10-03 22:12:27 -07001429int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
1430{
1431 struct be_mcc_wrb *wrb;
1432 struct be_cmd_req_rss_config *req;
1433 u32 myhash[10];
1434 int status;
1435
Ivan Vecera29849612010-12-14 05:43:19 +00001436 if (mutex_lock_interruptible(&adapter->mbox_lock))
1437 return -1;
Sathya Perla3abcded2010-10-03 22:12:27 -07001438
1439 wrb = wrb_from_mbox(adapter);
1440 req = embedded_payload(wrb);
1441
1442 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1443 OPCODE_ETH_RSS_CONFIG);
1444
1445 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1446 OPCODE_ETH_RSS_CONFIG, sizeof(*req));
1447
1448 req->if_id = cpu_to_le32(adapter->if_handle);
1449 req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
1450 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
1451 memcpy(req->cpu_table, rsstable, table_size);
1452 memcpy(req->hash, myhash, sizeof(myhash));
1453 be_dws_cpu_to_le(req->hash, sizeof(req->hash));
1454
1455 status = be_mbox_notify_wait(adapter);
1456
Ivan Vecera29849612010-12-14 05:43:19 +00001457 mutex_unlock(&adapter->mbox_lock);
Sathya Perla3abcded2010-10-03 22:12:27 -07001458 return status;
1459}
1460
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07001461/* Uses sync mcc */
1462int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
1463 u8 bcn, u8 sts, u8 state)
1464{
1465 struct be_mcc_wrb *wrb;
1466 struct be_cmd_req_enable_disable_beacon *req;
1467 int status;
1468
1469 spin_lock_bh(&adapter->mcc_lock);
1470
1471 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001472 if (!wrb) {
1473 status = -EBUSY;
1474 goto err;
1475 }
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07001476 req = embedded_payload(wrb);
1477
Ajit Khaparded744b442009-12-03 06:12:06 +00001478 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1479 OPCODE_COMMON_ENABLE_DISABLE_BEACON);
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07001480
1481 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1482 OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));
1483
1484 req->port_num = port_num;
1485 req->beacon_state = state;
1486 req->beacon_duration = bcn;
1487 req->status_duration = sts;
1488
1489 status = be_mcc_notify_wait(adapter);
1490
Sathya Perla713d03942009-11-22 22:02:45 +00001491err:
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07001492 spin_unlock_bh(&adapter->mcc_lock);
1493 return status;
1494}
1495
1496/* Uses sync mcc */
1497int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
1498{
1499 struct be_mcc_wrb *wrb;
1500 struct be_cmd_req_get_beacon_state *req;
1501 int status;
1502
1503 spin_lock_bh(&adapter->mcc_lock);
1504
1505 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001506 if (!wrb) {
1507 status = -EBUSY;
1508 goto err;
1509 }
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07001510 req = embedded_payload(wrb);
1511
Ajit Khaparded744b442009-12-03 06:12:06 +00001512 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1513 OPCODE_COMMON_GET_BEACON_STATE);
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07001514
1515 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1516 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));
1517
1518 req->port_num = port_num;
1519
1520 status = be_mcc_notify_wait(adapter);
1521 if (!status) {
1522 struct be_cmd_resp_get_beacon_state *resp =
1523 embedded_payload(wrb);
1524 *state = resp->beacon_state;
1525 }
1526
Sathya Perla713d03942009-11-22 22:02:45 +00001527err:
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07001528 spin_unlock_bh(&adapter->mcc_lock);
1529 return status;
1530}
1531
/* Writes an image held in the DMA buffer @cmd to the adapter's flash.
 * Posts the request on the MCC queue, then drops the MCC lock and sleeps
 * on adapter->flash_compl for up to 12 s before reading the result from
 * adapter->flash_status.
 * NOTE(review): the completion signalling and flash_status update happen
 * in the MCC completion path outside this function — presumably matched
 * via wrb->tag1; confirm against the completion handler.
 * Returns -EBUSY if no WRB is free, -1 on timeout, else the status the
 * completion path stored in adapter->flash_status.
 */
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
			u32 flash_type, u32 flash_opcode, u32 buf_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	/* Reset before issuing; read back after the wait below */
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}
	/* Non-embedded request: the payload lives in @cmd's DMA buffer */
	req = cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
			OPCODE_COMMON_WRITE_FLASHROM);
	/* tag1 appears to identify this command to the completer — verify */
	wrb->tag1 = CMD_SUBSYSTEM_COMMON;

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
	/* Single SGE describing the whole command buffer */
	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	req->params.op_type = cpu_to_le32(flash_type);
	req->params.op_code = cpu_to_le32(flash_opcode);
	req->params.data_buf_size = cpu_to_le32(buf_size);

	/* Fire the command, then release the lock before the (potentially
	 * long) flash wait so other MCC users are not blocked.
	 */
	be_mcc_notify(adapter);
	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->flash_compl,
			msecs_to_jiffies(12000)))
		status = -1;
	else
		status = adapter->flash_status;

	/* Lock already dropped on this path; return directly */
	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08001580
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00001581int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
1582 int offset)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08001583{
1584 struct be_mcc_wrb *wrb;
1585 struct be_cmd_write_flashrom *req;
1586 int status;
1587
1588 spin_lock_bh(&adapter->mcc_lock);
1589
1590 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001591 if (!wrb) {
1592 status = -EBUSY;
1593 goto err;
1594 }
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08001595 req = embedded_payload(wrb);
1596
Ajit Khaparded744b442009-12-03 06:12:06 +00001597 be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
1598 OPCODE_COMMON_READ_FLASHROM);
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08001599
1600 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1601 OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);
1602
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00001603 req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08001604 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
Ajit Khaparde8b93b712010-03-31 01:57:10 +00001605 req->params.offset = cpu_to_le32(offset);
1606 req->params.data_buf_size = cpu_to_le32(0x4);
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08001607
1608 status = be_mcc_notify_wait(adapter);
1609 if (!status)
1610 memcpy(flashed_crc, req->params.data_buf, 4);
1611
Sathya Perla713d03942009-11-22 22:02:45 +00001612err:
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08001613 spin_unlock_bh(&adapter->mcc_lock);
1614 return status;
1615}
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00001616
Dan Carpenterc196b022010-05-26 04:47:39 +00001617int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00001618 struct be_dma_mem *nonemb_cmd)
1619{
1620 struct be_mcc_wrb *wrb;
1621 struct be_cmd_req_acpi_wol_magic_config *req;
1622 struct be_sge *sge;
1623 int status;
1624
1625 spin_lock_bh(&adapter->mcc_lock);
1626
1627 wrb = wrb_from_mccq(adapter);
1628 if (!wrb) {
1629 status = -EBUSY;
1630 goto err;
1631 }
1632 req = nonemb_cmd->va;
1633 sge = nonembedded_sgl(wrb);
1634
1635 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
1636 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);
1637
1638 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1639 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));
1640 memcpy(req->magic_mac, mac, ETH_ALEN);
1641
1642 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
1643 sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
1644 sge->len = cpu_to_le32(nonemb_cmd->size);
1645
1646 status = be_mcc_notify_wait(adapter);
1647
1648err:
1649 spin_unlock_bh(&adapter->mcc_lock);
1650 return status;
1651}
Suresh Rff33a6e2009-12-03 16:15:52 -08001652
Sarveshwar Bandifced9992009-12-23 04:41:44 +00001653int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
1654 u8 loopback_type, u8 enable)
1655{
1656 struct be_mcc_wrb *wrb;
1657 struct be_cmd_req_set_lmode *req;
1658 int status;
1659
1660 spin_lock_bh(&adapter->mcc_lock);
1661
1662 wrb = wrb_from_mccq(adapter);
1663 if (!wrb) {
1664 status = -EBUSY;
1665 goto err;
1666 }
1667
1668 req = embedded_payload(wrb);
1669
1670 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1671 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);
1672
1673 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
1674 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
1675 sizeof(*req));
1676
1677 req->src_port = port_num;
1678 req->dest_port = port_num;
1679 req->loopback_type = loopback_type;
1680 req->loopback_state = enable;
1681
1682 status = be_mcc_notify_wait(adapter);
1683err:
1684 spin_unlock_bh(&adapter->mcc_lock);
1685 return status;
1686}
1687
Suresh Rff33a6e2009-12-03 16:15:52 -08001688int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
1689 u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
1690{
1691 struct be_mcc_wrb *wrb;
1692 struct be_cmd_req_loopback_test *req;
1693 int status;
1694
1695 spin_lock_bh(&adapter->mcc_lock);
1696
1697 wrb = wrb_from_mccq(adapter);
1698 if (!wrb) {
1699 status = -EBUSY;
1700 goto err;
1701 }
1702
1703 req = embedded_payload(wrb);
1704
1705 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1706 OPCODE_LOWLEVEL_LOOPBACK_TEST);
1707
1708 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
1709 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
Sathya Perla3ffd0512010-06-01 00:19:33 -07001710 req->hdr.timeout = cpu_to_le32(4);
Suresh Rff33a6e2009-12-03 16:15:52 -08001711
1712 req->pattern = cpu_to_le64(pattern);
1713 req->src_port = cpu_to_le32(port_num);
1714 req->dest_port = cpu_to_le32(port_num);
1715 req->pkt_size = cpu_to_le32(pkt_size);
1716 req->num_pkts = cpu_to_le32(num_pkts);
1717 req->loopback_type = cpu_to_le32(loopback_type);
1718
1719 status = be_mcc_notify_wait(adapter);
1720 if (!status) {
1721 struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
1722 status = le32_to_cpu(resp->status);
1723 }
1724
1725err:
1726 spin_unlock_bh(&adapter->mcc_lock);
1727 return status;
1728}
1729
1730int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
1731 u32 byte_cnt, struct be_dma_mem *cmd)
1732{
1733 struct be_mcc_wrb *wrb;
1734 struct be_cmd_req_ddrdma_test *req;
1735 struct be_sge *sge;
1736 int status;
1737 int i, j = 0;
1738
1739 spin_lock_bh(&adapter->mcc_lock);
1740
1741 wrb = wrb_from_mccq(adapter);
1742 if (!wrb) {
1743 status = -EBUSY;
1744 goto err;
1745 }
1746 req = cmd->va;
1747 sge = nonembedded_sgl(wrb);
1748 be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
1749 OPCODE_LOWLEVEL_HOST_DDR_DMA);
1750 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
1751 OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);
1752
1753 sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
1754 sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
1755 sge->len = cpu_to_le32(cmd->size);
1756
1757 req->pattern = cpu_to_le64(pattern);
1758 req->byte_count = cpu_to_le32(byte_cnt);
1759 for (i = 0; i < byte_cnt; i++) {
1760 req->snd_buff[i] = (u8)(pattern >> (j*8));
1761 j++;
1762 if (j > 7)
1763 j = 0;
1764 }
1765
1766 status = be_mcc_notify_wait(adapter);
1767
1768 if (!status) {
1769 struct be_cmd_resp_ddrdma_test *resp;
1770 resp = cmd->va;
1771 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
1772 resp->snd_err) {
1773 status = -1;
1774 }
1775 }
1776
1777err:
1778 spin_unlock_bh(&adapter->mcc_lock);
1779 return status;
1780}
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08001781
Dan Carpenterc196b022010-05-26 04:47:39 +00001782int be_cmd_get_seeprom_data(struct be_adapter *adapter,
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08001783 struct be_dma_mem *nonemb_cmd)
1784{
1785 struct be_mcc_wrb *wrb;
1786 struct be_cmd_req_seeprom_read *req;
1787 struct be_sge *sge;
1788 int status;
1789
1790 spin_lock_bh(&adapter->mcc_lock);
1791
1792 wrb = wrb_from_mccq(adapter);
Ajit Khapardee45ff012011-02-04 17:18:28 +00001793 if (!wrb) {
1794 status = -EBUSY;
1795 goto err;
1796 }
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08001797 req = nonemb_cmd->va;
1798 sge = nonembedded_sgl(wrb);
1799
1800 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
1801 OPCODE_COMMON_SEEPROM_READ);
1802
1803 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1804 OPCODE_COMMON_SEEPROM_READ, sizeof(*req));
1805
1806 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
1807 sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
1808 sge->len = cpu_to_le32(nonemb_cmd->size);
1809
1810 status = be_mcc_notify_wait(adapter);
1811
Ajit Khapardee45ff012011-02-04 17:18:28 +00001812err:
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08001813 spin_unlock_bh(&adapter->mcc_lock);
1814 return status;
1815}
Ajit Khapardeee3cb622010-07-01 03:51:00 +00001816
1817int be_cmd_get_phy_info(struct be_adapter *adapter, struct be_dma_mem *cmd)
1818{
1819 struct be_mcc_wrb *wrb;
1820 struct be_cmd_req_get_phy_info *req;
1821 struct be_sge *sge;
1822 int status;
1823
1824 spin_lock_bh(&adapter->mcc_lock);
1825
1826 wrb = wrb_from_mccq(adapter);
1827 if (!wrb) {
1828 status = -EBUSY;
1829 goto err;
1830 }
1831
1832 req = cmd->va;
1833 sge = nonembedded_sgl(wrb);
1834
1835 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
1836 OPCODE_COMMON_GET_PHY_DETAILS);
1837
1838 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1839 OPCODE_COMMON_GET_PHY_DETAILS,
1840 sizeof(*req));
1841
1842 sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
1843 sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
1844 sge->len = cpu_to_le32(cmd->size);
1845
1846 status = be_mcc_notify_wait(adapter);
1847err:
1848 spin_unlock_bh(&adapter->mcc_lock);
1849 return status;
1850}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001851
1852int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
1853{
1854 struct be_mcc_wrb *wrb;
1855 struct be_cmd_req_set_qos *req;
1856 int status;
1857
1858 spin_lock_bh(&adapter->mcc_lock);
1859
1860 wrb = wrb_from_mccq(adapter);
1861 if (!wrb) {
1862 status = -EBUSY;
1863 goto err;
1864 }
1865
1866 req = embedded_payload(wrb);
1867
1868 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1869 OPCODE_COMMON_SET_QOS);
1870
1871 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1872 OPCODE_COMMON_SET_QOS, sizeof(*req));
1873
1874 req->hdr.domain = domain;
Ajit Khaparde6bff57a2011-02-11 13:33:02 +00001875 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
1876 req->max_bps_nic = cpu_to_le32(bps);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001877
1878 status = be_mcc_notify_wait(adapter);
1879
1880err:
1881 spin_unlock_bh(&adapter->mcc_lock);
1882 return status;
1883}