/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "be.h"
#include "be_cmds.h"

/* Must be a power of 2 or else MODULO will BUG_ON */
static int be_get_temp_freq = 64;

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

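/* Notify the firmware, via the MCC doorbell, that one more WRB has been
 * posted on the MCC queue */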
static void be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	if (adapter->eeh_err) {
		dev_info(&adapter->pdev->dev,
			"Error in Card Detected! Cannot issue commands\n");
		return;
	}

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	if (compl->flags != 0) {
		compl->flags = le32_to_cpu(compl->flags);
		BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else {
		return false;
	}
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

static int be_mcc_compl_process(struct be_adapter *adapter,
		struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;

	if (((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) ||
		(compl->tag0 == OPCODE_COMMON_WRITE_OBJECT)) &&
		(compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
		adapter->flash_status = compl_status;
		complete(&adapter->flash_compl);
	}

	if (compl_status == MCC_STATUS_SUCCESS) {
		if (((compl->tag0 == OPCODE_ETH_GET_STATISTICS) ||
			(compl->tag0 == OPCODE_ETH_GET_PPORT_STATS)) &&
			(compl->tag1 == CMD_SUBSYSTEM_ETH)) {
			be_parse_stats(adapter);
			adapter->stats_cmd_sent = false;
		}
		if (compl->tag0 ==
			OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES) {
			struct be_mcc_wrb *mcc_wrb =
				queue_index_node(&adapter->mcc_obj.q,
						compl->tag1);
			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
				embedded_payload(mcc_wrb);
			adapter->drv_stats.be_on_die_temperature =
				resp->on_die_temperature;
		}
	} else {
		if (compl->tag0 == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
			be_get_temp_freq = 0;

		if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
			compl_status == MCC_STATUS_ILLEGAL_REQUEST)
			goto done;

		if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
			dev_warn(&adapter->pdev->dev, "This domain(VM) is not "
				"permitted to execute this cmd (opcode %d)\n",
				compl->tag0);
		} else {
			extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
					CQE_STATUS_EXTD_MASK;
			dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed: "
				"status %d, extd-status %d\n",
				compl->tag0, compl_status, extd_status);
		}
	}
done:
	return compl_status;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
		struct be_async_event_link_state *evt)
{
	be_link_status_update(adapter, evt->port_link_status);
}

/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
		struct be_async_event_grp5_cos_priority *evt)
{
	if (evt->valid) {
		adapter->vlan_prio_bmap = evt->available_priority_bmap;
		adapter->recommended_prio &= ~VLAN_PRIO_MASK;
		adapter->recommended_prio =
			evt->reco_default_priority << VLAN_PRIO_SHIFT;
	}
}

/* Grp5 QOS Speed evt */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
		struct be_async_event_grp5_qos_link_speed *evt)
{
	if (evt->physical_port == adapter->port_num) {
		/* qos_link_speed is in units of 10 Mbps */
		adapter->link_speed = evt->qos_link_speed * 10;
	}
}

/* Grp5 PVID evt */
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
		struct be_async_event_grp5_pvid_state *evt)
{
	if (evt->enabled)
		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
	else
		adapter->pvid = 0;
}

static void be_async_grp5_evt_process(struct be_adapter *adapter,
		u32 trailer, struct be_mcc_compl *evt)
{
	u8 event_type = 0;

	event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
		ASYNC_TRAILER_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_EVENT_COS_PRIORITY:
		be_async_grp5_cos_priority_process(adapter,
			(struct be_async_event_grp5_cos_priority *)evt);
		break;
	case ASYNC_EVENT_QOS_SPEED:
		be_async_grp5_qos_speed_process(adapter,
			(struct be_async_event_grp5_qos_link_speed *)evt);
		break;
	case ASYNC_EVENT_PVID_STATE:
		be_async_grp5_pvid_state_process(adapter,
			(struct be_async_event_grp5_pvid_state *)evt);
		break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
		break;
	}
}

static inline bool is_link_state_evt(u32 trailer)
{
	return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
				ASYNC_EVENT_CODE_LINK_STATE;
}

static inline bool is_grp5_evt(u32 trailer)
{
	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
				ASYNC_EVENT_CODE_GRP_5);
}

static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
	adapter->mcc_obj.rearm_cq = false;
}

int be_process_mcc(struct be_adapter *adapter, int *status)
{
	struct be_mcc_compl *compl;
	int num = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock_bh(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(compl->flags))
				be_async_link_state_process(adapter,
				    (struct be_async_event_link_state *) compl);
			else if (is_grp5_evt(compl->flags))
				be_async_grp5_evt_process(adapter,
						compl->flags, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			*status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	spin_unlock_bh(&adapter->mcc_cq_lock);
	return num;
}

/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout		120000 /* 12s timeout */
	int i, num, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	if (adapter->eeh_err)
		return -EIO;

	for (i = 0; i < mcc_timeout; i++) {
		num = be_process_mcc(adapter, &status);
		if (num)
			be_cq_notify(adapter, mcc_obj->cq.id,
				mcc_obj->rearm_cq, num);

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
		return -1;
	}
	return status;
}

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	be_mcc_notify(adapter);
	return be_mcc_wait_compl(adapter);
}

static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	if (adapter->eeh_err) {
		dev_err(&adapter->pdev->dev,
			"Error detected in card. Cannot issue commands\n");
		return -EIO;
	}

	do {
		ready = ioread32(db);
		if (ready == 0xffffffff) {
			dev_err(&adapter->pdev->dev,
				"pci slot disconnected\n");
			return -1;
		}

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
			be_detect_dump_ue(adapter);
			return -1;
		}

		msleep(1);
		msecs++;
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}

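/* Reads the POST stage from the MPU semaphore register; returns -1 if the
 * error bit is set */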
static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
	u32 sem;

	if (lancer_chip(adapter))
		sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
	else
		sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);

	*stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
	if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
		return -1;
	else
		return 0;
}

int be_cmd_POST(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;
	struct device *dev = &adapter->pdev->dev;

	do {
		status = be_POST_stage_get(adapter, &stage);
		if (status) {
			dev_err(dev, "POST error; stage=0x%x\n", stage);
			return -1;
		} else if (stage != POST_STAGE_ARMFW_RDY) {
			if (msleep_interruptible(2000)) {
				dev_err(dev, "Waiting for POST aborted\n");
				return -EINTR;
			}
			timeout += 2;
		} else {
			return 0;
		}
	} while (timeout < 60);

	dev_err(dev, "POST timeout; stage=0x%x\n", stage);
	return -1;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				u8 subsystem, u8 opcode, int cmd_len,
				struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
{
	struct be_sge *sge;

	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;

	wrb->tag0 = opcode;
	wrb->tag1 = subsystem;
	wrb->payload_length = cmd_len;
	if (mem) {
		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
			MCC_WRB_SGE_CNT_SHIFT;
		sge = nonembedded_sgl(wrb);
		sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
		sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
		sge->len = cpu_to_le32(mem->size);
	} else
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	be_dws_cpu_to_le(wrb, 8);
}

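/* Fills in the physical addresses of the 4K pages spanned by a DMA-able
 * buffer, up to max_pages entries */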
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
			struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

/* Converts interrupt delay in microseconds to multiplier value */
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE			651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;
		/* Max delay, corresponding to the lowest interrupt rate */
		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			/* Round the multiplier to the closest value */
			multiplier = (multiplier + round/2) / round;
			multiplier = min(multiplier, (u32)1023);
		}
	}
	return multiplier;
}

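/* Returns the zeroed-out WRB embedded in the mailbox memory; callers hold
 * mbox_lock */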
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

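/* Allocates and zeroes the next free WRB on the MCC queue, or returns NULL
 * when the queue is full; callers hold mcc_lock */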
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (atomic_read(&mccq->used) >= mccq->len) {
		dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
		return NULL;
	}

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (adapter->eeh_err)
		return -EIO;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
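
/* Uses Mbox */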
int be_cmd_eq_create(struct be_adapter *adapter,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe */
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Use MCC */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			u8 type, bool permanent, u32 if_handle)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
	req->type = type;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16) if_handle);
		req->permanent = 0;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		u32 if_id, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);

	req->hdr.domain = domain;
	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);

	if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
		status = -EPERM;

	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	if (pmac_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL);

	req->hdr.domain = dom;
	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter,
		struct be_queue_info *cq, struct be_queue_info *eq,
		bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (lancer_chip(adapter)) {
		req->hdr.version = 2;
		req->page_size = 1; /* 1 for 4K */
		AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
						no_delay);
		AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
						__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
								ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
								ctxt, eq->id);
		AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
	} else {
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
								coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
								ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
						__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, solevent,
								ctxt, sol_evts);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
		AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */
	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}

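/* Uses Mbox */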
int be_cmd_mccq_ext_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_ext_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
		req->cq_id = cpu_to_le16(cq->id);

		AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
						be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
								ctxt, cq->id);
		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
								ctxt, 1);
	} else {
		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
						be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
	}

	/* Subscribe to Link State and Group 5 Events (bits 1 and 5 set) */
	req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&adapter->mbox_lock);

	return status;
}

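/* Uses Mbox */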
int be_cmd_mccq_org_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
			be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

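/* Try the extended MCCQ-create command (which subscribes to async events)
 * first; if the firmware on a non-Lancer chip does not support it, fall back
 * to the original MCCQ-create variant */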
int be_cmd_mccq_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	int status;

	status = be_cmd_mccq_ext_create(adapter, mccq, cq);
	if (status && !lancer_chip(adapter)) {
		dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
			"or newer to avoid conflicting priorities between NIC "
			"and FCoE traffic");
		status = be_cmd_mccq_org_create(adapter, mccq, cq);
	}
	return status;
}

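/* Uses Mbox */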
int be_cmd_txq_create(struct be_adapter *adapter,
			struct be_queue_info *txq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_tx_create *req;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL);

	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
		AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
					adapter->if_handle);
	}

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;

	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
		be_encoded_q_len(txq->len));
	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		txq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

/* Uses MCC */
int be_cmd_rxq_create(struct be_adapter *adapter,
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);

	req->cq_id = cpu_to_le16(cq_id);
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(max_frame_size);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
		*rss_id = resp->rss_id;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		int queue_type)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;
	int status;

	if (adapter->eeh_err)
		return -EIO;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		BUG();
	}

	be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
				NULL);
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);
	if (!status)
		q->created = false;

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses MCC */
int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
	req->id = cpu_to_le16(q->id);

	status = be_mcc_notify_wait(adapter);
	if (!status)
		q->created = false;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Create an rx filtering policy configuration on an i/f
 * Uses MCCQ
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		u8 *mac, u32 *if_handle, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_create *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), wrb, NULL);
	req->hdr.domain = domain;
	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);
	if (mac)
		memcpy(req->mac_addr, mac, ETH_ALEN);
	else
		req->pmac_invalid = true;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
		*if_handle = le32_to_cpu(resp->interface_id);
		if (mac)
			*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses MCCQ */
int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_destroy *req;
	int status;

	if (adapter->eeh_err)
		return -EIO;

	if (interface_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
	req->hdr.domain = domain;
	req->interface_id = cpu_to_le32(interface_id);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *hdr;
	int status = 0;

	if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	hdr = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);

	if (adapter->generation == BE_GEN3)
		hdr->version = 1;

	be_mcc_notify(adapter);
	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Lancer Stats */
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
				struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_pport_stats *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
			nonemb_cmd);

	req->cmd_params.params.pport_num = cpu_to_le16(adapter->port_num);
	req->cmd_params.params.reset_stats = 0;

	be_mcc_notify(adapter);
	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

Sathya Perlab31c50a2009-09-17 10:30:13 -07001243/* Uses synchronous mcc */
Sathya Perlaea172a02011-08-02 19:57:42 +00001244int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
1245 u16 *link_speed, u32 dom)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001246{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001247 struct be_mcc_wrb *wrb;
1248 struct be_cmd_req_link_status *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001249 int status;
1250
Sathya Perlab31c50a2009-09-17 10:30:13 -07001251 spin_lock_bh(&adapter->mcc_lock);
1252
1253 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001254 if (!wrb) {
1255 status = -EBUSY;
1256 goto err;
1257 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001258 req = embedded_payload(wrb);
Sathya Perlaa8f447bd2009-06-18 00:10:27 +00001259
Somnath Kotur106df1e2011-10-27 07:12:13 +00001260 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1261 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001262
Sathya Perlab31c50a2009-09-17 10:30:13 -07001263 status = be_mcc_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001264 if (!status) {
1265 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
Sarveshwar Bandi0388f252009-10-28 04:15:20 -07001266 if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
Sarveshwar Bandi0388f252009-10-28 04:15:20 -07001267 *link_speed = le16_to_cpu(resp->link_speed);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00001268 if (mac_speed)
1269 *mac_speed = resp->mac_speed;
Sarveshwar Bandi0388f252009-10-28 04:15:20 -07001270 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001271 }
1272
Sathya Perla713d03942009-11-22 22:02:45 +00001273err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07001274 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001275 return status;
1276}
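
/*
 * A minimal caller sketch: mac_speed may be NULL when only the link speed is
 * of interest (see the NULL check above); dom is passed as 0 here purely for
 * illustration.
 *
 *	u8 mac_speed;
 *	u16 link_speed = 0;
 *
 *	if (!be_cmd_link_status_query(adapter, &mac_speed, &link_speed, 0) &&
 *	    link_speed)
 *		dev_info(&adapter->pdev->dev, "link up, speed code %u\n",
 *			 link_speed);
 */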
1277
Ajit Khaparde609ff3b2011-02-20 11:42:07 +00001278/* Uses async mcc */
1279int be_cmd_get_die_temperature(struct be_adapter *adapter)
1280{
1281 struct be_mcc_wrb *wrb;
1282 struct be_cmd_req_get_cntl_addnl_attribs *req;
Somnath Kotur3de09452011-09-30 07:25:05 +00001283 u16 mccq_index;
Ajit Khaparde609ff3b2011-02-20 11:42:07 +00001284	int status = 0;
1285
1286 spin_lock_bh(&adapter->mcc_lock);
1287
Somnath Kotur3de09452011-09-30 07:25:05 +00001288 mccq_index = adapter->mcc_obj.q.head;
1289
Ajit Khaparde609ff3b2011-02-20 11:42:07 +00001290 wrb = wrb_from_mccq(adapter);
1291 if (!wrb) {
1292 status = -EBUSY;
1293 goto err;
1294 }
1295 req = embedded_payload(wrb);
1296
Somnath Kotur106df1e2011-10-27 07:12:13 +00001297 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1298 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
1299 wrb, NULL);
Ajit Khaparde609ff3b2011-02-20 11:42:07 +00001300
Somnath Kotur3de09452011-09-30 07:25:05 +00001301 wrb->tag1 = mccq_index;
1302
1303 be_mcc_notify(adapter);
Ajit Khaparde609ff3b2011-02-20 11:42:07 +00001304
1305err:
1306 spin_unlock_bh(&adapter->mcc_lock);
1307 return status;
1308}
1309
Somnath Kotur311fddc2011-03-16 21:22:43 +00001310/* Uses synchronous mcc */
1311int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1312{
1313 struct be_mcc_wrb *wrb;
1314 struct be_cmd_req_get_fat *req;
1315 int status;
1316
1317 spin_lock_bh(&adapter->mcc_lock);
1318
1319 wrb = wrb_from_mccq(adapter);
1320 if (!wrb) {
1321 status = -EBUSY;
1322 goto err;
1323 }
1324 req = embedded_payload(wrb);
1325
Somnath Kotur106df1e2011-10-27 07:12:13 +00001326 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1327 OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
Somnath Kotur311fddc2011-03-16 21:22:43 +00001328 req->fat_operation = cpu_to_le32(QUERY_FAT);
1329 status = be_mcc_notify_wait(adapter);
1330 if (!status) {
1331 struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
1332 if (log_size && resp->log_size)
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001333 *log_size = le32_to_cpu(resp->log_size) -
1334 sizeof(u32);
Somnath Kotur311fddc2011-03-16 21:22:43 +00001335 }
1336err:
1337 spin_unlock_bh(&adapter->mcc_lock);
1338 return status;
1339}
1340
1341void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1342{
1343 struct be_dma_mem get_fat_cmd;
1344 struct be_mcc_wrb *wrb;
1345 struct be_cmd_req_get_fat *req;
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001346 u32 offset = 0, total_size, buf_size,
1347 log_offset = sizeof(u32), payload_len;
Somnath Kotur311fddc2011-03-16 21:22:43 +00001348 int status;
1349
1350 if (buf_len == 0)
1351 return;
1352
1353 total_size = buf_len;
1354
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001355 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1356 get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1357 get_fat_cmd.size,
1358 &get_fat_cmd.dma);
1359 if (!get_fat_cmd.va) {
1360 status = -ENOMEM;
1361 dev_err(&adapter->pdev->dev,
1362 "Memory allocation failure while retrieving FAT data\n");
1363 return;
1364 }
1365
Somnath Kotur311fddc2011-03-16 21:22:43 +00001366 spin_lock_bh(&adapter->mcc_lock);
1367
Somnath Kotur311fddc2011-03-16 21:22:43 +00001368 while (total_size) {
1369 buf_size = min(total_size, (u32)60*1024);
1370 total_size -= buf_size;
1371
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001372 wrb = wrb_from_mccq(adapter);
1373 if (!wrb) {
1374 status = -EBUSY;
Somnath Kotur311fddc2011-03-16 21:22:43 +00001375 goto err;
1376 }
1377 req = get_fat_cmd.va;
Somnath Kotur311fddc2011-03-16 21:22:43 +00001378
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001379 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
Somnath Kotur106df1e2011-10-27 07:12:13 +00001380 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1381 OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
1382 &get_fat_cmd);
Somnath Kotur311fddc2011-03-16 21:22:43 +00001383
1384 req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1385 req->read_log_offset = cpu_to_le32(log_offset);
1386 req->read_log_length = cpu_to_le32(buf_size);
1387 req->data_buffer_size = cpu_to_le32(buf_size);
1388
1389 status = be_mcc_notify_wait(adapter);
1390 if (!status) {
1391 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1392 memcpy(buf + offset,
1393 resp->data_buffer,
Somnath Kotur92aa9212011-09-30 07:24:00 +00001394 le32_to_cpu(resp->read_log_length));
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001395 } else {
Somnath Kotur311fddc2011-03-16 21:22:43 +00001396 dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001397 goto err;
1398 }
Somnath Kotur311fddc2011-03-16 21:22:43 +00001399 offset += buf_size;
1400 log_offset += buf_size;
1401 }
1402err:
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001403 pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1404 get_fat_cmd.va,
1405 get_fat_cmd.dma);
Somnath Kotur311fddc2011-03-16 21:22:43 +00001406 spin_unlock_bh(&adapter->mcc_lock);
1407}
1408
Sathya Perla04b71172011-09-27 13:30:27 -04001409/* Uses synchronous mcc */
1410int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
1411 char *fw_on_flash)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001412{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001413 struct be_mcc_wrb *wrb;
1414 struct be_cmd_req_get_fw_version *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001415 int status;
1416
Sathya Perla04b71172011-09-27 13:30:27 -04001417 spin_lock_bh(&adapter->mcc_lock);
Sathya Perlab31c50a2009-09-17 10:30:13 -07001418
Sathya Perla04b71172011-09-27 13:30:27 -04001419 wrb = wrb_from_mccq(adapter);
1420 if (!wrb) {
1421 status = -EBUSY;
1422 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001423 }
1424
Sathya Perla04b71172011-09-27 13:30:27 -04001425 req = embedded_payload(wrb);
Sathya Perla04b71172011-09-27 13:30:27 -04001426
Somnath Kotur106df1e2011-10-27 07:12:13 +00001427 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1428 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
Sathya Perla04b71172011-09-27 13:30:27 -04001429 status = be_mcc_notify_wait(adapter);
1430 if (!status) {
1431 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1432 strcpy(fw_ver, resp->firmware_version_string);
1433 if (fw_on_flash)
1434 strcpy(fw_on_flash, resp->fw_on_flash_version_string);
1435 }
1436err:
1437 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001438 return status;
1439}
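
/*
 * Caller sketch: both strings are copied with strcpy() above, so the buffers
 * must be large enough for the firmware strings.  FW_VER_LEN is assumed to
 * be the buffer-size constant the driver defines in be.h.
 *
 *	char fw_ver[FW_VER_LEN], fw_on_flash[FW_VER_LEN];
 *
 *	if (!be_cmd_get_fw_ver(adapter, fw_ver, fw_on_flash))
 *		dev_info(&adapter->pdev->dev, "FW %s (flash %s)\n",
 *			 fw_ver, fw_on_flash);
 */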
1440
Sathya Perlab31c50a2009-09-17 10:30:13 -07001441/* Set the EQ delay interval of an EQ to the specified value
1442 * Uses async mcc
1443 */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001444int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001445{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001446 struct be_mcc_wrb *wrb;
1447 struct be_cmd_req_modify_eq_delay *req;
Sathya Perla713d03942009-11-22 22:02:45 +00001448 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001449
Sathya Perlab31c50a2009-09-17 10:30:13 -07001450 spin_lock_bh(&adapter->mcc_lock);
1451
1452 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001453 if (!wrb) {
1454 status = -EBUSY;
1455 goto err;
1456 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001457 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001458
Somnath Kotur106df1e2011-10-27 07:12:13 +00001459 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1460 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001461
1462 req->num_eq = cpu_to_le32(1);
1463 req->delay[0].eq_id = cpu_to_le32(eq_id);
1464 req->delay[0].phase = 0;
1465 req->delay[0].delay_multiplier = cpu_to_le32(eqd);
1466
Sathya Perlab31c50a2009-09-17 10:30:13 -07001467 be_mcc_notify(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001468
Sathya Perla713d03942009-11-22 22:02:45 +00001469err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07001470 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla713d03942009-11-22 22:02:45 +00001471 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001472}
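
/*
 * Caller sketch: the eqd value is programmed directly as the EQ's delay
 * multiplier, so callers typically recompute it periodically and only issue
 * the command when it changes.  eq_id, cur_eqd and new_eqd are placeholders
 * for state the caller already tracks.
 *
 *	if (new_eqd != cur_eqd)
 *		be_cmd_modify_eqd(adapter, eq_id, new_eqd);
 */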
1473
Sathya Perlab31c50a2009-09-17 10:30:13 -07001474/* Uses synchronous mcc */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001475int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001476 u32 num, bool untagged, bool promiscuous)
1477{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001478 struct be_mcc_wrb *wrb;
1479 struct be_cmd_req_vlan_config *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001480 int status;
1481
Sathya Perlab31c50a2009-09-17 10:30:13 -07001482 spin_lock_bh(&adapter->mcc_lock);
1483
1484 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001485 if (!wrb) {
1486 status = -EBUSY;
1487 goto err;
1488 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001489 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001490
Somnath Kotur106df1e2011-10-27 07:12:13 +00001491 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1492 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001493
1494 req->interface_id = if_id;
1495 req->promiscuous = promiscuous;
1496 req->untagged = untagged;
1497 req->num_vlan = num;
1498 if (!promiscuous) {
1499 memcpy(req->normal_vlan, vtag_array,
1500 req->num_vlan * sizeof(vtag_array[0]));
1501 }
1502
Sathya Perlab31c50a2009-09-17 10:30:13 -07001503 status = be_mcc_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001504
Sathya Perla713d03942009-11-22 22:02:45 +00001505err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07001506 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001507 return status;
1508}
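
/*
 * Caller sketch: programming a small VLAN filter table.  The VLAN IDs and
 * the untagged/promiscuous choices are illustrative; adapter->if_handle is
 * assumed to hold the interface id, as in be_cmd_rx_filter() below.
 *
 *	u16 vtags[2] = { 10, 20 };
 *
 *	status = be_cmd_vlan_config(adapter, adapter->if_handle, vtags,
 *				    ARRAY_SIZE(vtags), true, false);
 */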
1509
Sathya Perla5b8821b2011-08-02 19:57:44 +00001510int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001511{
Sathya Perla6ac7b682009-06-18 00:05:54 +00001512 struct be_mcc_wrb *wrb;
Sathya Perla5b8821b2011-08-02 19:57:44 +00001513 struct be_dma_mem *mem = &adapter->rx_filter;
1514 struct be_cmd_req_rx_filter *req = mem->va;
Sathya Perlae7b909a2009-11-22 22:01:10 +00001515 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001516
Sathya Perla8788fdc2009-07-27 22:52:03 +00001517 spin_lock_bh(&adapter->mcc_lock);
Sathya Perla6ac7b682009-06-18 00:05:54 +00001518
Sathya Perlab31c50a2009-09-17 10:30:13 -07001519 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001520 if (!wrb) {
1521 status = -EBUSY;
1522 goto err;
1523 }
Sathya Perla5b8821b2011-08-02 19:57:44 +00001524 memset(req, 0, sizeof(*req));
Somnath Kotur106df1e2011-10-27 07:12:13 +00001525 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1526 OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
1527 wrb, mem);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001528
Sathya Perla5b8821b2011-08-02 19:57:44 +00001529 req->if_id = cpu_to_le32(adapter->if_handle);
1530 if (flags & IFF_PROMISC) {
1531 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1532 BE_IF_FLAGS_VLAN_PROMISCUOUS);
1533 if (value == ON)
1534 req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
Sathya Perla8e7d3f62011-09-27 13:29:38 -04001535 BE_IF_FLAGS_VLAN_PROMISCUOUS);
Sathya Perla5b8821b2011-08-02 19:57:44 +00001536 } else if (flags & IFF_ALLMULTI) {
1537 req->if_flags_mask = req->if_flags =
Sathya Perla8e7d3f62011-09-27 13:29:38 -04001538 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
Sathya Perla24307ee2009-06-18 00:09:25 +00001539 } else {
Sathya Perla5b8821b2011-08-02 19:57:44 +00001540 struct netdev_hw_addr *ha;
1541 int i = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001542
Sathya Perla8e7d3f62011-09-27 13:29:38 -04001543 req->if_flags_mask = req->if_flags =
1544 cpu_to_le32(BE_IF_FLAGS_MULTICAST);
Padmanabh Ratnakar1610c792011-11-03 01:49:27 +00001545
1546		/* Reset mcast promisc mode if it was already set: set the
1547		 * mask bit but leave the corresponding flags bit clear.
1548		 */
1549 req->if_flags_mask |=
1550 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
1551
Padmanabh Ratnakar016f97b2011-11-03 01:49:13 +00001552 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
Sathya Perla5b8821b2011-08-02 19:57:44 +00001553 netdev_for_each_mc_addr(ha, adapter->netdev)
1554 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
1555 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001556
Sathya Perla0d1d5872011-08-03 05:19:27 -07001557 status = be_mcc_notify_wait(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001558err:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001559 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perlae7b909a2009-11-22 22:01:10 +00001560 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001561}
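
/*
 * Caller sketch in the style of a set_rx_mode handler: any flag other than
 * IFF_PROMISC/IFF_ALLMULTI makes the function above program the multicast
 * list itself.  ON is the enable value used in the promiscuous branch above;
 * OFF is assumed to be its counterpart.
 *
 *	if (adapter->netdev->flags & IFF_PROMISC) {
 *		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
 *	} else {
 *		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
 *		be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
 *	}
 */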
1562
Sathya Perlab31c50a2009-09-17 10:30:13 -07001563/* Uses synchronous mcc */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001564int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001565{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001566 struct be_mcc_wrb *wrb;
1567 struct be_cmd_req_set_flow_control *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001568 int status;
1569
Sathya Perlab31c50a2009-09-17 10:30:13 -07001570 spin_lock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001571
Sathya Perlab31c50a2009-09-17 10:30:13 -07001572 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001573 if (!wrb) {
1574 status = -EBUSY;
1575 goto err;
1576 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001577 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001578
Somnath Kotur106df1e2011-10-27 07:12:13 +00001579 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1580 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001581
1582 req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1583 req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1584
Sathya Perlab31c50a2009-09-17 10:30:13 -07001585 status = be_mcc_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001586
Sathya Perla713d03942009-11-22 22:02:45 +00001587err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07001588 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001589 return status;
1590}
1591
Sathya Perlab31c50a2009-09-17 10:30:13 -07001592/* Uses sync mcc */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001593int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001594{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001595 struct be_mcc_wrb *wrb;
1596 struct be_cmd_req_get_flow_control *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001597 int status;
1598
Sathya Perlab31c50a2009-09-17 10:30:13 -07001599 spin_lock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001600
Sathya Perlab31c50a2009-09-17 10:30:13 -07001601 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001602 if (!wrb) {
1603 status = -EBUSY;
1604 goto err;
1605 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001606 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001607
Somnath Kotur106df1e2011-10-27 07:12:13 +00001608 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1609 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001610
Sathya Perlab31c50a2009-09-17 10:30:13 -07001611 status = be_mcc_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001612 if (!status) {
1613 struct be_cmd_resp_get_flow_control *resp =
1614 embedded_payload(wrb);
1615 *tx_fc = le16_to_cpu(resp->tx_flow_control);
1616 *rx_fc = le16_to_cpu(resp->rx_flow_control);
1617 }
1618
Sathya Perla713d03942009-11-22 22:02:45 +00001619err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07001620 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001621 return status;
1622}
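
/*
 * Caller sketch (ethtool-style pause handling): read the current settings
 * with the getter, write new ones with the setter defined above.
 *
 *	u32 tx_fc = 0, rx_fc = 0;
 *
 *	if (!be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc))
 *		dev_info(&adapter->pdev->dev, "pause tx:%u rx:%u\n",
 *			 tx_fc, rx_fc);
 *	be_cmd_set_flow_control(adapter, 1, 1);
 */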
1623
Sathya Perlab31c50a2009-09-17 10:30:13 -07001624/* Uses mbox */
Sathya Perla3abcded2010-10-03 22:12:27 -07001625int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
1626 u32 *mode, u32 *caps)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001627{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001628 struct be_mcc_wrb *wrb;
1629 struct be_cmd_req_query_fw_cfg *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001630 int status;
1631
Ivan Vecera29849612010-12-14 05:43:19 +00001632 if (mutex_lock_interruptible(&adapter->mbox_lock))
1633 return -1;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001634
Sathya Perlab31c50a2009-09-17 10:30:13 -07001635 wrb = wrb_from_mbox(adapter);
1636 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001637
Somnath Kotur106df1e2011-10-27 07:12:13 +00001638 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1639 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001640
Sathya Perlab31c50a2009-09-17 10:30:13 -07001641 status = be_mbox_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001642 if (!status) {
1643 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
1644 *port_num = le32_to_cpu(resp->phys_port);
Ajit Khaparde3486be22010-07-23 02:04:54 +00001645 *mode = le32_to_cpu(resp->function_mode);
Sathya Perla3abcded2010-10-03 22:12:27 -07001646 *caps = le32_to_cpu(resp->function_caps);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001647 }
1648
Ivan Vecera29849612010-12-14 05:43:19 +00001649 mutex_unlock(&adapter->mbox_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001650 return status;
1651}
sarveshwarb14074ea2009-08-05 13:05:24 -07001652
Sathya Perlab31c50a2009-09-17 10:30:13 -07001653/* Uses mbox */
sarveshwarb14074ea2009-08-05 13:05:24 -07001654int be_cmd_reset_function(struct be_adapter *adapter)
1655{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001656 struct be_mcc_wrb *wrb;
1657 struct be_cmd_req_hdr *req;
sarveshwarb14074ea2009-08-05 13:05:24 -07001658 int status;
1659
Ivan Vecera29849612010-12-14 05:43:19 +00001660 if (mutex_lock_interruptible(&adapter->mbox_lock))
1661 return -1;
sarveshwarb14074ea2009-08-05 13:05:24 -07001662
Sathya Perlab31c50a2009-09-17 10:30:13 -07001663 wrb = wrb_from_mbox(adapter);
1664 req = embedded_payload(wrb);
sarveshwarb14074ea2009-08-05 13:05:24 -07001665
Somnath Kotur106df1e2011-10-27 07:12:13 +00001666 be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
1667 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);
sarveshwarb14074ea2009-08-05 13:05:24 -07001668
Sathya Perlab31c50a2009-09-17 10:30:13 -07001669 status = be_mbox_notify_wait(adapter);
sarveshwarb14074ea2009-08-05 13:05:24 -07001670
Ivan Vecera29849612010-12-14 05:43:19 +00001671 mutex_unlock(&adapter->mbox_lock);
sarveshwarb14074ea2009-08-05 13:05:24 -07001672 return status;
1673}
Ajit Khaparde84517482009-09-04 03:12:16 +00001674
Sathya Perla3abcded2010-10-03 22:12:27 -07001675int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
1676{
1677 struct be_mcc_wrb *wrb;
1678 struct be_cmd_req_rss_config *req;
Sathya Perla5d8bee62011-05-23 20:29:09 +00001679 u32 myhash[10] = {0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF,
1680 0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF};
Sathya Perla3abcded2010-10-03 22:12:27 -07001681 int status;
1682
Ivan Vecera29849612010-12-14 05:43:19 +00001683 if (mutex_lock_interruptible(&adapter->mbox_lock))
1684 return -1;
Sathya Perla3abcded2010-10-03 22:12:27 -07001685
1686 wrb = wrb_from_mbox(adapter);
1687 req = embedded_payload(wrb);
1688
Somnath Kotur106df1e2011-10-27 07:12:13 +00001689 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1690 OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
Sathya Perla3abcded2010-10-03 22:12:27 -07001691
1692 req->if_id = cpu_to_le32(adapter->if_handle);
1693 req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
1694 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
1695 memcpy(req->cpu_table, rsstable, table_size);
1696 memcpy(req->hash, myhash, sizeof(myhash));
1697 be_dws_cpu_to_le(req->hash, sizeof(req->hash));
1698
1699 status = be_mbox_notify_wait(adapter);
1700
Ivan Vecera29849612010-12-14 05:43:19 +00001701 mutex_unlock(&adapter->mbox_lock);
Sathya Perla3abcded2010-10-03 22:12:27 -07001702 return status;
1703}
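
/*
 * Caller sketch: the command encodes log2(table_size) via fls(), so
 * table_size should be a power of two.  The 128-entry table, num_rx_qs and
 * rx_rss_id[] (per-queue RSS ids) are assumptions about the caller.
 *
 *	u8 rsstable[128];
 *	int i;
 *
 *	for (i = 0; i < sizeof(rsstable); i++)
 *		rsstable[i] = rx_rss_id[i % num_rx_qs];
 *	status = be_cmd_rss_config(adapter, rsstable, sizeof(rsstable));
 */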
1704
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07001705/* Uses sync mcc */
1706int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
1707 u8 bcn, u8 sts, u8 state)
1708{
1709 struct be_mcc_wrb *wrb;
1710 struct be_cmd_req_enable_disable_beacon *req;
1711 int status;
1712
1713 spin_lock_bh(&adapter->mcc_lock);
1714
1715 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001716 if (!wrb) {
1717 status = -EBUSY;
1718 goto err;
1719 }
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07001720 req = embedded_payload(wrb);
1721
Somnath Kotur106df1e2011-10-27 07:12:13 +00001722 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1723 OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07001724
1725 req->port_num = port_num;
1726 req->beacon_state = state;
1727 req->beacon_duration = bcn;
1728 req->status_duration = sts;
1729
1730 status = be_mcc_notify_wait(adapter);
1731
Sathya Perla713d03942009-11-22 22:02:45 +00001732err:
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07001733 spin_unlock_bh(&adapter->mcc_lock);
1734 return status;
1735}
1736
1737/* Uses sync mcc */
1738int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
1739{
1740 struct be_mcc_wrb *wrb;
1741 struct be_cmd_req_get_beacon_state *req;
1742 int status;
1743
1744 spin_lock_bh(&adapter->mcc_lock);
1745
1746 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001747 if (!wrb) {
1748 status = -EBUSY;
1749 goto err;
1750 }
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07001751 req = embedded_payload(wrb);
1752
Somnath Kotur106df1e2011-10-27 07:12:13 +00001753 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1754 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07001755
1756 req->port_num = port_num;
1757
1758 status = be_mcc_notify_wait(adapter);
1759 if (!status) {
1760 struct be_cmd_resp_get_beacon_state *resp =
1761 embedded_payload(wrb);
1762 *state = resp->beacon_state;
1763 }
1764
Sathya Perla713d03942009-11-22 22:02:45 +00001765err:
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07001766 spin_unlock_bh(&adapter->mcc_lock);
1767 return status;
1768}
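
/*
 * Caller sketch (port identify): save the current beacon state, turn the
 * beacon on, and restore it afterwards.  BEACON_STATE_ENABLED is assumed to
 * be the driver's "on" value; adapter->hba_port_num is filled in by
 * be_cmd_get_cntl_attributes() further below.
 *
 *	u32 saved = 0;
 *
 *	be_cmd_get_beacon_state(adapter, adapter->hba_port_num, &saved);
 *	be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
 *				BEACON_STATE_ENABLED);
 *	...blink for a while...
 *	be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
 *				(u8)saved);
 */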
1769
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00001770int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
1771 u32 data_size, u32 data_offset, const char *obj_name,
1772 u32 *data_written, u8 *addn_status)
1773{
1774 struct be_mcc_wrb *wrb;
1775 struct lancer_cmd_req_write_object *req;
1776 struct lancer_cmd_resp_write_object *resp;
1777 void *ctxt = NULL;
1778 int status;
1779
1780 spin_lock_bh(&adapter->mcc_lock);
1781 adapter->flash_status = 0;
1782
1783 wrb = wrb_from_mccq(adapter);
1784 if (!wrb) {
1785 status = -EBUSY;
1786 goto err_unlock;
1787 }
1788
1789 req = embedded_payload(wrb);
1790
Somnath Kotur106df1e2011-10-27 07:12:13 +00001791 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00001792 OPCODE_COMMON_WRITE_OBJECT,
Somnath Kotur106df1e2011-10-27 07:12:13 +00001793 sizeof(struct lancer_cmd_req_write_object), wrb,
1794 NULL);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00001795
1796 ctxt = &req->context;
1797 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
1798 write_length, ctxt, data_size);
1799
1800 if (data_size == 0)
1801 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
1802 eof, ctxt, 1);
1803 else
1804 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
1805 eof, ctxt, 0);
1806
1807 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1808 req->write_offset = cpu_to_le32(data_offset);
1809 strcpy(req->object_name, obj_name);
1810 req->descriptor_count = cpu_to_le32(1);
1811 req->buf_len = cpu_to_le32(data_size);
1812 req->addr_low = cpu_to_le32((cmd->dma +
1813 sizeof(struct lancer_cmd_req_write_object))
1814 & 0xFFFFFFFF);
1815 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
1816 sizeof(struct lancer_cmd_req_write_object)));
1817
1818 be_mcc_notify(adapter);
1819 spin_unlock_bh(&adapter->mcc_lock);
1820
1821 if (!wait_for_completion_timeout(&adapter->flash_compl,
1822 msecs_to_jiffies(12000)))
1823 status = -1;
1824 else
1825 status = adapter->flash_status;
1826
1827 resp = embedded_payload(wrb);
1828 if (!status) {
1829 *data_written = le32_to_cpu(resp->actual_write_len);
1830 } else {
1831 *addn_status = resp->additional_status;
1832 status = resp->status;
1833 }
1834
1835 return status;
1836
1837err_unlock:
1838 spin_unlock_bh(&adapter->mcc_lock);
1839 return status;
1840}
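
/*
 * Caller sketch: firmware download through this command is chunked.  Each
 * chunk is copied into the same DMA buffer right after the request header
 * (which is where addr_low/addr_high point above), and a final call with
 * data_size == 0 sets the EOF bit.  fw_data, total_len, chunk_max and
 * obj_name are placeholders supplied by the caller.
 *
 *	u32 offset = 0, chunk, written = 0;
 *	u8 add_status = 0;
 *	int status = 0;
 *
 *	while (total_len) {
 *		chunk = min(total_len, chunk_max);
 *		memcpy(cmd->va + sizeof(struct lancer_cmd_req_write_object),
 *		       fw_data + offset, chunk);
 *		status = lancer_cmd_write_object(adapter, cmd, chunk, offset,
 *						 obj_name, &written,
 *						 &add_status);
 *		if (status)
 *			break;
 *		offset += written;
 *		total_len -= written;
 *	}
 *	if (!status)
 *		status = lancer_cmd_write_object(adapter, cmd, 0, offset,
 *						 obj_name, &written,
 *						 &add_status);
 */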
1841
Ajit Khaparde84517482009-09-04 03:12:16 +00001842int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
1843 u32 flash_type, u32 flash_opcode, u32 buf_size)
1844{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001845 struct be_mcc_wrb *wrb;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00001846 struct be_cmd_write_flashrom *req;
Ajit Khaparde84517482009-09-04 03:12:16 +00001847 int status;
1848
Sathya Perlab31c50a2009-09-17 10:30:13 -07001849 spin_lock_bh(&adapter->mcc_lock);
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07001850 adapter->flash_status = 0;
Sathya Perlab31c50a2009-09-17 10:30:13 -07001851
1852 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001853 if (!wrb) {
1854 status = -EBUSY;
Dan Carpenter2892d9c2010-05-26 04:46:35 +00001855 goto err_unlock;
Sathya Perla713d03942009-11-22 22:02:45 +00001856 }
1857 req = cmd->va;
Sathya Perlab31c50a2009-09-17 10:30:13 -07001858
Somnath Kotur106df1e2011-10-27 07:12:13 +00001859 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1860 OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);
Ajit Khaparde84517482009-09-04 03:12:16 +00001861
1862 req->params.op_type = cpu_to_le32(flash_type);
1863 req->params.op_code = cpu_to_le32(flash_opcode);
1864 req->params.data_buf_size = cpu_to_le32(buf_size);
1865
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07001866 be_mcc_notify(adapter);
1867 spin_unlock_bh(&adapter->mcc_lock);
1868
1869 if (!wait_for_completion_timeout(&adapter->flash_compl,
Sathya Perlae2edb7d2011-08-22 19:41:54 +00001870 msecs_to_jiffies(40000)))
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07001871 status = -1;
1872 else
1873 status = adapter->flash_status;
Ajit Khaparde84517482009-09-04 03:12:16 +00001874
Dan Carpenter2892d9c2010-05-26 04:46:35 +00001875 return status;
1876
1877err_unlock:
1878 spin_unlock_bh(&adapter->mcc_lock);
Ajit Khaparde84517482009-09-04 03:12:16 +00001879 return status;
1880}
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08001881
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00001882int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
1883 int offset)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08001884{
1885 struct be_mcc_wrb *wrb;
1886 struct be_cmd_write_flashrom *req;
1887 int status;
1888
1889 spin_lock_bh(&adapter->mcc_lock);
1890
1891 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001892 if (!wrb) {
1893 status = -EBUSY;
1894 goto err;
1895 }
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08001896 req = embedded_payload(wrb);
1897
Somnath Kotur106df1e2011-10-27 07:12:13 +00001898 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1899 OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4, wrb, NULL);
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08001900
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00001901 req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08001902 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
Ajit Khaparde8b93b712010-03-31 01:57:10 +00001903 req->params.offset = cpu_to_le32(offset);
1904 req->params.data_buf_size = cpu_to_le32(0x4);
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08001905
1906 status = be_mcc_notify_wait(adapter);
1907 if (!status)
1908 memcpy(flashed_crc, req->params.data_buf, 4);
1909
Sathya Perla713d03942009-11-22 22:02:45 +00001910err:
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08001911 spin_unlock_bh(&adapter->mcc_lock);
1912 return status;
1913}
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00001914
Dan Carpenterc196b022010-05-26 04:47:39 +00001915int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00001916 struct be_dma_mem *nonemb_cmd)
1917{
1918 struct be_mcc_wrb *wrb;
1919 struct be_cmd_req_acpi_wol_magic_config *req;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00001920 int status;
1921
1922 spin_lock_bh(&adapter->mcc_lock);
1923
1924 wrb = wrb_from_mccq(adapter);
1925 if (!wrb) {
1926 status = -EBUSY;
1927 goto err;
1928 }
1929 req = nonemb_cmd->va;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00001930
Somnath Kotur106df1e2011-10-27 07:12:13 +00001931 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1932 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
1933 nonemb_cmd);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00001934 memcpy(req->magic_mac, mac, ETH_ALEN);
1935
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00001936 status = be_mcc_notify_wait(adapter);
1937
1938err:
1939 spin_unlock_bh(&adapter->mcc_lock);
1940 return status;
1941}
Suresh Rff33a6e2009-12-03 16:15:52 -08001942
Sarveshwar Bandifced9992009-12-23 04:41:44 +00001943int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
1944 u8 loopback_type, u8 enable)
1945{
1946 struct be_mcc_wrb *wrb;
1947 struct be_cmd_req_set_lmode *req;
1948 int status;
1949
1950 spin_lock_bh(&adapter->mcc_lock);
1951
1952 wrb = wrb_from_mccq(adapter);
1953 if (!wrb) {
1954 status = -EBUSY;
1955 goto err;
1956 }
1957
1958 req = embedded_payload(wrb);
1959
Somnath Kotur106df1e2011-10-27 07:12:13 +00001960 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
1961 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
1962 NULL);
Sarveshwar Bandifced9992009-12-23 04:41:44 +00001963
1964 req->src_port = port_num;
1965 req->dest_port = port_num;
1966 req->loopback_type = loopback_type;
1967 req->loopback_state = enable;
1968
1969 status = be_mcc_notify_wait(adapter);
1970err:
1971 spin_unlock_bh(&adapter->mcc_lock);
1972 return status;
1973}
1974
Suresh Rff33a6e2009-12-03 16:15:52 -08001975int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
1976 u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
1977{
1978 struct be_mcc_wrb *wrb;
1979 struct be_cmd_req_loopback_test *req;
1980 int status;
1981
1982 spin_lock_bh(&adapter->mcc_lock);
1983
1984 wrb = wrb_from_mccq(adapter);
1985 if (!wrb) {
1986 status = -EBUSY;
1987 goto err;
1988 }
1989
1990 req = embedded_payload(wrb);
1991
Somnath Kotur106df1e2011-10-27 07:12:13 +00001992 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
1993 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
Sathya Perla3ffd0512010-06-01 00:19:33 -07001994 req->hdr.timeout = cpu_to_le32(4);
Suresh Rff33a6e2009-12-03 16:15:52 -08001995
1996 req->pattern = cpu_to_le64(pattern);
1997 req->src_port = cpu_to_le32(port_num);
1998 req->dest_port = cpu_to_le32(port_num);
1999 req->pkt_size = cpu_to_le32(pkt_size);
2000 req->num_pkts = cpu_to_le32(num_pkts);
2001 req->loopback_type = cpu_to_le32(loopback_type);
2002
2003 status = be_mcc_notify_wait(adapter);
2004 if (!status) {
2005 struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
2006 status = le32_to_cpu(resp->status);
2007 }
2008
2009err:
2010 spin_unlock_bh(&adapter->mcc_lock);
2011 return status;
2012}
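
/*
 * Caller sketch (self-test style): put the port into loopback, run the
 * packet test, then disable loopback again.  The packet size, count and
 * pattern are illustrative values, and loop_type stands for one of the
 * firmware loopback type codes.
 *
 *	be_cmd_set_loopback(adapter, adapter->hba_port_num, loop_type, 1);
 *	status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
 *				      loop_type, 1500, 2, 0xFF);
 *	be_cmd_set_loopback(adapter, adapter->hba_port_num, loop_type, 0);
 */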
2013
2014int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
2015 u32 byte_cnt, struct be_dma_mem *cmd)
2016{
2017 struct be_mcc_wrb *wrb;
2018 struct be_cmd_req_ddrdma_test *req;
Suresh Rff33a6e2009-12-03 16:15:52 -08002019 int status;
2020 int i, j = 0;
2021
2022 spin_lock_bh(&adapter->mcc_lock);
2023
2024 wrb = wrb_from_mccq(adapter);
2025 if (!wrb) {
2026 status = -EBUSY;
2027 goto err;
2028 }
2029 req = cmd->va;
Somnath Kotur106df1e2011-10-27 07:12:13 +00002030 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2031 OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);
Suresh Rff33a6e2009-12-03 16:15:52 -08002032
2033 req->pattern = cpu_to_le64(pattern);
2034 req->byte_count = cpu_to_le32(byte_cnt);
2035 for (i = 0; i < byte_cnt; i++) {
2036 req->snd_buff[i] = (u8)(pattern >> (j*8));
2037 j++;
2038 if (j > 7)
2039 j = 0;
2040 }
2041
2042 status = be_mcc_notify_wait(adapter);
2043
2044 if (!status) {
2045 struct be_cmd_resp_ddrdma_test *resp;
2046 resp = cmd->va;
2047 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
2048 resp->snd_err) {
2049 status = -1;
2050 }
2051 }
2052
2053err:
2054 spin_unlock_bh(&adapter->mcc_lock);
2055 return status;
2056}
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08002057
Dan Carpenterc196b022010-05-26 04:47:39 +00002058int be_cmd_get_seeprom_data(struct be_adapter *adapter,
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08002059 struct be_dma_mem *nonemb_cmd)
2060{
2061 struct be_mcc_wrb *wrb;
2062 struct be_cmd_req_seeprom_read *req;
2063 struct be_sge *sge;
2064 int status;
2065
2066 spin_lock_bh(&adapter->mcc_lock);
2067
2068 wrb = wrb_from_mccq(adapter);
Ajit Khapardee45ff012011-02-04 17:18:28 +00002069 if (!wrb) {
2070 status = -EBUSY;
2071 goto err;
2072 }
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08002073 req = nonemb_cmd->va;
2074 sge = nonembedded_sgl(wrb);
2075
Somnath Kotur106df1e2011-10-27 07:12:13 +00002076 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2077 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
2078 nonemb_cmd);
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08002079
2080 status = be_mcc_notify_wait(adapter);
2081
Ajit Khapardee45ff012011-02-04 17:18:28 +00002082err:
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08002083 spin_unlock_bh(&adapter->mcc_lock);
2084 return status;
2085}
Ajit Khapardeee3cb622010-07-01 03:51:00 +00002086
Sathya Perla306f1342011-08-02 19:57:45 +00002087int be_cmd_get_phy_info(struct be_adapter *adapter,
2088 struct be_phy_info *phy_info)
Ajit Khapardeee3cb622010-07-01 03:51:00 +00002089{
2090 struct be_mcc_wrb *wrb;
2091 struct be_cmd_req_get_phy_info *req;
Sathya Perla306f1342011-08-02 19:57:45 +00002092 struct be_dma_mem cmd;
Ajit Khapardeee3cb622010-07-01 03:51:00 +00002093 int status;
2094
2095 spin_lock_bh(&adapter->mcc_lock);
2096
2097 wrb = wrb_from_mccq(adapter);
2098 if (!wrb) {
2099 status = -EBUSY;
2100 goto err;
2101 }
Sathya Perla306f1342011-08-02 19:57:45 +00002102 cmd.size = sizeof(struct be_cmd_req_get_phy_info);
2103 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2104 &cmd.dma);
2105 if (!cmd.va) {
2106 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2107 status = -ENOMEM;
2108 goto err;
2109 }
Ajit Khapardeee3cb622010-07-01 03:51:00 +00002110
Sathya Perla306f1342011-08-02 19:57:45 +00002111 req = cmd.va;
Ajit Khapardeee3cb622010-07-01 03:51:00 +00002112
Somnath Kotur106df1e2011-10-27 07:12:13 +00002113 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2114 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
2115 wrb, &cmd);
Ajit Khapardeee3cb622010-07-01 03:51:00 +00002116
2117 status = be_mcc_notify_wait(adapter);
Sathya Perla306f1342011-08-02 19:57:45 +00002118 if (!status) {
2119 struct be_phy_info *resp_phy_info =
2120 cmd.va + sizeof(struct be_cmd_req_hdr);
2121 phy_info->phy_type = le16_to_cpu(resp_phy_info->phy_type);
2122 phy_info->interface_type =
2123 le16_to_cpu(resp_phy_info->interface_type);
2124 }
2125 pci_free_consistent(adapter->pdev, cmd.size,
2126 cmd.va, cmd.dma);
Ajit Khapardeee3cb622010-07-01 03:51:00 +00002127err:
2128 spin_unlock_bh(&adapter->mcc_lock);
2129 return status;
2130}
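
/*
 * Caller sketch: only the phy_type and interface_type fields of the supplied
 * structure are filled in above.
 *
 *	struct be_phy_info phy;
 *
 *	if (!be_cmd_get_phy_info(adapter, &phy))
 *		dev_info(&adapter->pdev->dev, "phy type %u, interface %u\n",
 *			 phy.phy_type, phy.interface_type);
 */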
Ajit Khapardee1d18732010-07-23 01:52:13 +00002131
2132int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2133{
2134 struct be_mcc_wrb *wrb;
2135 struct be_cmd_req_set_qos *req;
2136 int status;
2137
2138 spin_lock_bh(&adapter->mcc_lock);
2139
2140 wrb = wrb_from_mccq(adapter);
2141 if (!wrb) {
2142 status = -EBUSY;
2143 goto err;
2144 }
2145
2146 req = embedded_payload(wrb);
2147
Somnath Kotur106df1e2011-10-27 07:12:13 +00002148 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2149 OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
Ajit Khapardee1d18732010-07-23 01:52:13 +00002150
2151 req->hdr.domain = domain;
Ajit Khaparde6bff57a2011-02-11 13:33:02 +00002152 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2153 req->max_bps_nic = cpu_to_le32(bps);
Ajit Khapardee1d18732010-07-23 01:52:13 +00002154
2155 status = be_mcc_notify_wait(adapter);
2156
2157err:
2158 spin_unlock_bh(&adapter->mcc_lock);
2159 return status;
2160}
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00002161
2162int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2163{
2164 struct be_mcc_wrb *wrb;
2165 struct be_cmd_req_cntl_attribs *req;
2166 struct be_cmd_resp_cntl_attribs *resp;
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00002167 int status;
2168 int payload_len = max(sizeof(*req), sizeof(*resp));
2169 struct mgmt_controller_attrib *attribs;
2170 struct be_dma_mem attribs_cmd;
2171
2172 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2173 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2174 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
2175 &attribs_cmd.dma);
2176 if (!attribs_cmd.va) {
2177 dev_err(&adapter->pdev->dev,
2178 "Memory allocation failure\n");
2179 return -ENOMEM;
2180 }
2181
2182	if (mutex_lock_interruptible(&adapter->mbox_lock)) {
2183		pci_free_consistent(adapter->pdev, attribs_cmd.size,
				attribs_cmd.va, attribs_cmd.dma);
		return -1;
	}
2184
2185 wrb = wrb_from_mbox(adapter);
2186 if (!wrb) {
2187 status = -EBUSY;
2188 goto err;
2189 }
2190 req = attribs_cmd.va;
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00002191
Somnath Kotur106df1e2011-10-27 07:12:13 +00002192 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2193 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
2194 &attribs_cmd);
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00002195
2196 status = be_mbox_notify_wait(adapter);
2197 if (!status) {
Joe Perches43d620c2011-06-16 19:08:06 +00002198 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00002199 adapter->hba_port_num = attribs->hba_attribs.phy_port;
2200 }
2201
2202err:
2203 mutex_unlock(&adapter->mbox_lock);
2204 pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
2205 attribs_cmd.dma);
2206 return status;
2207}
Sathya Perla2e588f82011-03-11 02:49:26 +00002208
2209/* Uses mbox */
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002210int be_cmd_req_native_mode(struct be_adapter *adapter)
Sathya Perla2e588f82011-03-11 02:49:26 +00002211{
2212 struct be_mcc_wrb *wrb;
2213 struct be_cmd_req_set_func_cap *req;
2214 int status;
2215
2216 if (mutex_lock_interruptible(&adapter->mbox_lock))
2217 return -1;
2218
2219 wrb = wrb_from_mbox(adapter);
2220 if (!wrb) {
2221 status = -EBUSY;
2222 goto err;
2223 }
2224
2225 req = embedded_payload(wrb);
2226
Somnath Kotur106df1e2011-10-27 07:12:13 +00002227 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2228 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);
Sathya Perla2e588f82011-03-11 02:49:26 +00002229
2230 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2231 CAPABILITY_BE3_NATIVE_ERX_API);
2232 req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
2233
2234 status = be_mbox_notify_wait(adapter);
2235 if (!status) {
2236 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2237 adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2238 CAPABILITY_BE3_NATIVE_ERX_API;
2239 }
2240err:
2241 mutex_unlock(&adapter->mbox_lock);
2242 return status;
2243}
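
/*
 * Caller sketch: this is typically issued once at init; afterwards the
 * driver keys off adapter->be3_native, set above, to choose the
 * RX-completion format.  The exact consumer of be3_native is an assumption
 * about the rest of the driver.
 *
 *	be_cmd_req_native_mode(adapter);
 *	if (adapter->be3_native)
 *		dev_info(&adapter->pdev->dev, "using native ERX completions\n");
 */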