/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

Parav Pandit6a4ab662012-03-26 14:27:12 +000018#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070019#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000020#include "be_cmds.h"
Sathya Perla6b7c5b92009-03-11 23:32:03 -070021
Somnath Kotur3de09452011-09-30 07:25:05 +000022static inline void *embedded_payload(struct be_mcc_wrb *wrb)
23{
24 return wrb->payload.embedded_payload;
25}
Ajit Khaparde609ff3b2011-02-20 11:42:07 +000026
/* Ring the MCCQ doorbell to tell the FW that one new WRB has been posted.
 * No-op once the adapter is in an error state.
 */
static void be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	/* Don't touch the hardware after an unrecoverable error */
	if (be_error(adapter))
		return;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

	/* Make sure the WRB contents are globally visible before the
	 * doorbell write reaches the device */
	wmb();
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}
41
42/* To check if valid bit is set, check the entire word as we don't know
43 * the endianness of the data (old entry is host endian while a new entry is
44 * little endian) */
Sathya Perlaefd2e402009-07-27 22:53:10 +000045static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
Sathya Perla5fb379e2009-06-18 00:02:59 +000046{
47 if (compl->flags != 0) {
48 compl->flags = le32_to_cpu(compl->flags);
49 BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
50 return true;
51 } else {
52 return false;
53 }
54}
55
/* Need to reset the entire word that houses the valid bit, so this CQ
 * entry is not mistaken for a new completion on a later pass */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}
61
Padmanabh Ratnakar652bf642012-04-25 01:47:03 +000062static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
63{
64 unsigned long addr;
65
66 addr = tag1;
67 addr = ((addr << 16) << 16) | tag0;
68 return (void *)addr;
69}
70
/* Process one MCC completion: swap it to host endianness, decode which
 * command it belongs to from the WRB tags, and run any per-command
 * post-processing (flash waiter wakeup, stats parsing, on-die temperature
 * caching, error logging).
 * Returns the FW completion status of the command.
 */
static int be_mcc_compl_process(struct be_adapter *adapter,
				struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;
	struct be_cmd_resp_hdr *resp_hdr;
	u8 opcode = 0, subsystem = 0;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;

	/* The tags carry the address of the originating request's response
	 * header (stashed by be_wrb_cmd_hdr_prepare()) */
	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);

	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	/* Flash cmds have a dedicated waiter; record status and wake it */
	if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
	     (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
	    (subsystem == CMD_SUBSYSTEM_COMMON)) {
		adapter->flash_status = compl_status;
		complete(&adapter->flash_compl);
	}

	if (compl_status == MCC_STATUS_SUCCESS) {
		if (((opcode == OPCODE_ETH_GET_STATISTICS) ||
		     (opcode == OPCODE_ETH_GET_PPORT_STATS)) &&
		    (subsystem == CMD_SUBSYSTEM_ETH)) {
			be_parse_stats(adapter);
			adapter->stats_cmd_sent = false;
		}
		if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
		    subsystem == CMD_SUBSYSTEM_COMMON) {
			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
				(void *)resp_hdr;
			adapter->drv_stats.be_on_die_temperature =
				resp->on_die_temperature;
		}
	} else {
		/* Temperature query failed: stop issuing it periodically */
		if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
			adapter->be_get_temp_freq = 0;

		/* These statuses are expected in some configs; don't log */
		if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
		    compl_status == MCC_STATUS_ILLEGAL_REQUEST)
			goto done;

		if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
			dev_warn(&adapter->pdev->dev,
				 "opcode %d-%d is not permitted\n",
				 opcode, subsystem);
		} else {
			extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
					CQE_STATUS_EXTD_MASK;
			dev_err(&adapter->pdev->dev,
				"opcode %d-%d failed:status %d-%d\n",
				opcode, subsystem, compl_status, extd_status);
		}
	}
done:
	return compl_status;
}
136
Sathya Perlaa8f447bd2009-06-18 00:10:27 +0000137/* Link state evt is a string of bytes; no need for endian swapping */
Sathya Perla8788fdc2009-07-27 22:52:03 +0000138static void be_async_link_state_process(struct be_adapter *adapter,
Sathya Perlaa8f447bd2009-06-18 00:10:27 +0000139 struct be_async_event_link_state *evt)
140{
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000141 /* When link status changes, link speed must be re-queried from FW */
Ajit Khaparde42f11cf2012-04-21 18:53:22 +0000142 adapter->phy.link_speed = -1;
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000143
144 /* For the initial link status do not rely on the ASYNC event as
145 * it may not be received in some cases.
146 */
147 if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
148 be_link_status_update(adapter, evt->port_link_status);
Sathya Perlaa8f447bd2009-06-18 00:10:27 +0000149}
150
Somnath Koturcc4ce022010-10-21 07:11:14 -0700151/* Grp5 CoS Priority evt */
152static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
153 struct be_async_event_grp5_cos_priority *evt)
154{
155 if (evt->valid) {
156 adapter->vlan_prio_bmap = evt->available_priority_bmap;
Ajit Khaparde60964dd2011-02-11 13:37:25 +0000157 adapter->recommended_prio &= ~VLAN_PRIO_MASK;
Somnath Koturcc4ce022010-10-21 07:11:14 -0700158 adapter->recommended_prio =
159 evt->reco_default_priority << VLAN_PRIO_SHIFT;
160 }
161}
162
163/* Grp5 QOS Speed evt */
164static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
165 struct be_async_event_grp5_qos_link_speed *evt)
166{
167 if (evt->physical_port == adapter->port_num) {
168 /* qos_link_speed is in units of 10 Mbps */
Ajit Khaparde42f11cf2012-04-21 18:53:22 +0000169 adapter->phy.link_speed = evt->qos_link_speed * 10;
Somnath Koturcc4ce022010-10-21 07:11:14 -0700170 }
171}
172
Ajit Khaparde3968fa12011-02-20 11:41:53 +0000173/*Grp5 PVID evt*/
174static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
175 struct be_async_event_grp5_pvid_state *evt)
176{
177 if (evt->enabled)
Somnath Kotur939cf302011-08-18 21:51:49 -0700178 adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
Ajit Khaparde3968fa12011-02-20 11:41:53 +0000179 else
180 adapter->pvid = 0;
181}
182
/* Dispatch a GRP5 async event to the handler for its event type; unknown
 * types are logged and dropped.
 */
static void be_async_grp5_evt_process(struct be_adapter *adapter,
		u32 trailer, struct be_mcc_compl *evt)
{
	u8 event_type = 0;

	/* The event type is encoded in the completion's trailer word */
	event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
		ASYNC_TRAILER_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_EVENT_COS_PRIORITY:
		be_async_grp5_cos_priority_process(adapter,
		(struct be_async_event_grp5_cos_priority *)evt);
		break;
	case ASYNC_EVENT_QOS_SPEED:
		be_async_grp5_qos_speed_process(adapter,
		(struct be_async_event_grp5_qos_link_speed *)evt);
		break;
	case ASYNC_EVENT_PVID_STATE:
		be_async_grp5_pvid_state_process(adapter,
		(struct be_async_event_grp5_pvid_state *)evt);
		break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
		break;
	}
}
209
Sathya Perlaa8f447bd2009-06-18 00:10:27 +0000210static inline bool is_link_state_evt(u32 trailer)
211{
Eric Dumazet807540b2010-09-23 05:40:09 +0000212 return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
Sathya Perlaa8f447bd2009-06-18 00:10:27 +0000213 ASYNC_TRAILER_EVENT_CODE_MASK) ==
Eric Dumazet807540b2010-09-23 05:40:09 +0000214 ASYNC_EVENT_CODE_LINK_STATE;
Sathya Perlaa8f447bd2009-06-18 00:10:27 +0000215}
Sathya Perla5fb379e2009-06-18 00:02:59 +0000216
Somnath Koturcc4ce022010-10-21 07:11:14 -0700217static inline bool is_grp5_evt(u32 trailer)
218{
219 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
220 ASYNC_TRAILER_EVENT_CODE_MASK) ==
221 ASYNC_EVENT_CODE_GRP_5);
222}
223
Sathya Perlaefd2e402009-07-27 22:53:10 +0000224static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
Sathya Perla5fb379e2009-06-18 00:02:59 +0000225{
Sathya Perla8788fdc2009-07-27 22:52:03 +0000226 struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
Sathya Perlaefd2e402009-07-27 22:53:10 +0000227 struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
Sathya Perla5fb379e2009-06-18 00:02:59 +0000228
229 if (be_mcc_compl_is_new(compl)) {
230 queue_tail_inc(mcc_cq);
231 return compl;
232 }
233 return NULL;
234}
235
/* Arm the MCC CQ and enable future re-arming so async event notifications
 * flow. The lock serializes against be_process_mcc().
 */
void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}
245
/* Stop async MCC notifications: be_process_mcc() will no longer re-arm
 * the CQ after draining it.
 */
void be_async_mcc_disable(struct be_adapter *adapter)
{
	adapter->mcc_obj.rearm_cq = false;
}
250
/* Drain the MCC completion queue under mcc_cq_lock: async events are
 * dispatched to their handlers, command completions are processed and the
 * WRB usage count is decremented.
 * Returns the status of the last command completion processed (0 if none).
 */
int be_process_mcc(struct be_adapter *adapter)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock_bh(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(compl->flags))
				be_async_link_state_process(adapter,
				(struct be_async_event_link_state *) compl);
			else if (is_grp5_evt(compl->flags))
				be_async_grp5_evt_process(adapter,
				compl->flags, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&mcc_obj->q.used);
		}
		/* Invalidate the entry so it is not seen as new again */
		be_mcc_compl_use(compl);
		num++;
	}

	/* Report the number of entries consumed to the FW; re-arm only
	 * while async notifications are enabled */
	if (num)
		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);

	spin_unlock_bh(&adapter->mcc_cq_lock);
	return status;
}
281
/* Wait till no more pending mcc requests are present.
 * Polls every 100us for up to ~12s; returns -EIO on adapter error or
 * timeout (marking fw_timeout), else the status of the last completion.
 */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout 120000 /* 12s timeout */
	int i, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	for (i = 0; i < mcc_timeout; i++) {
		if (be_error(adapter))
			return -EIO;

		status = be_process_mcc(adapter);

		/* Done once all posted WRBs have completed */
		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "FW not responding\n");
		/* Mark the FW dead so subsequent cmds can fail fast */
		adapter->fw_timeout = true;
		return -EIO;
	}
	return status;
}
306
/* Notify MCC requests and wait for completion.
 * Returns -EIO on FW timeout, else the response status of the WRB that
 * was most recently posted to the MCC queue.
 */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	int status;
	struct be_mcc_wrb *wrb;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
	u16 index = mcc_obj->q.head;
	struct be_cmd_resp_hdr *resp;

	/* The WRB just posted sits one slot behind the current head */
	index_dec(&index, mcc_obj->q.len);
	wrb = queue_index_node(&mcc_obj->q, index);

	/* The WRB tags hold the address of the request/response buffer
	 * (stashed by be_wrb_cmd_hdr_prepare()) */
	resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);

	be_mcc_notify(adapter);

	status = be_mcc_wait_compl(adapter);
	if (status == -EIO)
		goto out;

	/* FW responded: return the command's own completion status */
	status = resp->status;
out:
	return status;
}
331
/* Poll the mailbox doorbell until the FW sets the ready bit.
 * Returns 0 when ready; -EIO if the adapter is in error; -1 if the device
 * appears removed from the bus or the FW does not respond within ~4s.
 */
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	do {
		if (be_error(adapter))
			return -EIO;

		ready = ioread32(db);
		/* All-ones read indicates the device is gone from the bus */
		if (ready == 0xffffffff)
			return -1;

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "FW not responding\n");
			adapter->fw_timeout = true;
			/* Dump unrecoverable-error registers for diagnosis */
			be_detect_dump_ue(adapter);
			return -1;
		}

		/* msecs counts iterations; each iteration sleeps >= 1ms */
		msleep(1);
		msecs++;
	} while (true);

	return 0;
}
362
/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* Step 1: post the high half of the mailbox DMA address */
	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* Step 2: post the low half of the mailbox DMA address */
	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}
412
Sathya Perla8788fdc2009-07-27 22:52:03 +0000413static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700414{
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000415 u32 sem;
416
417 if (lancer_chip(adapter))
418 sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
419 else
420 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700421
422 *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
423 if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
424 return -1;
425 else
426 return 0;
427}
428
/* Wait (polling every 2s, up to ~60s) for the FW to finish POST and reach
 * the ARMFW_RDY stage.
 * Returns 0 when ready, -EINTR if the sleep was interrupted by a signal,
 * -1 on POST error or timeout.
 */
int be_cmd_POST(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;
	struct device *dev = &adapter->pdev->dev;

	do {
		status = be_POST_stage_get(adapter, &stage);
		if (status) {
			dev_err(dev, "POST error; stage=0x%x\n", stage);
			return -1;
		} else if (stage != POST_STAGE_ARMFW_RDY) {
			/* Interruptible so a pending signal aborts the wait */
			if (msleep_interruptible(2000)) {
				dev_err(dev, "Waiting for POST aborted\n");
				return -EINTR;
			}
			timeout += 2;
		} else {
			return 0;
		}
	} while (timeout < 60);

	dev_err(dev, "POST timeout; stage=0x%x\n", stage);
	return -1;
}
454
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700455
456static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
457{
458 return &wrb->payload.sgl[0];
459}
460
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700461
/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
/* Fill in the common request header and the WRB carrying it. The address
 * of req_hdr is split across the two 32-bit WRB tags so that the
 * completion handler (be_mcc_compl_process()) can locate the response.
 */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				u8 subsystem, u8 opcode, int cmd_len,
				struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
{
	struct be_sge *sge;
	unsigned long addr = (unsigned long)req_hdr;
	u64 req_addr = addr;

	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;

	/* Stash the request address: tag0 = low 32 bits, tag1 = high */
	wrb->tag0 = req_addr & 0xFFFFFFFF;
	wrb->tag1 = upper_32_bits(req_addr);

	wrb->payload_length = cmd_len;
	if (mem) {
		/* Non-embedded cmd: one SGE pointing at the external buffer */
		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
			MCC_WRB_SGE_CNT_SHIFT;
		sge = nonembedded_sgl(wrb);
		sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
		sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
		sge->len = cpu_to_le32(mem->size);
	} else
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	be_dws_cpu_to_le(wrb, 8);
}
492
493static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
494 struct be_dma_mem *mem)
495{
496 int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
497 u64 dma = (u64)mem->dma;
498
499 for (i = 0; i < buf_pages; i++) {
500 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
501 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
502 dma += PAGE_SIZE_4K;
503 }
504}
505
506/* Converts interrupt delay in microseconds to multiplier value */
507static u32 eq_delay_to_mult(u32 usec_delay)
508{
509#define MAX_INTR_RATE 651042
510 const u32 round = 10;
511 u32 multiplier;
512
513 if (usec_delay == 0)
514 multiplier = 0;
515 else {
516 u32 interrupt_rate = 1000000 / usec_delay;
517 /* Max delay, corresponding to the lowest interrupt rate */
518 if (interrupt_rate == 0)
519 multiplier = 1023;
520 else {
521 multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
522 multiplier /= interrupt_rate;
523 /* Round the multiplier to the closest value.*/
524 multiplier = (multiplier + round/2) / round;
525 multiplier = min(multiplier, (u32)1023);
526 }
527 }
528 return multiplier;
529}
530
Sathya Perlab31c50a2009-09-17 10:30:13 -0700531static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700532{
Sathya Perlab31c50a2009-09-17 10:30:13 -0700533 struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
534 struct be_mcc_wrb *wrb
535 = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
536 memset(wrb, 0, sizeof(*wrb));
537 return wrb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700538}
539
/* Allocate the next free WRB slot at the MCC queue head.
 * Returns NULL (after logging) when all WRBs are in use.
 * Called with mcc_lock held by the synchronous command paths.
 */
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (atomic_read(&mccq->used) >= mccq->len) {
		dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
		return NULL;
	}

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}
556
Sathya Perla2243e2e2009-11-22 22:02:03 +0000557/* Tell fw we're about to start firing cmds by writing a
558 * special pattern across the wrb hdr; uses mbox
559 */
560int be_cmd_fw_init(struct be_adapter *adapter)
561{
562 u8 *wrb;
563 int status;
564
Ivan Vecera29849612010-12-14 05:43:19 +0000565 if (mutex_lock_interruptible(&adapter->mbox_lock))
566 return -1;
Sathya Perla2243e2e2009-11-22 22:02:03 +0000567
568 wrb = (u8 *)wrb_from_mbox(adapter);
Sathya Perla359a9722010-12-01 01:03:36 +0000569 *wrb++ = 0xFF;
570 *wrb++ = 0x12;
571 *wrb++ = 0x34;
572 *wrb++ = 0xFF;
573 *wrb++ = 0xFF;
574 *wrb++ = 0x56;
575 *wrb++ = 0x78;
576 *wrb = 0xFF;
Sathya Perla2243e2e2009-11-22 22:02:03 +0000577
578 status = be_mbox_notify_wait(adapter);
579
Ivan Vecera29849612010-12-14 05:43:19 +0000580 mutex_unlock(&adapter->mbox_lock);
Sathya Perla2243e2e2009-11-22 22:02:03 +0000581 return status;
582}
583
584/* Tell fw we're done with firing cmds by writing a
585 * special pattern across the wrb hdr; uses mbox
586 */
587int be_cmd_fw_clean(struct be_adapter *adapter)
588{
589 u8 *wrb;
590 int status;
591
Ivan Vecera29849612010-12-14 05:43:19 +0000592 if (mutex_lock_interruptible(&adapter->mbox_lock))
593 return -1;
Sathya Perla2243e2e2009-11-22 22:02:03 +0000594
595 wrb = (u8 *)wrb_from_mbox(adapter);
596 *wrb++ = 0xFF;
597 *wrb++ = 0xAA;
598 *wrb++ = 0xBB;
599 *wrb++ = 0xFF;
600 *wrb++ = 0xFF;
601 *wrb++ = 0xCC;
602 *wrb++ = 0xDD;
603 *wrb = 0xFF;
604
605 status = be_mbox_notify_wait(adapter);
606
Ivan Vecera29849612010-12-14 05:43:19 +0000607 mutex_unlock(&adapter->mbox_lock);
Sathya Perla2243e2e2009-11-22 22:02:03 +0000608 return status;
609}
/* Create an event queue in the FW; uses the mailbox (init-time path).
 * On success fills eq->id and marks the queue created.
 * Returns 0 on success, -1 if the mbox lock wait was interrupted, else
 * the FW completion status.
 */
int be_cmd_eq_create(struct be_adapter *adapter,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe*/
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	/* Context is a bit-field blob; convert to LE before posting */
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
650
/* Use MCC */
/* Query a MAC address from the FW: the permanent (factory) MAC when
 * 'permanent' is true, otherwise the MAC currently programmed at
 * (if_handle, pmac_id). Copies the address into mac_addr on success.
 * Returns 0 on success, -EBUSY if no WRB is free, else FW status.
 */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
		u8 type, bool permanent, u32 if_handle, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
	req->type = type;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16) if_handle);
		req->pmac_id = cpu_to_le32(pmac_id);
		req->permanent = 0;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
689
/* Uses synchronous MCCQ */
/* Program an additional unicast MAC address on interface if_id; on
 * success the new handle is returned via *pmac_id.
 * Returns 0 on success, -EBUSY if no WRB is free, -EPERM if the FW
 * rejects the request as unauthorized, else FW status.
 */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		u32 if_id, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);

	req->hdr.domain = domain;
	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);

	/* Map the FW's "unauthorized" completion status to a std errno */
	if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
		status = -EPERM;

	return status;
}
728
Sathya Perlab31c50a2009-09-17 10:30:13 -0700729/* Uses synchronous MCCQ */
Sathya Perla30128032011-11-10 19:17:57 +0000730int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700731{
Sathya Perlab31c50a2009-09-17 10:30:13 -0700732 struct be_mcc_wrb *wrb;
733 struct be_cmd_req_pmac_del *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700734 int status;
735
Sathya Perla30128032011-11-10 19:17:57 +0000736 if (pmac_id == -1)
737 return 0;
738
Sathya Perlab31c50a2009-09-17 10:30:13 -0700739 spin_lock_bh(&adapter->mcc_lock);
740
741 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +0000742 if (!wrb) {
743 status = -EBUSY;
744 goto err;
745 }
Sathya Perlab31c50a2009-09-17 10:30:13 -0700746 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700747
Somnath Kotur106df1e2011-10-27 07:12:13 +0000748 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
749 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700750
Ajit Khapardef8617e02011-02-11 13:36:37 +0000751 req->hdr.domain = dom;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700752 req->if_id = cpu_to_le32(if_id);
753 req->pmac_id = cpu_to_le32(pmac_id);
754
Sathya Perlab31c50a2009-09-17 10:30:13 -0700755 status = be_mcc_notify_wait(adapter);
756
Sathya Perla713d03942009-11-22 22:02:45 +0000757err:
Sathya Perlab31c50a2009-09-17 10:30:13 -0700758 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700759 return status;
760}
761
Sathya Perlab31c50a2009-09-17 10:30:13 -0700762/* Uses Mbox */
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000763int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
764 struct be_queue_info *eq, bool no_delay, int coalesce_wm)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700765{
Sathya Perlab31c50a2009-09-17 10:30:13 -0700766 struct be_mcc_wrb *wrb;
767 struct be_cmd_req_cq_create *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700768 struct be_dma_mem *q_mem = &cq->dma_mem;
Sathya Perlab31c50a2009-09-17 10:30:13 -0700769 void *ctxt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700770 int status;
771
Ivan Vecera29849612010-12-14 05:43:19 +0000772 if (mutex_lock_interruptible(&adapter->mbox_lock))
773 return -1;
Sathya Perlab31c50a2009-09-17 10:30:13 -0700774
775 wrb = wrb_from_mbox(adapter);
776 req = embedded_payload(wrb);
777 ctxt = &req->context;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700778
Somnath Kotur106df1e2011-10-27 07:12:13 +0000779 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
780 OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700781
782 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000783 if (lancer_chip(adapter)) {
Padmanabh Ratnakar8b7756c2011-03-07 03:08:52 +0000784 req->hdr.version = 2;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000785 req->page_size = 1; /* 1 for 4K */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000786 AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
787 no_delay);
788 AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
789 __ilog2_u32(cq->len/256));
790 AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
791 AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
792 ctxt, 1);
793 AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
794 ctxt, eq->id);
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000795 } else {
796 AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
797 coalesce_wm);
798 AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
799 ctxt, no_delay);
800 AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
801 __ilog2_u32(cq->len/256));
802 AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000803 AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
804 AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000805 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700806
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700807 be_dws_cpu_to_le(ctxt, sizeof(req->context));
808
809 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
810
Sathya Perlab31c50a2009-09-17 10:30:13 -0700811 status = be_mbox_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700812 if (!status) {
Sathya Perlab31c50a2009-09-17 10:30:13 -0700813 struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700814 cq->id = le16_to_cpu(resp->cq_id);
815 cq->created = true;
816 }
Sathya Perlab31c50a2009-09-17 10:30:13 -0700817
Ivan Vecera29849612010-12-14 05:43:19 +0000818 mutex_unlock(&adapter->mbox_lock);
Sathya Perla5fb379e2009-06-18 00:02:59 +0000819
820 return status;
821}
822
823static u32 be_encoded_q_len(int q_len)
824{
825 u32 len_encoded = fls(q_len); /* log2(len) + 1 */
826 if (len_encoded == 16)
827 len_encoded = 0;
828 return len_encoded;
829}
830
Somnath Kotur34b1ef02011-06-01 00:33:22 +0000831int be_cmd_mccq_ext_create(struct be_adapter *adapter,
Sathya Perla5fb379e2009-06-18 00:02:59 +0000832 struct be_queue_info *mccq,
833 struct be_queue_info *cq)
834{
Sathya Perlab31c50a2009-09-17 10:30:13 -0700835 struct be_mcc_wrb *wrb;
Somnath Kotur34b1ef02011-06-01 00:33:22 +0000836 struct be_cmd_req_mcc_ext_create *req;
Sathya Perla5fb379e2009-06-18 00:02:59 +0000837 struct be_dma_mem *q_mem = &mccq->dma_mem;
Sathya Perlab31c50a2009-09-17 10:30:13 -0700838 void *ctxt;
Sathya Perla5fb379e2009-06-18 00:02:59 +0000839 int status;
840
Ivan Vecera29849612010-12-14 05:43:19 +0000841 if (mutex_lock_interruptible(&adapter->mbox_lock))
842 return -1;
Sathya Perlab31c50a2009-09-17 10:30:13 -0700843
844 wrb = wrb_from_mbox(adapter);
845 req = embedded_payload(wrb);
846 ctxt = &req->context;
Sathya Perla5fb379e2009-06-18 00:02:59 +0000847
Somnath Kotur106df1e2011-10-27 07:12:13 +0000848 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
849 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);
Sathya Perla5fb379e2009-06-18 00:02:59 +0000850
Ajit Khaparded4a2ac32010-03-11 01:35:59 +0000851 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000852 if (lancer_chip(adapter)) {
853 req->hdr.version = 1;
854 req->cq_id = cpu_to_le16(cq->id);
Sathya Perla5fb379e2009-06-18 00:02:59 +0000855
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000856 AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
857 be_encoded_q_len(mccq->len));
858 AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
859 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
860 ctxt, cq->id);
861 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
862 ctxt, 1);
863
864 } else {
865 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
866 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
867 be_encoded_q_len(mccq->len));
868 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
869 }
870
Somnath Koturcc4ce022010-10-21 07:11:14 -0700871 /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000872 req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
Sathya Perla5fb379e2009-06-18 00:02:59 +0000873 be_dws_cpu_to_le(ctxt, sizeof(req->context));
874
875 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
876
Sathya Perlab31c50a2009-09-17 10:30:13 -0700877 status = be_mbox_notify_wait(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +0000878 if (!status) {
879 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
880 mccq->id = le16_to_cpu(resp->id);
881 mccq->created = true;
882 }
Ivan Vecera29849612010-12-14 05:43:19 +0000883 mutex_unlock(&adapter->mbox_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700884
885 return status;
886}
887
Somnath Kotur34b1ef02011-06-01 00:33:22 +0000888int be_cmd_mccq_org_create(struct be_adapter *adapter,
889 struct be_queue_info *mccq,
890 struct be_queue_info *cq)
891{
892 struct be_mcc_wrb *wrb;
893 struct be_cmd_req_mcc_create *req;
894 struct be_dma_mem *q_mem = &mccq->dma_mem;
895 void *ctxt;
896 int status;
897
898 if (mutex_lock_interruptible(&adapter->mbox_lock))
899 return -1;
900
901 wrb = wrb_from_mbox(adapter);
902 req = embedded_payload(wrb);
903 ctxt = &req->context;
904
Somnath Kotur106df1e2011-10-27 07:12:13 +0000905 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
906 OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);
Somnath Kotur34b1ef02011-06-01 00:33:22 +0000907
908 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
909
910 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
911 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
912 be_encoded_q_len(mccq->len));
913 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
914
915 be_dws_cpu_to_le(ctxt, sizeof(req->context));
916
917 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
918
919 status = be_mbox_notify_wait(adapter);
920 if (!status) {
921 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
922 mccq->id = le16_to_cpu(resp->id);
923 mccq->created = true;
924 }
925
926 mutex_unlock(&adapter->mbox_lock);
927 return status;
928}
929
930int be_cmd_mccq_create(struct be_adapter *adapter,
931 struct be_queue_info *mccq,
932 struct be_queue_info *cq)
933{
934 int status;
935
936 status = be_cmd_mccq_ext_create(adapter, mccq, cq);
937 if (status && !lancer_chip(adapter)) {
938 dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
939 "or newer to avoid conflicting priorities between NIC "
940 "and FCoE traffic");
941 status = be_cmd_mccq_org_create(adapter, mccq, cq);
942 }
943 return status;
944}
945
Sathya Perla8788fdc2009-07-27 22:52:03 +0000946int be_cmd_txq_create(struct be_adapter *adapter,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700947 struct be_queue_info *txq,
948 struct be_queue_info *cq)
949{
Sathya Perlab31c50a2009-09-17 10:30:13 -0700950 struct be_mcc_wrb *wrb;
951 struct be_cmd_req_eth_tx_create *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700952 struct be_dma_mem *q_mem = &txq->dma_mem;
Sathya Perlab31c50a2009-09-17 10:30:13 -0700953 void *ctxt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700954 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700955
Padmanabh Ratnakar293c4a72011-11-16 02:02:23 +0000956 spin_lock_bh(&adapter->mcc_lock);
Sathya Perlab31c50a2009-09-17 10:30:13 -0700957
Padmanabh Ratnakar293c4a72011-11-16 02:02:23 +0000958 wrb = wrb_from_mccq(adapter);
959 if (!wrb) {
960 status = -EBUSY;
961 goto err;
962 }
963
Sathya Perlab31c50a2009-09-17 10:30:13 -0700964 req = embedded_payload(wrb);
965 ctxt = &req->context;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700966
Somnath Kotur106df1e2011-10-27 07:12:13 +0000967 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
968 OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700969
Padmanabh Ratnakar8b7756c2011-03-07 03:08:52 +0000970 if (lancer_chip(adapter)) {
971 req->hdr.version = 1;
972 AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
973 adapter->if_handle);
974 }
975
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700976 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
977 req->ulp_num = BE_ULP1_NUM;
978 req->type = BE_ETH_TX_RING_TYPE_STANDARD;
979
Sathya Perlab31c50a2009-09-17 10:30:13 -0700980 AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
981 be_encoded_q_len(txq->len));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700982 AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
983 AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);
984
985 be_dws_cpu_to_le(ctxt, sizeof(req->context));
986
987 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
988
Padmanabh Ratnakar293c4a72011-11-16 02:02:23 +0000989 status = be_mcc_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700990 if (!status) {
991 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
992 txq->id = le16_to_cpu(resp->cid);
993 txq->created = true;
994 }
Sathya Perlab31c50a2009-09-17 10:30:13 -0700995
Padmanabh Ratnakar293c4a72011-11-16 02:02:23 +0000996err:
997 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700998
999 return status;
1000}
1001
Sathya Perla482c9e72011-06-29 23:33:17 +00001002/* Uses MCC */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001003int be_cmd_rxq_create(struct be_adapter *adapter,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001004 struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001005 u32 if_id, u32 rss, u8 *rss_id)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001006{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001007 struct be_mcc_wrb *wrb;
1008 struct be_cmd_req_eth_rx_create *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001009 struct be_dma_mem *q_mem = &rxq->dma_mem;
1010 int status;
1011
Sathya Perla482c9e72011-06-29 23:33:17 +00001012 spin_lock_bh(&adapter->mcc_lock);
Sathya Perlab31c50a2009-09-17 10:30:13 -07001013
Sathya Perla482c9e72011-06-29 23:33:17 +00001014 wrb = wrb_from_mccq(adapter);
1015 if (!wrb) {
1016 status = -EBUSY;
1017 goto err;
1018 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001019 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001020
Somnath Kotur106df1e2011-10-27 07:12:13 +00001021 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1022 OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001023
1024 req->cq_id = cpu_to_le16(cq_id);
1025 req->frag_size = fls(frag_size) - 1;
1026 req->num_pages = 2;
1027 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1028 req->interface_id = cpu_to_le32(if_id);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001029 req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001030 req->rss_queue = cpu_to_le32(rss);
1031
Sathya Perla482c9e72011-06-29 23:33:17 +00001032 status = be_mcc_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001033 if (!status) {
1034 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
1035 rxq->id = le16_to_cpu(resp->id);
1036 rxq->created = true;
Sathya Perla3abcded2010-10-03 22:12:27 -07001037 *rss_id = resp->rss_id;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001038 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001039
Sathya Perla482c9e72011-06-29 23:33:17 +00001040err:
1041 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001042 return status;
1043}
1044
Sathya Perlab31c50a2009-09-17 10:30:13 -07001045/* Generic destroyer function for all types of queues
1046 * Uses Mbox
1047 */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001048int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001049 int queue_type)
1050{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001051 struct be_mcc_wrb *wrb;
1052 struct be_cmd_req_q_destroy *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001053 u8 subsys = 0, opcode = 0;
1054 int status;
1055
Ivan Vecera29849612010-12-14 05:43:19 +00001056 if (mutex_lock_interruptible(&adapter->mbox_lock))
1057 return -1;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001058
Sathya Perlab31c50a2009-09-17 10:30:13 -07001059 wrb = wrb_from_mbox(adapter);
1060 req = embedded_payload(wrb);
1061
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001062 switch (queue_type) {
1063 case QTYPE_EQ:
1064 subsys = CMD_SUBSYSTEM_COMMON;
1065 opcode = OPCODE_COMMON_EQ_DESTROY;
1066 break;
1067 case QTYPE_CQ:
1068 subsys = CMD_SUBSYSTEM_COMMON;
1069 opcode = OPCODE_COMMON_CQ_DESTROY;
1070 break;
1071 case QTYPE_TXQ:
1072 subsys = CMD_SUBSYSTEM_ETH;
1073 opcode = OPCODE_ETH_TX_DESTROY;
1074 break;
1075 case QTYPE_RXQ:
1076 subsys = CMD_SUBSYSTEM_ETH;
1077 opcode = OPCODE_ETH_RX_DESTROY;
1078 break;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001079 case QTYPE_MCCQ:
1080 subsys = CMD_SUBSYSTEM_COMMON;
1081 opcode = OPCODE_COMMON_MCC_DESTROY;
1082 break;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001083 default:
Sathya Perla5f0b8492009-07-27 22:52:56 +00001084 BUG();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001085 }
Ajit Khaparded744b442009-12-03 06:12:06 +00001086
Somnath Kotur106df1e2011-10-27 07:12:13 +00001087 be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
1088 NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001089 req->id = cpu_to_le16(q->id);
1090
Sathya Perlab31c50a2009-09-17 10:30:13 -07001091 status = be_mbox_notify_wait(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00001092 if (!status)
1093 q->created = false;
Sathya Perla5f0b8492009-07-27 22:52:56 +00001094
Ivan Vecera29849612010-12-14 05:43:19 +00001095 mutex_unlock(&adapter->mbox_lock);
Sathya Perla482c9e72011-06-29 23:33:17 +00001096 return status;
1097}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001098
Sathya Perla482c9e72011-06-29 23:33:17 +00001099/* Uses MCC */
1100int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1101{
1102 struct be_mcc_wrb *wrb;
1103 struct be_cmd_req_q_destroy *req;
1104 int status;
1105
1106 spin_lock_bh(&adapter->mcc_lock);
1107
1108 wrb = wrb_from_mccq(adapter);
1109 if (!wrb) {
1110 status = -EBUSY;
1111 goto err;
1112 }
1113 req = embedded_payload(wrb);
1114
Somnath Kotur106df1e2011-10-27 07:12:13 +00001115 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1116 OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
Sathya Perla482c9e72011-06-29 23:33:17 +00001117 req->id = cpu_to_le16(q->id);
1118
1119 status = be_mcc_notify_wait(adapter);
1120 if (!status)
1121 q->created = false;
1122
1123err:
1124 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001125 return status;
1126}
1127
Sathya Perlab31c50a2009-09-17 10:30:13 -07001128/* Create an rx filtering policy configuration on an i/f
Sathya Perlaf9449ab2011-10-24 02:45:01 +00001129 * Uses MCCQ
Sathya Perlab31c50a2009-09-17 10:30:13 -07001130 */
Sathya Perla73d540f2009-10-14 20:20:42 +00001131int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00001132 u32 *if_handle, u32 domain)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001133{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001134 struct be_mcc_wrb *wrb;
1135 struct be_cmd_req_if_create *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001136 int status;
1137
Sathya Perlaf9449ab2011-10-24 02:45:01 +00001138 spin_lock_bh(&adapter->mcc_lock);
Sathya Perlab31c50a2009-09-17 10:30:13 -07001139
Sathya Perlaf9449ab2011-10-24 02:45:01 +00001140 wrb = wrb_from_mccq(adapter);
1141 if (!wrb) {
1142 status = -EBUSY;
1143 goto err;
1144 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001145 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001146
Somnath Kotur106df1e2011-10-27 07:12:13 +00001147 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1148 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), wrb, NULL);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001149 req->hdr.domain = domain;
Sathya Perla73d540f2009-10-14 20:20:42 +00001150 req->capability_flags = cpu_to_le32(cap_flags);
1151 req->enable_flags = cpu_to_le32(en_flags);
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00001152
1153 req->pmac_invalid = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001154
Sathya Perlaf9449ab2011-10-24 02:45:01 +00001155 status = be_mcc_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001156 if (!status) {
1157 struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
1158 *if_handle = le32_to_cpu(resp->interface_id);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001159 }
1160
Sathya Perlaf9449ab2011-10-24 02:45:01 +00001161err:
1162 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001163 return status;
1164}
1165
Sathya Perlaf9449ab2011-10-24 02:45:01 +00001166/* Uses MCCQ */
Sathya Perla30128032011-11-10 19:17:57 +00001167int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001168{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001169 struct be_mcc_wrb *wrb;
1170 struct be_cmd_req_if_destroy *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001171 int status;
1172
Sathya Perla30128032011-11-10 19:17:57 +00001173 if (interface_id == -1)
Sathya Perlaf9449ab2011-10-24 02:45:01 +00001174 return 0;
Sathya Perlab31c50a2009-09-17 10:30:13 -07001175
Sathya Perlaf9449ab2011-10-24 02:45:01 +00001176 spin_lock_bh(&adapter->mcc_lock);
1177
1178 wrb = wrb_from_mccq(adapter);
1179 if (!wrb) {
1180 status = -EBUSY;
1181 goto err;
1182 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001183 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001184
Somnath Kotur106df1e2011-10-27 07:12:13 +00001185 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1186 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
Ajit Khaparde658681f2011-02-11 13:34:46 +00001187 req->hdr.domain = domain;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001188 req->interface_id = cpu_to_le32(interface_id);
Sathya Perlab31c50a2009-09-17 10:30:13 -07001189
Sathya Perlaf9449ab2011-10-24 02:45:01 +00001190 status = be_mcc_notify_wait(adapter);
1191err:
1192 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001193 return status;
1194}
1195
1196/* Get stats is a non embedded command: the request is not embedded inside
1197 * WRB but is a separate dma memory block
Sathya Perlab31c50a2009-09-17 10:30:13 -07001198 * Uses asynchronous MCC
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001199 */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001200int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001201{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001202 struct be_mcc_wrb *wrb;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00001203 struct be_cmd_req_hdr *hdr;
Sathya Perla713d03942009-11-22 22:02:45 +00001204 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001205
Sathya Perlab31c50a2009-09-17 10:30:13 -07001206 spin_lock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001207
Sathya Perlab31c50a2009-09-17 10:30:13 -07001208 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001209 if (!wrb) {
1210 status = -EBUSY;
1211 goto err;
1212 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00001213 hdr = nonemb_cmd->va;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001214
Somnath Kotur106df1e2011-10-27 07:12:13 +00001215 be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
1216 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00001217
1218 if (adapter->generation == BE_GEN3)
1219 hdr->version = 1;
1220
Sathya Perlab31c50a2009-09-17 10:30:13 -07001221 be_mcc_notify(adapter);
Ajit Khapardeb2aebe62011-02-20 11:41:39 +00001222 adapter->stats_cmd_sent = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001223
Sathya Perla713d03942009-11-22 22:02:45 +00001224err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07001225 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla713d03942009-11-22 22:02:45 +00001226 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001227}
1228
Selvin Xavier005d5692011-05-16 07:36:35 +00001229/* Lancer Stats */
1230int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1231 struct be_dma_mem *nonemb_cmd)
1232{
1233
1234 struct be_mcc_wrb *wrb;
1235 struct lancer_cmd_req_pport_stats *req;
Selvin Xavier005d5692011-05-16 07:36:35 +00001236 int status = 0;
1237
1238 spin_lock_bh(&adapter->mcc_lock);
1239
1240 wrb = wrb_from_mccq(adapter);
1241 if (!wrb) {
1242 status = -EBUSY;
1243 goto err;
1244 }
1245 req = nonemb_cmd->va;
Selvin Xavier005d5692011-05-16 07:36:35 +00001246
Somnath Kotur106df1e2011-10-27 07:12:13 +00001247 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1248 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
1249 nonemb_cmd);
Selvin Xavier005d5692011-05-16 07:36:35 +00001250
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +00001251 req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
Selvin Xavier005d5692011-05-16 07:36:35 +00001252 req->cmd_params.params.reset_stats = 0;
1253
Selvin Xavier005d5692011-05-16 07:36:35 +00001254 be_mcc_notify(adapter);
1255 adapter->stats_cmd_sent = true;
1256
1257err:
1258 spin_unlock_bh(&adapter->mcc_lock);
1259 return status;
1260}
1261
Sathya Perlab31c50a2009-09-17 10:30:13 -07001262/* Uses synchronous mcc */
Sathya Perlaea172a02011-08-02 19:57:42 +00001263int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
Ajit Khapardeb236916a2011-12-30 12:15:40 +00001264 u16 *link_speed, u8 *link_status, u32 dom)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001265{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001266 struct be_mcc_wrb *wrb;
1267 struct be_cmd_req_link_status *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001268 int status;
1269
Sathya Perlab31c50a2009-09-17 10:30:13 -07001270 spin_lock_bh(&adapter->mcc_lock);
1271
Ajit Khapardeb236916a2011-12-30 12:15:40 +00001272 if (link_status)
1273 *link_status = LINK_DOWN;
1274
Sathya Perlab31c50a2009-09-17 10:30:13 -07001275 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001276 if (!wrb) {
1277 status = -EBUSY;
1278 goto err;
1279 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001280 req = embedded_payload(wrb);
Sathya Perlaa8f447bd2009-06-18 00:10:27 +00001281
Padmanabh Ratnakar57cd80d2012-02-03 09:49:46 +00001282 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1283 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
1284
Ajit Khapardeb236916a2011-12-30 12:15:40 +00001285 if (adapter->generation == BE_GEN3 || lancer_chip(adapter))
Padmanabh Ratnakardaad6162011-11-16 02:03:45 +00001286 req->hdr.version = 1;
1287
Padmanabh Ratnakar57cd80d2012-02-03 09:49:46 +00001288 req->hdr.domain = dom;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001289
Sathya Perlab31c50a2009-09-17 10:30:13 -07001290 status = be_mcc_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001291 if (!status) {
1292 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
Sarveshwar Bandi0388f252009-10-28 04:15:20 -07001293 if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
Ajit Khapardeb236916a2011-12-30 12:15:40 +00001294 if (link_speed)
1295 *link_speed = le16_to_cpu(resp->link_speed);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00001296 if (mac_speed)
1297 *mac_speed = resp->mac_speed;
Sarveshwar Bandi0388f252009-10-28 04:15:20 -07001298 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +00001299 if (link_status)
1300 *link_status = resp->logical_link_status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001301 }
1302
Sathya Perla713d03942009-11-22 22:02:45 +00001303err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07001304 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001305 return status;
1306}
1307
Ajit Khaparde609ff3b2011-02-20 11:42:07 +00001308/* Uses synchronous mcc */
1309int be_cmd_get_die_temperature(struct be_adapter *adapter)
1310{
1311 struct be_mcc_wrb *wrb;
1312 struct be_cmd_req_get_cntl_addnl_attribs *req;
1313 int status;
1314
1315 spin_lock_bh(&adapter->mcc_lock);
1316
1317 wrb = wrb_from_mccq(adapter);
1318 if (!wrb) {
1319 status = -EBUSY;
1320 goto err;
1321 }
1322 req = embedded_payload(wrb);
1323
Somnath Kotur106df1e2011-10-27 07:12:13 +00001324 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1325 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
1326 wrb, NULL);
Ajit Khaparde609ff3b2011-02-20 11:42:07 +00001327
Somnath Kotur3de09452011-09-30 07:25:05 +00001328 be_mcc_notify(adapter);
Ajit Khaparde609ff3b2011-02-20 11:42:07 +00001329
1330err:
1331 spin_unlock_bh(&adapter->mcc_lock);
1332 return status;
1333}
1334
Somnath Kotur311fddc2011-03-16 21:22:43 +00001335/* Uses synchronous mcc */
1336int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1337{
1338 struct be_mcc_wrb *wrb;
1339 struct be_cmd_req_get_fat *req;
1340 int status;
1341
1342 spin_lock_bh(&adapter->mcc_lock);
1343
1344 wrb = wrb_from_mccq(adapter);
1345 if (!wrb) {
1346 status = -EBUSY;
1347 goto err;
1348 }
1349 req = embedded_payload(wrb);
1350
Somnath Kotur106df1e2011-10-27 07:12:13 +00001351 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1352 OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
Somnath Kotur311fddc2011-03-16 21:22:43 +00001353 req->fat_operation = cpu_to_le32(QUERY_FAT);
1354 status = be_mcc_notify_wait(adapter);
1355 if (!status) {
1356 struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
1357 if (log_size && resp->log_size)
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001358 *log_size = le32_to_cpu(resp->log_size) -
1359 sizeof(u32);
Somnath Kotur311fddc2011-03-16 21:22:43 +00001360 }
1361err:
1362 spin_unlock_bh(&adapter->mcc_lock);
1363 return status;
1364}
1365
1366void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1367{
1368 struct be_dma_mem get_fat_cmd;
1369 struct be_mcc_wrb *wrb;
1370 struct be_cmd_req_get_fat *req;
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001371 u32 offset = 0, total_size, buf_size,
1372 log_offset = sizeof(u32), payload_len;
Somnath Kotur311fddc2011-03-16 21:22:43 +00001373 int status;
1374
1375 if (buf_len == 0)
1376 return;
1377
1378 total_size = buf_len;
1379
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001380 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1381 get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1382 get_fat_cmd.size,
1383 &get_fat_cmd.dma);
1384 if (!get_fat_cmd.va) {
1385 status = -ENOMEM;
1386 dev_err(&adapter->pdev->dev,
1387 "Memory allocation failure while retrieving FAT data\n");
1388 return;
1389 }
1390
Somnath Kotur311fddc2011-03-16 21:22:43 +00001391 spin_lock_bh(&adapter->mcc_lock);
1392
Somnath Kotur311fddc2011-03-16 21:22:43 +00001393 while (total_size) {
1394 buf_size = min(total_size, (u32)60*1024);
1395 total_size -= buf_size;
1396
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001397 wrb = wrb_from_mccq(adapter);
1398 if (!wrb) {
1399 status = -EBUSY;
Somnath Kotur311fddc2011-03-16 21:22:43 +00001400 goto err;
1401 }
1402 req = get_fat_cmd.va;
Somnath Kotur311fddc2011-03-16 21:22:43 +00001403
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001404 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
Somnath Kotur106df1e2011-10-27 07:12:13 +00001405 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1406 OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
1407 &get_fat_cmd);
Somnath Kotur311fddc2011-03-16 21:22:43 +00001408
1409 req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1410 req->read_log_offset = cpu_to_le32(log_offset);
1411 req->read_log_length = cpu_to_le32(buf_size);
1412 req->data_buffer_size = cpu_to_le32(buf_size);
1413
1414 status = be_mcc_notify_wait(adapter);
1415 if (!status) {
1416 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1417 memcpy(buf + offset,
1418 resp->data_buffer,
Somnath Kotur92aa9212011-09-30 07:24:00 +00001419 le32_to_cpu(resp->read_log_length));
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001420 } else {
Somnath Kotur311fddc2011-03-16 21:22:43 +00001421 dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001422 goto err;
1423 }
Somnath Kotur311fddc2011-03-16 21:22:43 +00001424 offset += buf_size;
1425 log_offset += buf_size;
1426 }
1427err:
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001428 pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1429 get_fat_cmd.va,
1430 get_fat_cmd.dma);
Somnath Kotur311fddc2011-03-16 21:22:43 +00001431 spin_unlock_bh(&adapter->mcc_lock);
1432}
1433
/* Uses synchronous mcc.
 * Queries the adapter's firmware version strings.
 * @fw_ver:      buffer receiving the currently-running FW version string
 * @fw_on_flash: optional buffer (may be NULL) receiving the FW version
 *               resident on flash
 * Returns 0 on success, -EBUSY if no MCC WRB is available, else the
 * FW completion status from be_mcc_notify_wait().
 * NOTE(review): strcpy() assumes the callers' buffers are at least as
 * large as the fixed-size response strings — confirm at call sites.
 */
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
			char *fw_on_flash)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fw_version *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		/* Response overlays the same embedded WRB payload */
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
		strcpy(fw_ver, resp->firmware_version_string);
		if (fw_on_flash)
			strcpy(fw_on_flash, resp->fw_on_flash_version_string);
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1465
/* set the EQ delay interval of an EQ to specified value
 * Uses async mcc: the command is posted with be_mcc_notify() and this
 * function does not wait for its completion.
 * @eq_id: id of the event queue to reprogram
 * @eqd:   delay multiplier to program for that EQ
 * Returns 0 if the command was posted, -EBUSY if no MCC WRB is free.
 */
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_modify_eq_delay *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);

	/* Program a single EQ entry */
	req->num_eq = cpu_to_le32(1);
	req->delay[0].eq_id = cpu_to_le32(eq_id);
	req->delay[0].phase = 0;
	req->delay[0].delay_multiplier = cpu_to_le32(eqd);

	/* Fire-and-forget; completion is consumed by the MCC handler */
	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1498
/* Uses sycnhronous mcc.
 * Configures the VLAN filter table of an interface.
 * @if_id:       interface handle to configure
 * @vtag_array:  array of VLAN tags to program (ignored when promiscuous)
 * @num:         number of entries in @vtag_array
 * @untagged:    whether untagged frames are accepted
 * @promiscuous: when true, accept all VLANs and skip programming tags
 * Returns 0 on success, -EBUSY if no MCC WRB is free, else FW status.
 */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
			u32 num, bool untagged, bool promiscuous)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);

	req->interface_id = if_id;
	req->promiscuous = promiscuous;
	req->untagged = untagged;
	req->num_vlan = num;
	if (!promiscuous) {
		/* Explicit tag list only matters in non-promiscuous mode */
		memcpy(req->normal_vlan, vtag_array,
			req->num_vlan * sizeof(vtag_array[0]));
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1534
/* Programs the RX filter (promiscuous / allmulti / multicast list) for
 * the interface, using the pre-allocated adapter->rx_filter DMA buffer.
 * Uses synchronous MCC.
 * @flags: netdev flags (IFF_PROMISC / IFF_ALLMULTI) selecting the mode
 * @value: ON to enable promiscuous mode, anything else clears it
 *         (only the mask is set, so the flags are turned off)
 * Returns 0 on success, -EBUSY if no MCC WRB is free, else FW status.
 */
int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
{
	struct be_mcc_wrb *wrb;
	struct be_dma_mem *mem = &adapter->rx_filter;
	struct be_cmd_req_rx_filter *req = mem->va;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	/* The DMA buffer is reused; clear stale contents first */
	memset(req, 0, sizeof(*req));
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
				wrb, mem);

	req->if_id = cpu_to_le32(adapter->if_handle);
	if (flags & IFF_PROMISC) {
		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
					BE_IF_FLAGS_VLAN_PROMISCUOUS);
		if (value == ON)
			/* mask set + flags set => enable; mask set with
			 * flags left 0 => disable */
			req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
						BE_IF_FLAGS_VLAN_PROMISCUOUS);
	} else if (flags & IFF_ALLMULTI) {
		req->if_flags_mask = req->if_flags =
				cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
	} else {
		struct netdev_hw_addr *ha;
		int i = 0;

		req->if_flags_mask = req->if_flags =
				cpu_to_le32(BE_IF_FLAGS_MULTICAST);

		/* Reset mcast promisc mode if already set by setting mask
		 * and not setting flags field
		 */
		req->if_flags_mask |=
				cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);

		/* Program the exact multicast list from the netdev */
		req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
		netdev_for_each_mc_addr(ha, adapter->netdev)
			memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
	}

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1587
/* Uses synchrounous mcc.
 * Sets TX/RX pause (flow control) settings on the port.
 * @tx_fc: non-zero to enable transmission of pause frames
 * @rx_fc: non-zero to honor received pause frames
 * Returns 0 on success, -EBUSY if no MCC WRB is free, else FW status.
 */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_flow_control *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);

	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1616
/* Uses sycn mcc.
 * Reads the current TX/RX pause (flow control) settings of the port.
 * @tx_fc: out — non-zero if pause-frame transmission is enabled
 * @rx_fc: out — non-zero if received pause frames are honored
 * Outputs are written only on success.
 * Returns 0 on success, -EBUSY if no MCC WRB is free, else FW status.
 */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);
		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1648
/* Uses mbox (not MCC) — runs under mbox_lock mutex, typically during init.
 * Queries the firmware configuration of the function.
 * @port_num: out — physical port number of this function
 * @mode:     out — function mode bits reported by FW
 * @caps:     out — function capability bits reported by FW
 * Outputs are written only on success.
 * Returns 0 on success, -1 if the mutex wait was interrupted, else
 * the FW status from be_mbox_notify_wait().
 */
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
		u32 *mode, u32 *caps)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_query_fw_cfg *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
		*port_num = le32_to_cpu(resp->phys_port);
		*mode = le32_to_cpu(resp->function_mode);
		*caps = le32_to_cpu(resp->function_caps);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
sarveshwarb14074ea2009-08-05 13:05:24 -07001677
/* Uses mbox.
 * Issues a FUNCTION_RESET to the firmware for this PCI function.
 * The request carries no parameters; only the common header is sent.
 * Returns 0 on success, -1 if the mutex wait was interrupted, else
 * the FW status from be_mbox_notify_wait().
 */
int be_cmd_reset_function(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
Ajit Khaparde84517482009-09-04 03:12:16 +00001699
/* Configures RSS for the interface via the mailbox.
 * @rsstable:   CPU indirection table to program
 * @table_size: number of entries in @rsstable (must be a power of two;
 *              log2 of it is sent to FW)
 * Enables RSS hashing for TCP/IPv4, IPv4, TCP/IPv6 and IPv6 with a
 * fixed hash key (myhash).
 * Returns 0 on success, -1 if the mutex wait was interrupted, else
 * the FW status.
 */
int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_rss_config *req;
	/* Fixed RSS hash key */
	u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
			0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
			0x3ea83c02, 0x4a110304};
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);

	req->if_id = cpu_to_le32(adapter->if_handle);
	req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
				      RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6);
	/* FW takes log2 of the indirection table size */
	req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
	memcpy(req->cpu_table, rsstable, table_size);
	memcpy(req->hash, myhash, sizeof(myhash));
	be_dws_cpu_to_le(req->hash, sizeof(req->hash));

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
1731
/* Uses sync mcc.
 * Enables/disables the port beacon LED (used for port identification).
 * @port_num: physical port whose LED is controlled
 * @bcn:      beacon duration value passed to FW
 * @sts:      status duration value passed to FW
 * @state:    requested beacon state
 * Returns 0 on success, -EBUSY if no MCC WRB is free, else FW status.
 */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
			u8 bcn, u8 sts, u8 state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_enable_disable_beacon *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);

	req->port_num = port_num;
	req->beacon_state = state;
	req->beacon_duration = bcn;
	req->status_duration = sts;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1763
/* Uses sync mcc.
 * Reads the current beacon LED state of a port.
 * @port_num: physical port to query
 * @state:    out — beacon state as reported by FW (written only on success)
 * Returns 0 on success, -EBUSY if no MCC WRB is free, else FW status.
 */
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_beacon_state *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);

	req->port_num = port_num;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_beacon_state *resp =
						embedded_payload(wrb);
		*state = resp->beacon_state;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1796
/* Writes a chunk of a flash object on Lancer chips.
 * @cmd:         DMA buffer whose payload (after the request header)
 *               holds the data to be written
 * @data_size:   number of bytes to write; 0 marks end-of-file (EOF)
 * @data_offset: byte offset within the object to write at
 * @obj_name:    name of the flash object (e.g. firmware image)
 * @data_written: out — actual bytes written, valid on success
 * @addn_status:  out — FW additional status, valid on failure
 * Posts the command asynchronously, drops mcc_lock, then waits up to
 * 30s on flash_compl (completion handler stores adapter->flash_status).
 * Returns 0 on success, -EBUSY if no WRB, -1 on timeout, else FW status.
 */
int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
			u32 data_size, u32 data_offset, const char *obj_name,
			u32 *data_written, u8 *addn_status)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_write_object *req;
	struct lancer_cmd_resp_write_object *resp;
	void *ctxt = NULL;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				OPCODE_COMMON_WRITE_OBJECT,
				sizeof(struct lancer_cmd_req_write_object), wrb,
				NULL);

	ctxt = &req->context;
	AMAP_SET_BITS(struct amap_lancer_write_obj_context,
			write_length, ctxt, data_size);

	/* data_size == 0 signals EOF to the firmware */
	if (data_size == 0)
		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
				eof, ctxt, 1);
	else
		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
				eof, ctxt, 0);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));
	req->write_offset = cpu_to_le32(data_offset);
	strcpy(req->object_name, obj_name);
	req->descriptor_count = cpu_to_le32(1);
	req->buf_len = cpu_to_le32(data_size);
	/* Data follows the request header in the same DMA buffer */
	req->addr_low = cpu_to_le32((cmd->dma +
				sizeof(struct lancer_cmd_req_write_object))
				& 0xFFFFFFFF);
	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
				sizeof(struct lancer_cmd_req_write_object)));

	be_mcc_notify(adapter);
	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->flash_compl,
			msecs_to_jiffies(30000)))
		status = -1;
	else
		status = adapter->flash_status;

	/* NOTE(review): wrb is dereferenced here after mcc_lock has been
	 * released; this relies on the MCC queue not reusing this WRB in
	 * the meantime, and on timeout the response may be stale — verify.
	 */
	resp = embedded_payload(wrb);
	if (!status)
		*data_written = le32_to_cpu(resp->actual_write_len);
	else
		*addn_status = resp->additional_status;

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1866
/* Reads a chunk of a flash object on Lancer chips (synchronous MCC).
 * @cmd:         DMA buffer that receives the object data
 * @data_size:   number of bytes requested
 * @data_offset: byte offset within the object to read from
 * @obj_name:    name of the flash object to read
 * @data_read:   out — actual bytes read, valid on success
 * @eof:         out — non-zero when end of object reached, valid on success
 * @addn_status: out — FW additional status, valid on failure
 * Returns 0 on success, -EBUSY if no MCC WRB is free, else FW status.
 */
int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
		u32 data_size, u32 data_offset, const char *obj_name,
		u32 *data_read, u32 *eof, u8 *addn_status)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_read_object *req;
	struct lancer_cmd_resp_read_object *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_READ_OBJECT,
			sizeof(struct lancer_cmd_req_read_object), wrb,
			NULL);

	req->desired_read_len = cpu_to_le32(data_size);
	req->read_offset = cpu_to_le32(data_offset);
	strcpy(req->object_name, obj_name);
	req->descriptor_count = cpu_to_le32(1);
	req->buf_len = cpu_to_le32(data_size);
	/* Destination buffer address for the DMA'd object data */
	req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));

	status = be_mcc_notify_wait(adapter);

	/* Response overlays the embedded WRB payload */
	resp = embedded_payload(wrb);
	if (!status) {
		*data_read = le32_to_cpu(resp->actual_read_len);
		*eof = le32_to_cpu(resp->eof);
	} else {
		*addn_status = resp->additional_status;
	}

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1913
/* Writes a region of the flash ROM (BE2/BE3 firmware update path).
 * @cmd:          non-embedded DMA buffer holding the flashrom request
 *                plus the data to flash
 * @flash_type:   flash region/type code
 * @flash_opcode: operation code (write/erase/...)
 * @buf_size:     number of data bytes in @cmd
 * Posts the command asynchronously, drops mcc_lock, then waits up to
 * 40s on flash_compl (completion handler stores adapter->flash_status).
 * Returns 0 on success, -EBUSY if no WRB, -1 on timeout, else FW status.
 */
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
			u32 flash_type, u32 flash_opcode, u32 buf_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}
	req = cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);

	req->params.op_type = cpu_to_le32(flash_type);
	req->params.op_code = cpu_to_le32(flash_opcode);
	req->params.data_buf_size = cpu_to_le32(buf_size);

	/* Async post; flash ops are long-running, so wait outside the lock */
	be_mcc_notify(adapter);
	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->flash_compl,
			msecs_to_jiffies(40000)))
		status = -1;
	else
		status = adapter->flash_status;

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08001953
/* Reads the 4-byte CRC stored in the redboot region of flash.
 * Uses synchronous MCC with an embedded request.
 * @flashed_crc: out — 4 CRC bytes, written only on success
 * @offset:      flash offset of the CRC within the redboot region
 * Returns 0 on success, -EBUSY if no MCC WRB is free, else FW status.
 */
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
			 int offset)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4, wrb, NULL);

	req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
	req->params.offset = cpu_to_le32(offset);
	req->params.data_buf_size = cpu_to_le32(0x4);

	status = be_mcc_notify_wait(adapter);
	if (!status)
		/* The response overlays the embedded request payload, so
		 * req->params.data_buf now holds the returned CRC bytes */
		memcpy(flashed_crc, req->params.data_buf, 4);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00001986
/* Configures the magic-packet Wake-on-LAN MAC address (synchronous MCC).
 * @mac:        ETH_ALEN-byte MAC address that a magic packet must target
 * @nonemb_cmd: pre-allocated non-embedded DMA buffer for the request
 * Returns 0 on success, -EBUSY if no MCC WRB is free, else FW status.
 */
int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
			struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_acpi_wol_magic_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
		nonemb_cmd);
	memcpy(req->magic_mac, mac, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
Suresh Rff33a6e2009-12-03 16:15:52 -08002014
/* Enables or disables a loopback mode on a port (synchronous MCC).
 * Used by the ethtool self-test path before/after be_cmd_loopback_test().
 * @port_num:      port used as both source and destination of the loop
 * @loopback_type: loopback mode to configure
 * @enable:        non-zero to enter the loopback state
 * Returns 0 on success, -EBUSY if no MCC WRB is free, else FW status.
 */
int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
			u8 loopback_type, u8 enable)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_lmode *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
			NULL);

	req->src_port = port_num;
	req->dest_port = port_num;
	req->loopback_type = loopback_type;
	req->loopback_state = enable;

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
2046
/* Runs a firmware loopback packet test on a port (synchronous MCC).
 * @port_num:      port used as source and destination
 * @loopback_type: loopback mode under test
 * @pkt_size:      size of each test packet
 * @num_pkts:      number of packets to send
 * @pattern:       64-bit payload pattern for the test packets
 * On command success, returns the test result reported by FW in the
 * response (0 = pass); otherwise -EBUSY or the MCC failure status.
 */
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
		u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_loopback_test *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
	/* The test can take a while; extend the FW command timeout (secs) */
	req->hdr.timeout = cpu_to_le32(4);

	req->pattern = cpu_to_le64(pattern);
	req->src_port = cpu_to_le32(port_num);
	req->dest_port = cpu_to_le32(port_num);
	req->pkt_size = cpu_to_le32(pkt_size);
	req->num_pkts = cpu_to_le32(num_pkts);
	req->loopback_type = cpu_to_le32(loopback_type);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
		/* Return the test verdict, not just command delivery status */
		status = le32_to_cpu(resp->status);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
2085
/* Runs the firmware DDR DMA self-test (synchronous MCC).
 * Fills the request buffer with @pattern repeated byte-by-byte, asks FW
 * to DMA it through DDR and back, then compares the echoed buffer.
 * @pattern:  64-bit pattern replicated into the send buffer
 * @byte_cnt: number of bytes to transfer
 * @cmd:      non-embedded DMA buffer holding request and response
 * Returns 0 on pass, -EBUSY if no WRB, -1 on data mismatch/send error,
 * else the MCC failure status.
 */
int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
				u32 byte_cnt, struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_ddrdma_test *req;
	int status;
	int i, j = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = cmd->va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);

	req->pattern = cpu_to_le64(pattern);
	req->byte_count = cpu_to_le32(byte_cnt);
	/* Replicate the 8-byte pattern across the whole send buffer;
	 * j cycles 0..7 selecting the byte of the pattern to emit */
	for (i = 0; i < byte_cnt; i++) {
		req->snd_buff[i] = (u8)(pattern >> (j*8));
		j++;
		if (j > 7)
			j = 0;
	}

	status = be_mcc_notify_wait(adapter);

	if (!status) {
		struct be_cmd_resp_ddrdma_test *resp;
		resp = cmd->va;
		/* Fail if the echoed data differs or FW reported send error */
		if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
				resp->snd_err) {
			status = -1;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08002129
Dan Carpenterc196b022010-05-26 04:47:39 +00002130int be_cmd_get_seeprom_data(struct be_adapter *adapter,
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08002131 struct be_dma_mem *nonemb_cmd)
2132{
2133 struct be_mcc_wrb *wrb;
2134 struct be_cmd_req_seeprom_read *req;
2135 struct be_sge *sge;
2136 int status;
2137
2138 spin_lock_bh(&adapter->mcc_lock);
2139
2140 wrb = wrb_from_mccq(adapter);
Ajit Khapardee45ff012011-02-04 17:18:28 +00002141 if (!wrb) {
2142 status = -EBUSY;
2143 goto err;
2144 }
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08002145 req = nonemb_cmd->va;
2146 sge = nonembedded_sgl(wrb);
2147
Somnath Kotur106df1e2011-10-27 07:12:13 +00002148 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2149 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
2150 nonemb_cmd);
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08002151
2152 status = be_mcc_notify_wait(adapter);
2153
Ajit Khapardee45ff012011-02-04 17:18:28 +00002154err:
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08002155 spin_unlock_bh(&adapter->mcc_lock);
2156 return status;
2157}
Ajit Khapardeee3cb622010-07-01 03:51:00 +00002158
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002159int be_cmd_get_phy_info(struct be_adapter *adapter)
Ajit Khapardeee3cb622010-07-01 03:51:00 +00002160{
2161 struct be_mcc_wrb *wrb;
2162 struct be_cmd_req_get_phy_info *req;
Sathya Perla306f1342011-08-02 19:57:45 +00002163 struct be_dma_mem cmd;
Ajit Khapardeee3cb622010-07-01 03:51:00 +00002164 int status;
2165
2166 spin_lock_bh(&adapter->mcc_lock);
2167
2168 wrb = wrb_from_mccq(adapter);
2169 if (!wrb) {
2170 status = -EBUSY;
2171 goto err;
2172 }
Sathya Perla306f1342011-08-02 19:57:45 +00002173 cmd.size = sizeof(struct be_cmd_req_get_phy_info);
2174 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2175 &cmd.dma);
2176 if (!cmd.va) {
2177 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2178 status = -ENOMEM;
2179 goto err;
2180 }
Ajit Khapardeee3cb622010-07-01 03:51:00 +00002181
Sathya Perla306f1342011-08-02 19:57:45 +00002182 req = cmd.va;
Ajit Khapardeee3cb622010-07-01 03:51:00 +00002183
Somnath Kotur106df1e2011-10-27 07:12:13 +00002184 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2185 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
2186 wrb, &cmd);
Ajit Khapardeee3cb622010-07-01 03:51:00 +00002187
2188 status = be_mcc_notify_wait(adapter);
Sathya Perla306f1342011-08-02 19:57:45 +00002189 if (!status) {
2190 struct be_phy_info *resp_phy_info =
2191 cmd.va + sizeof(struct be_cmd_req_hdr);
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002192 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
2193 adapter->phy.interface_type =
Sathya Perla306f1342011-08-02 19:57:45 +00002194 le16_to_cpu(resp_phy_info->interface_type);
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002195 adapter->phy.auto_speeds_supported =
2196 le16_to_cpu(resp_phy_info->auto_speeds_supported);
2197 adapter->phy.fixed_speeds_supported =
2198 le16_to_cpu(resp_phy_info->fixed_speeds_supported);
2199 adapter->phy.misc_params =
2200 le32_to_cpu(resp_phy_info->misc_params);
Sathya Perla306f1342011-08-02 19:57:45 +00002201 }
2202 pci_free_consistent(adapter->pdev, cmd.size,
2203 cmd.va, cmd.dma);
Ajit Khapardeee3cb622010-07-01 03:51:00 +00002204err:
2205 spin_unlock_bh(&adapter->mcc_lock);
2206 return status;
2207}
Ajit Khapardee1d18732010-07-23 01:52:13 +00002208
2209int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2210{
2211 struct be_mcc_wrb *wrb;
2212 struct be_cmd_req_set_qos *req;
2213 int status;
2214
2215 spin_lock_bh(&adapter->mcc_lock);
2216
2217 wrb = wrb_from_mccq(adapter);
2218 if (!wrb) {
2219 status = -EBUSY;
2220 goto err;
2221 }
2222
2223 req = embedded_payload(wrb);
2224
Somnath Kotur106df1e2011-10-27 07:12:13 +00002225 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2226 OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
Ajit Khapardee1d18732010-07-23 01:52:13 +00002227
2228 req->hdr.domain = domain;
Ajit Khaparde6bff57a2011-02-11 13:33:02 +00002229 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2230 req->max_bps_nic = cpu_to_le32(bps);
Ajit Khapardee1d18732010-07-23 01:52:13 +00002231
2232 status = be_mcc_notify_wait(adapter);
2233
2234err:
2235 spin_unlock_bh(&adapter->mcc_lock);
2236 return status;
2237}
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00002238
2239int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2240{
2241 struct be_mcc_wrb *wrb;
2242 struct be_cmd_req_cntl_attribs *req;
2243 struct be_cmd_resp_cntl_attribs *resp;
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00002244 int status;
2245 int payload_len = max(sizeof(*req), sizeof(*resp));
2246 struct mgmt_controller_attrib *attribs;
2247 struct be_dma_mem attribs_cmd;
2248
2249 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2250 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2251 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
2252 &attribs_cmd.dma);
2253 if (!attribs_cmd.va) {
2254 dev_err(&adapter->pdev->dev,
2255 "Memory allocation failure\n");
2256 return -ENOMEM;
2257 }
2258
2259 if (mutex_lock_interruptible(&adapter->mbox_lock))
2260 return -1;
2261
2262 wrb = wrb_from_mbox(adapter);
2263 if (!wrb) {
2264 status = -EBUSY;
2265 goto err;
2266 }
2267 req = attribs_cmd.va;
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00002268
Somnath Kotur106df1e2011-10-27 07:12:13 +00002269 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2270 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
2271 &attribs_cmd);
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00002272
2273 status = be_mbox_notify_wait(adapter);
2274 if (!status) {
Joe Perches43d620c2011-06-16 19:08:06 +00002275 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00002276 adapter->hba_port_num = attribs->hba_attribs.phy_port;
2277 }
2278
2279err:
2280 mutex_unlock(&adapter->mbox_lock);
2281 pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
2282 attribs_cmd.dma);
2283 return status;
2284}
Sathya Perla2e588f82011-03-11 02:49:26 +00002285
/* Uses the MBOX.
 * Requests the BE3 "native" ERX API (and offers SW timestamp capability)
 * via SET_DRIVER_FUNCTION_CAP; on success, adapter->be3_native records
 * whether the firmware granted the native ERX API.
 * Returns 0 on success, -1 if the mbox lock is interrupted, -EBUSY if no
 * mbox WRB is available, or the firmware completion status.
 */
int be_cmd_req_native_mode(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_func_cap *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);

	/* valid_cap_flags marks which capability bits are being negotiated;
	 * cap_flags carries the capabilities actually requested.
	 */
	req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
				CAPABILITY_BE3_NATIVE_ERX_API);
	req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		/* Firmware echoes the granted capabilities back */
		struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
		adapter->be3_native = le32_to_cpu(resp->cap_flags) &
					CAPABILITY_BE3_NATIVE_ERX_API;
	}
err:
	mutex_unlock(&adapter->mbox_lock);
	return status;
}
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002321
/* Uses synchronous MCCQ.
 * Looks up a MAC for the given domain via GET_MAC_LIST. On success either
 *  - *pmac_id_active = true and *pmac_id holds the first active mac_id
 *    found in the returned list, or
 *  - *pmac_id_active = false and mac[] (ETH_ALEN bytes) holds the first
 *    permanent MAC address in the list.
 * Returns 0 on success, -ENOMEM/-EBUSY on local failure, or f/w status.
 */
int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
				bool *pmac_id_active, u32 *pmac_id, u8 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_mac_list *req;
	int status;
	int mac_count;
	struct be_dma_mem get_mac_list_cmd;
	int i;

	/* Allocate the response DMA buffer before taking mcc_lock */
	memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
	get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
	get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
			get_mac_list_cmd.size,
			&get_mac_list_cmd.dma);

	if (!get_mac_list_cmd.va) {
		dev_err(&adapter->pdev->dev,
				"Memory allocation failure during GET_MAC_LIST\n");
		return -ENOMEM;
	}

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto out;
	}

	req = get_mac_list_cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				OPCODE_COMMON_GET_MAC_LIST, sizeof(*req),
				wrb, &get_mac_list_cmd);

	req->hdr.domain = domain;
	req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
	req->perm_override = 1;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_mac_list *resp =
						get_mac_list_cmd.va;
		mac_count = resp->true_mac_count + resp->pseudo_mac_count;
		/* Mac list returned could contain one or more active mac_ids
		 * or one or more true or pseudo permanent mac addresses.
		 * If an active mac_id is present, return first active mac_id
		 * found.
		 */
		for (i = 0; i < mac_count; i++) {
			struct get_list_macaddr *mac_entry;
			u16 mac_addr_size;
			u32 mac_id;

			mac_entry = &resp->macaddr_list[i];
			mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
			/* mac_id is a 32 bit value and mac_addr size
			 * is 6 bytes
			 */
			if (mac_addr_size == sizeof(u32)) {
				*pmac_id_active = true;
				mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
				*pmac_id = le32_to_cpu(mac_id);
				goto out;
			}
		}
		/* If no active mac_id found, return first mac addr */
		*pmac_id_active = false;
		memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
								ETH_ALEN);
	}

out:
	/* Free the DMA buffer only after dropping the lock */
	spin_unlock_bh(&adapter->mcc_lock);
	pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
			get_mac_list_cmd.va, get_mac_list_cmd.dma);
	return status;
}
2402
2403/* Uses synchronous MCCQ */
2404int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
2405 u8 mac_count, u32 domain)
2406{
2407 struct be_mcc_wrb *wrb;
2408 struct be_cmd_req_set_mac_list *req;
2409 int status;
2410 struct be_dma_mem cmd;
2411
2412 memset(&cmd, 0, sizeof(struct be_dma_mem));
2413 cmd.size = sizeof(struct be_cmd_req_set_mac_list);
2414 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
2415 &cmd.dma, GFP_KERNEL);
2416 if (!cmd.va) {
2417 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2418 return -ENOMEM;
2419 }
2420
2421 spin_lock_bh(&adapter->mcc_lock);
2422
2423 wrb = wrb_from_mccq(adapter);
2424 if (!wrb) {
2425 status = -EBUSY;
2426 goto err;
2427 }
2428
2429 req = cmd.va;
2430 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2431 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
2432 wrb, &cmd);
2433
2434 req->hdr.domain = domain;
2435 req->mac_count = mac_count;
2436 if (mac_count)
2437 memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
2438
2439 status = be_mcc_notify_wait(adapter);
2440
2441err:
2442 dma_free_coherent(&adapter->pdev->dev, cmd.size,
2443 cmd.va, cmd.dma);
2444 spin_unlock_bh(&adapter->mcc_lock);
2445 return status;
2446}
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00002447
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002448int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
2449 u32 domain, u16 intf_id)
2450{
2451 struct be_mcc_wrb *wrb;
2452 struct be_cmd_req_set_hsw_config *req;
2453 void *ctxt;
2454 int status;
2455
2456 spin_lock_bh(&adapter->mcc_lock);
2457
2458 wrb = wrb_from_mccq(adapter);
2459 if (!wrb) {
2460 status = -EBUSY;
2461 goto err;
2462 }
2463
2464 req = embedded_payload(wrb);
2465 ctxt = &req->context;
2466
2467 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2468 OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL);
2469
2470 req->hdr.domain = domain;
2471 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
2472 if (pvid) {
2473 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
2474 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
2475 }
2476
2477 be_dws_cpu_to_le(req->context, sizeof(req->context));
2478 status = be_mcc_notify_wait(adapter);
2479
2480err:
2481 spin_unlock_bh(&adapter->mcc_lock);
2482 return status;
2483}
2484
2485/* Get Hyper switch config */
2486int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
2487 u32 domain, u16 intf_id)
2488{
2489 struct be_mcc_wrb *wrb;
2490 struct be_cmd_req_get_hsw_config *req;
2491 void *ctxt;
2492 int status;
2493 u16 vid;
2494
2495 spin_lock_bh(&adapter->mcc_lock);
2496
2497 wrb = wrb_from_mccq(adapter);
2498 if (!wrb) {
2499 status = -EBUSY;
2500 goto err;
2501 }
2502
2503 req = embedded_payload(wrb);
2504 ctxt = &req->context;
2505
2506 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2507 OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);
2508
2509 req->hdr.domain = domain;
2510 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt,
2511 intf_id);
2512 AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
2513 be_dws_cpu_to_le(req->context, sizeof(req->context));
2514
2515 status = be_mcc_notify_wait(adapter);
2516 if (!status) {
2517 struct be_cmd_resp_get_hsw_config *resp =
2518 embedded_payload(wrb);
2519 be_dws_le_to_cpu(&resp->context,
2520 sizeof(resp->context));
2521 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
2522 pvid, &resp->context);
2523 *pvid = le16_to_cpu(vid);
2524 }
2525
2526err:
2527 spin_unlock_bh(&adapter->mcc_lock);
2528 return status;
2529}
2530
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00002531int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
2532{
2533 struct be_mcc_wrb *wrb;
2534 struct be_cmd_req_acpi_wol_magic_config_v1 *req;
2535 int status;
2536 int payload_len = sizeof(*req);
2537 struct be_dma_mem cmd;
2538
2539 memset(&cmd, 0, sizeof(struct be_dma_mem));
2540 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
2541 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2542 &cmd.dma);
2543 if (!cmd.va) {
2544 dev_err(&adapter->pdev->dev,
2545 "Memory allocation failure\n");
2546 return -ENOMEM;
2547 }
2548
2549 if (mutex_lock_interruptible(&adapter->mbox_lock))
2550 return -1;
2551
2552 wrb = wrb_from_mbox(adapter);
2553 if (!wrb) {
2554 status = -EBUSY;
2555 goto err;
2556 }
2557
2558 req = cmd.va;
2559
2560 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2561 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
2562 payload_len, wrb, &cmd);
2563
2564 req->hdr.version = 1;
2565 req->query_options = BE_GET_WOL_CAP;
2566
2567 status = be_mbox_notify_wait(adapter);
2568 if (!status) {
2569 struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
2570 resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va;
2571
2572 /* the command could succeed misleadingly on old f/w
2573 * which is not aware of the V1 version. fake an error. */
2574 if (resp->hdr.response_length < payload_len) {
2575 status = -1;
2576 goto err;
2577 }
2578 adapter->wol_cap = resp->wol_settings;
2579 }
2580err:
2581 mutex_unlock(&adapter->mbox_lock);
2582 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2583 return status;
Somnath Kotur941a77d2012-05-17 22:59:03 +00002584
2585}
2586int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
2587 struct be_dma_mem *cmd)
2588{
2589 struct be_mcc_wrb *wrb;
2590 struct be_cmd_req_get_ext_fat_caps *req;
2591 int status;
2592
2593 if (mutex_lock_interruptible(&adapter->mbox_lock))
2594 return -1;
2595
2596 wrb = wrb_from_mbox(adapter);
2597 if (!wrb) {
2598 status = -EBUSY;
2599 goto err;
2600 }
2601
2602 req = cmd->va;
2603 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2604 OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
2605 cmd->size, wrb, cmd);
2606 req->parameter_type = cpu_to_le32(1);
2607
2608 status = be_mbox_notify_wait(adapter);
2609err:
2610 mutex_unlock(&adapter->mbox_lock);
2611 return status;
2612}
2613
2614int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
2615 struct be_dma_mem *cmd,
2616 struct be_fat_conf_params *configs)
2617{
2618 struct be_mcc_wrb *wrb;
2619 struct be_cmd_req_set_ext_fat_caps *req;
2620 int status;
2621
2622 spin_lock_bh(&adapter->mcc_lock);
2623
2624 wrb = wrb_from_mccq(adapter);
2625 if (!wrb) {
2626 status = -EBUSY;
2627 goto err;
2628 }
2629
2630 req = cmd->va;
2631 memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
2632 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2633 OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
2634 cmd->size, wrb, cmd);
2635
2636 status = be_mcc_notify_wait(adapter);
2637err:
2638 spin_unlock_bh(&adapter->mcc_lock);
2639 return status;
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00002640}
Parav Pandit6a4ab662012-03-26 14:27:12 +00002641
/* Issues a raw MCC command on behalf of the RoCE driver.
 * wrb_payload holds a fully-formed request (starting with a
 * be_cmd_req_hdr) and receives the response in place on return.
 * The low 16 bits of the completion status are reported through
 * *cmd_status when non-NULL.
 * Returns 0 on success, -EBUSY if no MCC WRB is free, or f/w status.
 */
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
			int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
	struct be_adapter *adapter = netdev_priv(netdev_handle);
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) wrb_payload;
	struct be_cmd_req_hdr *req;
	struct be_cmd_resp_hdr *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	/* req and resp deliberately alias the same embedded payload:
	 * the response overwrites the request buffer on completion.
	 */
	req = embedded_payload(wrb);
	resp = embedded_payload(wrb);

	/* Prepare the WRB from the caller's header, then copy the whole
	 * caller-built payload (including its header) over the request
	 * and convert it to little-endian for the hardware.
	 */
	be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
			hdr->opcode, wrb_payload_size, wrb, NULL);
	memcpy(req, wrb_payload, wrb_payload_size);
	be_dws_cpu_to_le(req, wrb_payload_size);

	status = be_mcc_notify_wait(adapter);
	if (cmd_status)
		*cmd_status = (status & 0xffff);
	/* NOTE(review): ext_status is always reported as 0 here; confirm
	 * whether the extended status should be extracted from the
	 * completion instead.
	 */
	if (ext_status)
		*ext_status = 0;
	/* Copy the response (header + payload) back to the caller's buffer */
	memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
	be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
EXPORT_SYMBOL(be_roce_mcc_cmd);