/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"

static struct be_cmd_priv_map cmd_priv_map[] = {
	{
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_SET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_ETH_GET_PPORT_STATS,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_PHY_DETAILS,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	}
};

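/* Returns false only when the opcode/subsystem pair is listed in
 * cmd_priv_map[] above and the function holds none of the privileges
 * required for it; commands not in the map are always allowed.
 */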
static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
{
	int i;
	int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
	u32 cmd_privileges = adapter->cmd_privileges;

	for (i = 0; i < num_entries; i++)
		if (opcode == cmd_priv_map[i].opcode &&
		    subsystem == cmd_priv_map[i].subsystem)
			if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
				return false;

	return true;
}

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

static void be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	if (be_error(adapter))
		return;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	u32 flags;

	if (compl->flags != 0) {
		flags = le32_to_cpu(compl->flags);
		if (flags & CQE_FLAGS_VALID_MASK) {
			compl->flags = flags;
			return true;
		}
	}
	return false;
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

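/* The WRB tags (tag0/tag1) hold the low/high 32 bits of the address of the
 * command's request header (see fill_wrb_tags()), allowing the completion
 * handler to recover a pointer to the corresponding response.
 */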
static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
{
	unsigned long addr;

	addr = tag1;
	addr = ((addr << 16) << 16) | tag0;
	return (void *)addr;
}

static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
{
	if (base_status == MCC_STATUS_NOT_SUPPORTED ||
	    base_status == MCC_STATUS_ILLEGAL_REQUEST ||
	    addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
	    (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
	     (base_status == MCC_STATUS_ILLEGAL_FIELD ||
	      addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
		return true;
	else
		return false;
}

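/* Process a single MCC completion: convert it to host endianness, recover
 * the originating command's opcode/subsystem from the tags, apply any
 * command-specific side effects and log unexpected failures.
 * Returns the completion status.
 */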
static int be_mcc_compl_process(struct be_adapter *adapter,
				struct be_mcc_compl *compl)
{
	enum mcc_base_status base_status;
	enum mcc_addl_status addl_status;
	struct be_cmd_resp_hdr *resp_hdr;
	u8 opcode = 0, subsystem = 0;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	base_status = base_status(compl->status);
	addl_status = addl_status(compl->status);

	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);

	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
		complete(&adapter->et_cmd_compl);
		return 0;
	}

	if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
	     (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
	    (subsystem == CMD_SUBSYSTEM_COMMON)) {
		adapter->flash_status = compl->status;
		complete(&adapter->et_cmd_compl);
	}

	if (base_status == MCC_STATUS_SUCCESS) {
		if (((opcode == OPCODE_ETH_GET_STATISTICS) ||
		     (opcode == OPCODE_ETH_GET_PPORT_STATS)) &&
		    (subsystem == CMD_SUBSYSTEM_ETH)) {
			be_parse_stats(adapter);
			adapter->stats_cmd_sent = false;
		}
		if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
		    subsystem == CMD_SUBSYSTEM_COMMON) {
			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
				(void *)resp_hdr;
			adapter->drv_stats.be_on_die_temperature =
				resp->on_die_temperature;
		}
	} else {
		if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
			adapter->be_get_temp_freq = 0;

		if (be_skip_err_log(opcode, base_status, addl_status))
			return compl->status;

		if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
			dev_warn(&adapter->pdev->dev,
				 "VF is not privileged to issue opcode %d-%d\n",
				 opcode, subsystem);
		} else {
			dev_err(&adapter->pdev->dev,
				"opcode %d-%d failed:status %d-%d\n",
				opcode, subsystem, base_status, addl_status);
		}
	}
	return compl->status;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
					struct be_async_event_link_state *evt)
{
	/* When link status changes, link speed must be re-queried from FW */
	adapter->phy.link_speed = -1;

	/* On BEx the FW does not send a separate link status
	 * notification for physical and logical link.
	 * On other chips just process the logical link
	 * status notification
	 */
	if (!BEx_chip(adapter) &&
	    !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
		return;

	/* For the initial link status do not rely on the ASYNC event as
	 * it may not be received in some cases.
	 */
	if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
		be_link_status_update(adapter,
				      evt->port_link_status & LINK_STATUS_MASK);
}

/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
			struct be_async_event_grp5_cos_priority *evt)
{
	if (evt->valid) {
		adapter->vlan_prio_bmap = evt->available_priority_bmap;
		adapter->recommended_prio &= ~VLAN_PRIO_MASK;
		adapter->recommended_prio =
			evt->reco_default_priority << VLAN_PRIO_SHIFT;
	}
}

/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
			struct be_async_event_grp5_qos_link_speed *evt)
{
	if (adapter->phy.link_speed >= 0 &&
	    evt->physical_port == adapter->port_num)
		adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
}

/* Grp5 PVID evt */
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
			struct be_async_event_grp5_pvid_state *evt)
{
	if (evt->enabled) {
		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
		dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
	} else {
		adapter->pvid = 0;
	}
}

static void be_async_grp5_evt_process(struct be_adapter *adapter,
				      u32 trailer, struct be_mcc_compl *evt)
{
	u8 event_type = 0;

	event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
			ASYNC_TRAILER_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_EVENT_COS_PRIORITY:
		be_async_grp5_cos_priority_process(adapter,
			(struct be_async_event_grp5_cos_priority *)evt);
		break;
	case ASYNC_EVENT_QOS_SPEED:
		be_async_grp5_qos_speed_process(adapter,
			(struct be_async_event_grp5_qos_link_speed *)evt);
		break;
	case ASYNC_EVENT_PVID_STATE:
		be_async_grp5_pvid_state_process(adapter,
			(struct be_async_event_grp5_pvid_state *)evt);
		break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown grp5 event 0x%x!\n",
			 event_type);
		break;
	}
}

static void be_async_dbg_evt_process(struct be_adapter *adapter,
				     u32 trailer, struct be_mcc_compl *cmp)
{
	u8 event_type = 0;
	struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp;

	event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
			ASYNC_TRAILER_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_DEBUG_EVENT_TYPE_QNQ:
		if (evt->valid)
			adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
		adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
		break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
			 event_type);
		break;
	}
}

static inline bool is_link_state_evt(u32 trailer)
{
	return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
				ASYNC_EVENT_CODE_LINK_STATE;
}

static inline bool is_grp5_evt(u32 trailer)
{
	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
				ASYNC_EVENT_CODE_GRP_5);
}

static inline bool is_dbg_evt(u32 trailer)
{
	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
				ASYNC_EVENT_CODE_QNQ);
}

static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	adapter->mcc_obj.rearm_cq = false;
	be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

int be_process_mcc(struct be_adapter *adapter)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(compl->flags))
				be_async_link_state_process(adapter,
					(struct be_async_event_link_state *)compl);
			else if (is_grp5_evt(compl->flags))
				be_async_grp5_evt_process(adapter,
							  compl->flags, compl);
			else if (is_dbg_evt(compl->flags))
				be_async_dbg_evt_process(adapter,
							 compl->flags, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	if (num)
		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);

	spin_unlock(&adapter->mcc_cq_lock);
	return status;
}

/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout	120000 /* 12s timeout */
	int i, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	for (i = 0; i < mcc_timeout; i++) {
		if (be_error(adapter))
			return -EIO;

		local_bh_disable();
		status = be_process_mcc(adapter);
		local_bh_enable();

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "FW not responding\n");
		adapter->fw_timeout = true;
		return -EIO;
	}
	return status;
}

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	int status;
	struct be_mcc_wrb *wrb;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
	u16 index = mcc_obj->q.head;
	struct be_cmd_resp_hdr *resp;

	index_dec(&index, mcc_obj->q.len);
	wrb = queue_index_node(&mcc_obj->q, index);

	resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);

	be_mcc_notify(adapter);

	status = be_mcc_wait_compl(adapter);
	if (status == -EIO)
		goto out;

	status = (resp->base_status |
		  ((resp->addl_status & CQE_ADDL_STATUS_MASK) <<
		   CQE_ADDL_STATUS_SHIFT));
out:
	return status;
}

static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	do {
		if (be_error(adapter))
			return -EIO;

		ready = ioread32(db);
		if (ready == 0xffffffff)
			return -1;

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "FW not responding\n");
			adapter->fw_timeout = true;
			be_detect_error(adapter);
			return -1;
		}

		msleep(1);
		msecs++;
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}

static u16 be_POST_stage_get(struct be_adapter *adapter)
{
	u32 sem;

	if (BEx_chip(adapter))
		sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
	else
		pci_read_config_dword(adapter->pdev,
				      SLIPORT_SEMAPHORE_OFFSET_SH, &sem);

	return sem & POST_STAGE_MASK;
}

static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(1000);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

static bool lancer_provisioning_error(struct be_adapter *adapter)
{
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET);
		sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET);

		if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
		    sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
			return true;
	}
	return false;
}

int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	bool resource_error;

	resource_error = lancer_provisioning_error(adapter);
	if (resource_error)
		return -EAGAIN;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	/* Stop error recovery if the error is not recoverable.
	 * A no-resource error is temporary and goes away once the PF
	 * provisions resources.
	 */
	resource_error = lancer_provisioning_error(adapter);
	if (resource_error)
		status = -EAGAIN;

	return status;
}

int be_fw_wait_ready(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;
	struct device *dev = &adapter->pdev->dev;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		return status;
	}

	do {
		stage = be_POST_stage_get(adapter);
		if (stage == POST_STAGE_ARMFW_RDY)
			return 0;

		dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
		if (msleep_interruptible(2000)) {
			dev_err(dev, "Waiting for POST aborted\n");
			return -EINTR;
		}
		timeout += 2;
	} while (timeout < 60);

	dev_err(dev, "POST timeout; stage=0x%x\n", stage);
	return -1;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

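/* Stash the address of the request header in the WRB tags so that
 * be_decode_resp_hdr() can later recover the response from the completion.
 */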
static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
{
	wrb->tag0 = addr & 0xFFFFFFFF;
	wrb->tag1 = upper_32_bits(addr);
}

/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				   u8 subsystem, u8 opcode, int cmd_len,
				   struct be_mcc_wrb *wrb,
				   struct be_dma_mem *mem)
{
	struct be_sge *sge;

	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;
	fill_wrb_tags(wrb, (ulong) req_hdr);
	wrb->payload_length = cmd_len;
	if (mem) {
		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
			MCC_WRB_SGE_CNT_SHIFT;
		sge = nonembedded_sgl(wrb);
		sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
		sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
		sge->len = cpu_to_le32(mem->size);
	} else
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	be_dws_cpu_to_le(wrb, 8);
}

static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
				      struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (!mccq->created)
		return NULL;

	if (atomic_read(&mccq->used) >= mccq->len)
		return NULL;

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

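/* The helpers below choose between the MCC queue and the bootstrap mailbox:
 * the MCC queue is used once it has been created, the mailbox otherwise.
 */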
static bool use_mcc(struct be_adapter *adapter)
{
	return adapter->mcc_obj.q.created;
}

/* Must be used only in process context */
static int be_cmd_lock(struct be_adapter *adapter)
{
	if (use_mcc(adapter)) {
		spin_lock_bh(&adapter->mcc_lock);
		return 0;
	} else {
		return mutex_lock_interruptible(&adapter->mbox_lock);
	}
}

/* Must be used only in process context */
static void be_cmd_unlock(struct be_adapter *adapter)
{
	if (use_mcc(adapter))
		spin_unlock_bh(&adapter->mcc_lock);
	else
		return mutex_unlock(&adapter->mbox_lock);
}

static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
				      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;

	if (use_mcc(adapter)) {
		dest_wrb = wrb_from_mccq(adapter);
		if (!dest_wrb)
			return NULL;
	} else {
		dest_wrb = wrb_from_mbox(adapter);
	}

	memcpy(dest_wrb, wrb, sizeof(*wrb));
	if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
		fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));

	return dest_wrb;
}

/* Must be used only in process context */
static int be_cmd_notify_wait(struct be_adapter *adapter,
			      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;
	int status;

	status = be_cmd_lock(adapter);
	if (status)
		return status;

	dest_wrb = be_cmd_copy(adapter, wrb);
	if (!dest_wrb) {
		status = -EBUSY;
		goto unlock;
	}

	if (use_mcc(adapter))
		status = be_mcc_notify_wait(adapter);
	else
		status = be_mbox_notify_wait(adapter);

	if (!status)
		memcpy(wrb, dest_wrb, sizeof(*wrb));

unlock:
	be_cmd_unlock(adapter);
	return status;
}

/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

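/* Uses mbox */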
int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eqo->q.dma_mem;
	int status, ver = 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
			       NULL);

	/* Support for EQ_CREATEv2 is available only from SH-R onwards */
	if (!(BEx_chip(adapter) || lancer_chip(adapter)))
		ver = 2;

	req->hdr.version = ver;
	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe */
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
		      __ilog2_u32(eqo->q.len / 256));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
		eqo->q.id = le16_to_cpu(resp->eq_id);
		eqo->msix_idx =
			(ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
		eqo->q.created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Use MCC */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			  bool permanent, u32 if_handle, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
			       NULL);
	req->type = MAC_ADDRESS_TYPE_NETWORK;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16) if_handle);
		req->pmac_id = cpu_to_le32(pmac_id);
		req->permanent = 0;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		    u32 if_id, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
			       NULL);

	req->hdr.domain = domain;
	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);

	if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
		status = -EPERM;

	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	if (pmac_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL);

	req->hdr.domain = dom;
	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
		     struct be_queue_info *eq, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	if (BEx_chip(adapter)) {
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
			      coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
			      ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
	} else {
		req->hdr.version = 2;
		req->page_size = 1; /* 1 for 4K */

		/* coalesce-wm field in this cmd is not relevant to Lancer.
		 * Lancer uses COMMON_MODIFY_CQ to set this field
		 */
		if (!lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
				      ctxt, coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
			      no_delay);
		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */
	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}

static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
				  struct be_queue_info *mccq,
				  struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_ext_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (BEx_chip(adapter)) {
		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
			      be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
	} else {
		req->hdr.version = 1;
		req->cq_id = cpu_to_le16(cq->id);

		AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt,
			      be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id,
			      ctxt, cq->id);
		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid,
			      ctxt, 1);
	}

	/* Subscribe to Link State and Group 5 Events (bits 1 and 5 set) */
	req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
	req->async_event_bitmap[0] |= cpu_to_le32(1 << ASYNC_EVENT_CODE_QNQ);
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&adapter->mbox_lock);

	return status;
}

static int be_cmd_mccq_org_create(struct be_adapter *adapter,
				  struct be_queue_info *mccq,
				  struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
		      be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_mccq_create(struct be_adapter *adapter,
		       struct be_queue_info *mccq, struct be_queue_info *cq)
{
	int status;

	status = be_cmd_mccq_ext_create(adapter, mccq, cq);
	if (status && BEx_chip(adapter)) {
		dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
			 "or newer to avoid conflicting priorities between NIC "
			 "and FCoE traffic");
		status = be_cmd_mccq_org_create(adapter, mccq, cq);
	}
	return status;
}

int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_mcc_wrb wrb = {0};
	struct be_cmd_req_eth_tx_create *req;
	struct be_queue_info *txq = &txo->q;
	struct be_queue_info *cq = &txo->cq;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	int status, ver = 0;

	req = embedded_payload(&wrb);
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);

	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
	} else if (BEx_chip(adapter)) {
		if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
			req->hdr.version = 2;
	} else { /* For SH */
		req->hdr.version = 2;
	}

	if (req->hdr.version > 0)
		req->if_id = cpu_to_le16(adapter->if_handle);
	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;
	req->cq_id = cpu_to_le16(cq->id);
	req->queue_size = be_encoded_q_len(txq->len);
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	ver = req->hdr.version;

	status = be_cmd_notify_wait(adapter, &wrb);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);
		txq->id = le16_to_cpu(resp->cid);
		if (ver == 2)
			txo->db_offset = le32_to_cpu(resp->db_offset);
		else
			txo->db_offset = DB_TXULP1_OFFSET;
		txq->created = true;
	}

	return status;
}

/* Uses MCC */
int be_cmd_rxq_create(struct be_adapter *adapter,
		      struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		      u32 if_id, u32 rss, u8 *rss_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);

	req->cq_id = cpu_to_le16(cq_id);
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
		*rss_id = resp->rss_id;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001301 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001302
Sathya Perla482c9e72011-06-29 23:33:17 +00001303err:
1304 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001305 return status;
1306}
1307
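/*
 * Illustrative sketch (not part of the upstream driver): be_cmd_rxq_create()
 * above encodes frag_size as its log2 via fls(frag_size) - 1, so callers are
 * expected to pass a power of two. fls() is re-implemented with a plain loop
 * below so the example stands alone outside the kernel.
 */
#if 0	/* example only, not compiled with the driver */
#include <stdio.h>

/* find-last-set: 1-based index of the highest set bit, 0 when no bit is set */
static int fls_example(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int frag_size = 2048;

	/* 2048 == 1 << 11, so the encoded value programmed to firmware is 11 */
	printf("encoded frag_size = %d\n", fls_example(frag_size) - 1);
	return 0;
}
#endif
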
Sathya Perlab31c50a2009-09-17 10:30:13 -07001308/* Generic destroy routine for all types of queues
1309 * Uses Mbox
1310 */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001311int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301312 int queue_type)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001313{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001314 struct be_mcc_wrb *wrb;
1315 struct be_cmd_req_q_destroy *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001316 u8 subsys = 0, opcode = 0;
1317 int status;
1318
Ivan Vecera29849612010-12-14 05:43:19 +00001319 if (mutex_lock_interruptible(&adapter->mbox_lock))
1320 return -1;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001321
Sathya Perlab31c50a2009-09-17 10:30:13 -07001322 wrb = wrb_from_mbox(adapter);
1323 req = embedded_payload(wrb);
1324
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001325 switch (queue_type) {
1326 case QTYPE_EQ:
1327 subsys = CMD_SUBSYSTEM_COMMON;
1328 opcode = OPCODE_COMMON_EQ_DESTROY;
1329 break;
1330 case QTYPE_CQ:
1331 subsys = CMD_SUBSYSTEM_COMMON;
1332 opcode = OPCODE_COMMON_CQ_DESTROY;
1333 break;
1334 case QTYPE_TXQ:
1335 subsys = CMD_SUBSYSTEM_ETH;
1336 opcode = OPCODE_ETH_TX_DESTROY;
1337 break;
1338 case QTYPE_RXQ:
1339 subsys = CMD_SUBSYSTEM_ETH;
1340 opcode = OPCODE_ETH_RX_DESTROY;
1341 break;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001342 case QTYPE_MCCQ:
1343 subsys = CMD_SUBSYSTEM_COMMON;
1344 opcode = OPCODE_COMMON_MCC_DESTROY;
1345 break;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001346 default:
Sathya Perla5f0b8492009-07-27 22:52:56 +00001347 BUG();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001348 }
Ajit Khaparded744b442009-12-03 06:12:06 +00001349
Somnath Kotur106df1e2011-10-27 07:12:13 +00001350 be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301351 NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001352 req->id = cpu_to_le16(q->id);
1353
Sathya Perlab31c50a2009-09-17 10:30:13 -07001354 status = be_mbox_notify_wait(adapter);
Padmanabh Ratnakaraa790db2012-10-20 06:03:25 +00001355 q->created = false;
Sathya Perla5f0b8492009-07-27 22:52:56 +00001356
Ivan Vecera29849612010-12-14 05:43:19 +00001357 mutex_unlock(&adapter->mbox_lock);
Sathya Perla482c9e72011-06-29 23:33:17 +00001358 return status;
1359}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001360
Sathya Perla482c9e72011-06-29 23:33:17 +00001361/* Uses MCC */
1362int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1363{
1364 struct be_mcc_wrb *wrb;
1365 struct be_cmd_req_q_destroy *req;
1366 int status;
1367
1368 spin_lock_bh(&adapter->mcc_lock);
1369
1370 wrb = wrb_from_mccq(adapter);
1371 if (!wrb) {
1372 status = -EBUSY;
1373 goto err;
1374 }
1375 req = embedded_payload(wrb);
1376
Somnath Kotur106df1e2011-10-27 07:12:13 +00001377 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301378 OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
Sathya Perla482c9e72011-06-29 23:33:17 +00001379 req->id = cpu_to_le16(q->id);
1380
1381 status = be_mcc_notify_wait(adapter);
Padmanabh Ratnakaraa790db2012-10-20 06:03:25 +00001382 q->created = false;
Sathya Perla482c9e72011-06-29 23:33:17 +00001383
1384err:
1385 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001386 return status;
1387}
1388
Sathya Perlab31c50a2009-09-17 10:30:13 -07001389/* Create an rx filtering policy configuration on an i/f
Sathya Perlabea50982013-08-27 16:57:33 +05301390 * Will use MBOX only if MCCQ has not been created.
Sathya Perlab31c50a2009-09-17 10:30:13 -07001391 */
Sathya Perla73d540f2009-10-14 20:20:42 +00001392int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00001393 u32 *if_handle, u32 domain)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001394{
Sathya Perlabea50982013-08-27 16:57:33 +05301395 struct be_mcc_wrb wrb = {0};
Sathya Perlab31c50a2009-09-17 10:30:13 -07001396 struct be_cmd_req_if_create *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001397 int status;
1398
Sathya Perlabea50982013-08-27 16:57:33 +05301399 req = embedded_payload(&wrb);
Somnath Kotur106df1e2011-10-27 07:12:13 +00001400 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301401 OPCODE_COMMON_NTWK_INTERFACE_CREATE,
1402 sizeof(*req), &wrb, NULL);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001403 req->hdr.domain = domain;
Sathya Perla73d540f2009-10-14 20:20:42 +00001404 req->capability_flags = cpu_to_le32(cap_flags);
1405 req->enable_flags = cpu_to_le32(en_flags);
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00001406 req->pmac_invalid = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001407
Sathya Perlabea50982013-08-27 16:57:33 +05301408 status = be_cmd_notify_wait(adapter, &wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001409 if (!status) {
Sathya Perlabea50982013-08-27 16:57:33 +05301410 struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001411 *if_handle = le32_to_cpu(resp->interface_id);
Sathya Perlab5bb9772013-07-23 15:25:01 +05301412
1413 /* Hack to retrieve VF's pmac-id on BE3 */
1414 if (BE3_chip(adapter) && !be_physfn(adapter))
1415 adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001416 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001417 return status;
1418}
1419
Sathya Perlaf9449ab2011-10-24 02:45:01 +00001420/* Uses MCCQ */
Sathya Perla30128032011-11-10 19:17:57 +00001421int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001422{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001423 struct be_mcc_wrb *wrb;
1424 struct be_cmd_req_if_destroy *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001425 int status;
1426
Sathya Perla30128032011-11-10 19:17:57 +00001427 if (interface_id == -1)
Sathya Perlaf9449ab2011-10-24 02:45:01 +00001428 return 0;
Sathya Perlab31c50a2009-09-17 10:30:13 -07001429
Sathya Perlaf9449ab2011-10-24 02:45:01 +00001430 spin_lock_bh(&adapter->mcc_lock);
1431
1432 wrb = wrb_from_mccq(adapter);
1433 if (!wrb) {
1434 status = -EBUSY;
1435 goto err;
1436 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001437 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001438
Somnath Kotur106df1e2011-10-27 07:12:13 +00001439 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301440 OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
1441 sizeof(*req), wrb, NULL);
Ajit Khaparde658681f2011-02-11 13:34:46 +00001442 req->hdr.domain = domain;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001443 req->interface_id = cpu_to_le32(interface_id);
Sathya Perlab31c50a2009-09-17 10:30:13 -07001444
Sathya Perlaf9449ab2011-10-24 02:45:01 +00001445 status = be_mcc_notify_wait(adapter);
1446err:
1447 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001448 return status;
1449}
1450
1451/* Get stats is a non-embedded command: the request is not embedded inside
1452 * the WRB but is a separate DMA memory block
Sathya Perlab31c50a2009-09-17 10:30:13 -07001453 * Uses asynchronous MCC
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001454 */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001455int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001456{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001457 struct be_mcc_wrb *wrb;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00001458 struct be_cmd_req_hdr *hdr;
Sathya Perla713d03942009-11-22 22:02:45 +00001459 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001460
Sathya Perlab31c50a2009-09-17 10:30:13 -07001461 spin_lock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001462
Sathya Perlab31c50a2009-09-17 10:30:13 -07001463 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001464 if (!wrb) {
1465 status = -EBUSY;
1466 goto err;
1467 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00001468 hdr = nonemb_cmd->va;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001469
Somnath Kotur106df1e2011-10-27 07:12:13 +00001470 be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301471 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
1472 nonemb_cmd);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00001473
Sathya Perlaca34fe32012-11-06 17:48:56 +00001474	/* BE2 supports only version 0 of this cmd; BE3/Lancer use v1, newer chips v2 */
Ajit Khaparde61000862013-10-03 16:16:33 -05001475 if (BE2_chip(adapter))
1476 hdr->version = 0;
1477 if (BE3_chip(adapter) || lancer_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00001478 hdr->version = 1;
Ajit Khaparde61000862013-10-03 16:16:33 -05001479 else
1480 hdr->version = 2;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00001481
Sathya Perlab31c50a2009-09-17 10:30:13 -07001482 be_mcc_notify(adapter);
Ajit Khapardeb2aebe62011-02-20 11:41:39 +00001483 adapter->stats_cmd_sent = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001484
Sathya Perla713d03942009-11-22 22:02:45 +00001485err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07001486 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla713d03942009-11-22 22:02:45 +00001487 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001488}
1489
Selvin Xavier005d5692011-05-16 07:36:35 +00001490/* Lancer Stats */
1491int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301492 struct be_dma_mem *nonemb_cmd)
Selvin Xavier005d5692011-05-16 07:36:35 +00001493{
1494
1495 struct be_mcc_wrb *wrb;
1496 struct lancer_cmd_req_pport_stats *req;
Selvin Xavier005d5692011-05-16 07:36:35 +00001497 int status = 0;
1498
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00001499 if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
1500 CMD_SUBSYSTEM_ETH))
1501 return -EPERM;
1502
Selvin Xavier005d5692011-05-16 07:36:35 +00001503 spin_lock_bh(&adapter->mcc_lock);
1504
1505 wrb = wrb_from_mccq(adapter);
1506 if (!wrb) {
1507 status = -EBUSY;
1508 goto err;
1509 }
1510 req = nonemb_cmd->va;
Selvin Xavier005d5692011-05-16 07:36:35 +00001511
Somnath Kotur106df1e2011-10-27 07:12:13 +00001512 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301513 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
1514 wrb, nonemb_cmd);
Selvin Xavier005d5692011-05-16 07:36:35 +00001515
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +00001516 req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
Selvin Xavier005d5692011-05-16 07:36:35 +00001517 req->cmd_params.params.reset_stats = 0;
1518
Selvin Xavier005d5692011-05-16 07:36:35 +00001519 be_mcc_notify(adapter);
1520 adapter->stats_cmd_sent = true;
1521
1522err:
1523 spin_unlock_bh(&adapter->mcc_lock);
1524 return status;
1525}
1526
Sathya Perla323ff712012-09-28 04:39:43 +00001527static int be_mac_to_link_speed(int mac_speed)
1528{
1529 switch (mac_speed) {
1530 case PHY_LINK_SPEED_ZERO:
1531 return 0;
1532 case PHY_LINK_SPEED_10MBPS:
1533 return 10;
1534 case PHY_LINK_SPEED_100MBPS:
1535 return 100;
1536 case PHY_LINK_SPEED_1GBPS:
1537 return 1000;
1538 case PHY_LINK_SPEED_10GBPS:
1539 return 10000;
Vasundhara Volamb971f842013-08-06 09:27:15 +05301540 case PHY_LINK_SPEED_20GBPS:
1541 return 20000;
1542 case PHY_LINK_SPEED_25GBPS:
1543 return 25000;
1544 case PHY_LINK_SPEED_40GBPS:
1545 return 40000;
Sathya Perla323ff712012-09-28 04:39:43 +00001546 }
1547 return 0;
1548}
1549
1550/* Uses synchronous mcc
1551 * Returns link_speed in Mbps
1552 */
1553int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
1554 u8 *link_status, u32 dom)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001555{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001556 struct be_mcc_wrb *wrb;
1557 struct be_cmd_req_link_status *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001558 int status;
1559
Sathya Perlab31c50a2009-09-17 10:30:13 -07001560 spin_lock_bh(&adapter->mcc_lock);
1561
Ajit Khapardeb236916a2011-12-30 12:15:40 +00001562 if (link_status)
1563 *link_status = LINK_DOWN;
1564
Sathya Perlab31c50a2009-09-17 10:30:13 -07001565 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001566 if (!wrb) {
1567 status = -EBUSY;
1568 goto err;
1569 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001570 req = embedded_payload(wrb);
Sathya Perlaa8f447bd2009-06-18 00:10:27 +00001571
Padmanabh Ratnakar57cd80d2012-02-03 09:49:46 +00001572 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301573 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
1574 sizeof(*req), wrb, NULL);
Padmanabh Ratnakar57cd80d2012-02-03 09:49:46 +00001575
Sathya Perlaca34fe32012-11-06 17:48:56 +00001576	/* BE2 supports only version 0 of this cmd; use version 1 on all other chips */
1577 if (!BE2_chip(adapter))
Padmanabh Ratnakardaad6162011-11-16 02:03:45 +00001578 req->hdr.version = 1;
1579
Padmanabh Ratnakar57cd80d2012-02-03 09:49:46 +00001580 req->hdr.domain = dom;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001581
Sathya Perlab31c50a2009-09-17 10:30:13 -07001582 status = be_mcc_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001583 if (!status) {
1584 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
Sathya Perla323ff712012-09-28 04:39:43 +00001585 if (link_speed) {
1586 *link_speed = resp->link_speed ?
1587 le16_to_cpu(resp->link_speed) * 10 :
1588 be_mac_to_link_speed(resp->mac_speed);
1589
1590 if (!resp->logical_link_status)
1591 *link_speed = 0;
Sarveshwar Bandi0388f252009-10-28 04:15:20 -07001592 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +00001593 if (link_status)
1594 *link_status = resp->logical_link_status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001595 }
1596
Sathya Perla713d03942009-11-22 22:02:45 +00001597err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07001598 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001599 return status;
1600}
1601
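/*
 * Illustrative sketch (not part of the upstream driver): in the response
 * decoded above, link_speed is reported in units of 10 Mbps when non-zero,
 * and otherwise the speed falls back to the mac_speed enum handled by
 * be_mac_to_link_speed(). The helper names and the enum value below are
 * assumptions made for the example, not the firmware's actual encoding.
 */
#if 0	/* example only, not compiled with the driver */
#include <stdio.h>

/* stand-in for the enum mapping done by be_mac_to_link_speed() */
static unsigned int mac_speed_to_mbps(int mac_speed)
{
	return mac_speed == 4 ? 10000 : 0;	/* assumed 10G enum value */
}

static unsigned int resolve_link_speed(unsigned int le_link_speed,
				       int mac_speed, int logical_link_up)
{
	unsigned int mbps;

	/* non-zero link_speed is in 10 Mbps units */
	mbps = le_link_speed ? le_link_speed * 10 :
			       mac_speed_to_mbps(mac_speed);
	return logical_link_up ? mbps : 0;
}

int main(void)
{
	/* link_speed of 1000 (10 Mbps units) resolves to 10000 Mbps */
	printf("%u Mbps\n", resolve_link_speed(1000, 0, 1));
	return 0;
}
#endif
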
Ajit Khaparde609ff3b2011-02-20 11:42:07 +00001602/* Uses async mcc */
1603int be_cmd_get_die_temperature(struct be_adapter *adapter)
1604{
1605 struct be_mcc_wrb *wrb;
1606 struct be_cmd_req_get_cntl_addnl_attribs *req;
Vasundhara Volam117affe2013-08-06 09:27:20 +05301607 int status = 0;
Ajit Khaparde609ff3b2011-02-20 11:42:07 +00001608
1609 spin_lock_bh(&adapter->mcc_lock);
1610
1611 wrb = wrb_from_mccq(adapter);
1612 if (!wrb) {
1613 status = -EBUSY;
1614 goto err;
1615 }
1616 req = embedded_payload(wrb);
1617
Somnath Kotur106df1e2011-10-27 07:12:13 +00001618 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301619 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
1620 sizeof(*req), wrb, NULL);
Ajit Khaparde609ff3b2011-02-20 11:42:07 +00001621
Somnath Kotur3de09452011-09-30 07:25:05 +00001622 be_mcc_notify(adapter);
Ajit Khaparde609ff3b2011-02-20 11:42:07 +00001623
1624err:
1625 spin_unlock_bh(&adapter->mcc_lock);
1626 return status;
1627}
1628
Somnath Kotur311fddc2011-03-16 21:22:43 +00001629/* Uses synchronous mcc */
1630int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1631{
1632 struct be_mcc_wrb *wrb;
1633 struct be_cmd_req_get_fat *req;
1634 int status;
1635
1636 spin_lock_bh(&adapter->mcc_lock);
1637
1638 wrb = wrb_from_mccq(adapter);
1639 if (!wrb) {
1640 status = -EBUSY;
1641 goto err;
1642 }
1643 req = embedded_payload(wrb);
1644
Somnath Kotur106df1e2011-10-27 07:12:13 +00001645 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301646 OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb,
1647 NULL);
Somnath Kotur311fddc2011-03-16 21:22:43 +00001648 req->fat_operation = cpu_to_le32(QUERY_FAT);
1649 status = be_mcc_notify_wait(adapter);
1650 if (!status) {
1651 struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
1652 if (log_size && resp->log_size)
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001653 *log_size = le32_to_cpu(resp->log_size) -
1654 sizeof(u32);
Somnath Kotur311fddc2011-03-16 21:22:43 +00001655 }
1656err:
1657 spin_unlock_bh(&adapter->mcc_lock);
1658 return status;
1659}
1660
1661void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1662{
1663 struct be_dma_mem get_fat_cmd;
1664 struct be_mcc_wrb *wrb;
1665 struct be_cmd_req_get_fat *req;
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001666 u32 offset = 0, total_size, buf_size,
1667 log_offset = sizeof(u32), payload_len;
Somnath Kotur311fddc2011-03-16 21:22:43 +00001668 int status;
1669
1670 if (buf_len == 0)
1671 return;
1672
1673 total_size = buf_len;
1674
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001675 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1676 get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301677 get_fat_cmd.size,
1678 &get_fat_cmd.dma);
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001679 if (!get_fat_cmd.va) {
1680 status = -ENOMEM;
1681 dev_err(&adapter->pdev->dev,
1682 "Memory allocation failure while retrieving FAT data\n");
1683 return;
1684 }
1685
Somnath Kotur311fddc2011-03-16 21:22:43 +00001686 spin_lock_bh(&adapter->mcc_lock);
1687
Somnath Kotur311fddc2011-03-16 21:22:43 +00001688 while (total_size) {
1689 buf_size = min(total_size, (u32)60*1024);
1690 total_size -= buf_size;
1691
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001692 wrb = wrb_from_mccq(adapter);
1693 if (!wrb) {
1694 status = -EBUSY;
Somnath Kotur311fddc2011-03-16 21:22:43 +00001695 goto err;
1696 }
1697 req = get_fat_cmd.va;
Somnath Kotur311fddc2011-03-16 21:22:43 +00001698
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001699 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
Somnath Kotur106df1e2011-10-27 07:12:13 +00001700 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301701 OPCODE_COMMON_MANAGE_FAT, payload_len,
1702 wrb, &get_fat_cmd);
Somnath Kotur311fddc2011-03-16 21:22:43 +00001703
1704 req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1705 req->read_log_offset = cpu_to_le32(log_offset);
1706 req->read_log_length = cpu_to_le32(buf_size);
1707 req->data_buffer_size = cpu_to_le32(buf_size);
1708
1709 status = be_mcc_notify_wait(adapter);
1710 if (!status) {
1711 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1712 memcpy(buf + offset,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301713 resp->data_buffer,
1714 le32_to_cpu(resp->read_log_length));
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001715 } else {
Somnath Kotur311fddc2011-03-16 21:22:43 +00001716 dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001717 goto err;
1718 }
Somnath Kotur311fddc2011-03-16 21:22:43 +00001719 offset += buf_size;
1720 log_offset += buf_size;
1721 }
1722err:
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001723 pci_free_consistent(adapter->pdev, get_fat_cmd.size,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301724 get_fat_cmd.va, get_fat_cmd.dma);
Somnath Kotur311fddc2011-03-16 21:22:43 +00001725 spin_unlock_bh(&adapter->mcc_lock);
1726}
1727
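/*
 * Illustrative sketch (not part of the upstream driver): be_cmd_get_regs()
 * above retrieves the FAT log in chunks of at most 60 KB, advancing both the
 * destination offset and the log offset after every firmware command. The
 * read_chunk() helper below is a made-up stand-in for that command.
 */
#if 0	/* example only, not compiled with the driver */
#include <stdio.h>
#include <string.h>

#define CHUNK_MAX	(60 * 1024)

/* pretend firmware call: fills 'len' bytes of the log found at 'log_off' */
static void read_chunk(char *buf, unsigned int log_off, unsigned int len)
{
	memset(buf, 'A' + (log_off / CHUNK_MAX) % 26, len);
}

static void get_log(char *dst, unsigned int total)
{
	unsigned int off = 0, log_off = sizeof(unsigned int); /* skip size word */

	while (total) {
		unsigned int len = total < CHUNK_MAX ? total : CHUNK_MAX;

		read_chunk(dst + off, log_off, len);
		off += len;
		log_off += len;
		total -= len;
	}
}

int main(void)
{
	static char log[150 * 1024];

	get_log(log, sizeof(log));	/* issues three chunked reads */
	printf("first byte %c, last byte %c\n", log[0], log[sizeof(log) - 1]);
	return 0;
}
#endif
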
Sathya Perla04b71172011-09-27 13:30:27 -04001728/* Uses synchronous mcc */
1729int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301730 char *fw_on_flash)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001731{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001732 struct be_mcc_wrb *wrb;
1733 struct be_cmd_req_get_fw_version *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001734 int status;
1735
Sathya Perla04b71172011-09-27 13:30:27 -04001736 spin_lock_bh(&adapter->mcc_lock);
Sathya Perlab31c50a2009-09-17 10:30:13 -07001737
Sathya Perla04b71172011-09-27 13:30:27 -04001738 wrb = wrb_from_mccq(adapter);
1739 if (!wrb) {
1740 status = -EBUSY;
1741 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001742 }
1743
Sathya Perla04b71172011-09-27 13:30:27 -04001744 req = embedded_payload(wrb);
Sathya Perla04b71172011-09-27 13:30:27 -04001745
Somnath Kotur106df1e2011-10-27 07:12:13 +00001746 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301747 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
1748 NULL);
Sathya Perla04b71172011-09-27 13:30:27 -04001749 status = be_mcc_notify_wait(adapter);
1750 if (!status) {
1751 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1752 strcpy(fw_ver, resp->firmware_version_string);
1753 if (fw_on_flash)
1754 strcpy(fw_on_flash, resp->fw_on_flash_version_string);
1755 }
1756err:
1757 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001758 return status;
1759}
1760
Sathya Perlab31c50a2009-09-17 10:30:13 -07001761/* set the EQ delay interval of one or more EQs to the specified values
1762 * Uses async mcc
1763 */
Sathya Perla2632baf2013-10-01 16:00:00 +05301764int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
1765 int num)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001766{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001767 struct be_mcc_wrb *wrb;
1768 struct be_cmd_req_modify_eq_delay *req;
Sathya Perla2632baf2013-10-01 16:00:00 +05301769 int status = 0, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001770
Sathya Perlab31c50a2009-09-17 10:30:13 -07001771 spin_lock_bh(&adapter->mcc_lock);
1772
1773 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001774 if (!wrb) {
1775 status = -EBUSY;
1776 goto err;
1777 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001778 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001779
Somnath Kotur106df1e2011-10-27 07:12:13 +00001780 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301781 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
1782 NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001783
Sathya Perla2632baf2013-10-01 16:00:00 +05301784 req->num_eq = cpu_to_le32(num);
1785 for (i = 0; i < num; i++) {
1786 req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
1787 req->set_eqd[i].phase = 0;
1788 req->set_eqd[i].delay_multiplier =
1789 cpu_to_le32(set_eqd[i].delay_multiplier);
1790 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001791
Sathya Perlab31c50a2009-09-17 10:30:13 -07001792 be_mcc_notify(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001793err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07001794 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla713d03942009-11-22 22:02:45 +00001795 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001796}
1797
Sathya Perlab31c50a2009-09-17 10:30:13 -07001798/* Uses synchronous mcc */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001799int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
Kalesh AP4d567d92014-05-09 13:29:17 +05301800 u32 num)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001801{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001802 struct be_mcc_wrb *wrb;
1803 struct be_cmd_req_vlan_config *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001804 int status;
1805
Sathya Perlab31c50a2009-09-17 10:30:13 -07001806 spin_lock_bh(&adapter->mcc_lock);
1807
1808 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001809 if (!wrb) {
1810 status = -EBUSY;
1811 goto err;
1812 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001813 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001814
Somnath Kotur106df1e2011-10-27 07:12:13 +00001815 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301816 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
1817 wrb, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001818
1819 req->interface_id = if_id;
Ajit Khaparde012bd382013-11-18 10:44:24 -06001820 req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001821 req->num_vlan = num;
Kalesh AP4d567d92014-05-09 13:29:17 +05301822 memcpy(req->normal_vlan, vtag_array,
1823 req->num_vlan * sizeof(vtag_array[0]));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001824
Sathya Perlab31c50a2009-09-17 10:30:13 -07001825 status = be_mcc_notify_wait(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001826err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07001827 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001828 return status;
1829}
1830
Sathya Perla5b8821b2011-08-02 19:57:44 +00001831int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001832{
Sathya Perla6ac7b682009-06-18 00:05:54 +00001833 struct be_mcc_wrb *wrb;
Sathya Perla5b8821b2011-08-02 19:57:44 +00001834 struct be_dma_mem *mem = &adapter->rx_filter;
1835 struct be_cmd_req_rx_filter *req = mem->va;
Sathya Perlae7b909a2009-11-22 22:01:10 +00001836 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001837
Sathya Perla8788fdc2009-07-27 22:52:03 +00001838 spin_lock_bh(&adapter->mcc_lock);
Sathya Perla6ac7b682009-06-18 00:05:54 +00001839
Sathya Perlab31c50a2009-09-17 10:30:13 -07001840 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001841 if (!wrb) {
1842 status = -EBUSY;
1843 goto err;
1844 }
Sathya Perla5b8821b2011-08-02 19:57:44 +00001845 memset(req, 0, sizeof(*req));
Somnath Kotur106df1e2011-10-27 07:12:13 +00001846 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301847 OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
1848 wrb, mem);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001849
Sathya Perla5b8821b2011-08-02 19:57:44 +00001850 req->if_id = cpu_to_le32(adapter->if_handle);
1851 if (flags & IFF_PROMISC) {
1852 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301853 BE_IF_FLAGS_VLAN_PROMISCUOUS |
1854 BE_IF_FLAGS_MCAST_PROMISCUOUS);
Sathya Perla5b8821b2011-08-02 19:57:44 +00001855 if (value == ON)
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301856 req->if_flags =
1857 cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1858 BE_IF_FLAGS_VLAN_PROMISCUOUS |
1859 BE_IF_FLAGS_MCAST_PROMISCUOUS);
Sathya Perla5b8821b2011-08-02 19:57:44 +00001860 } else if (flags & IFF_ALLMULTI) {
1861 req->if_flags_mask = req->if_flags =
Sathya Perla8e7d3f62011-09-27 13:29:38 -04001862 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001863 } else if (flags & BE_FLAGS_VLAN_PROMISC) {
1864 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
1865
1866 if (value == ON)
1867 req->if_flags =
1868 cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
Sathya Perla24307ee2009-06-18 00:09:25 +00001869 } else {
Sathya Perla5b8821b2011-08-02 19:57:44 +00001870 struct netdev_hw_addr *ha;
1871 int i = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001872
Sathya Perla8e7d3f62011-09-27 13:29:38 -04001873 req->if_flags_mask = req->if_flags =
1874 cpu_to_le32(BE_IF_FLAGS_MULTICAST);
Padmanabh Ratnakar1610c792011-11-03 01:49:27 +00001875
1876 /* Reset mcast promisc mode if already set by setting mask
1877 * and not setting flags field
1878 */
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001879 req->if_flags_mask |=
1880 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
Sathya Perla92bf14a2013-08-27 16:57:32 +05301881 be_if_cap_flags(adapter));
Padmanabh Ratnakar016f97b2011-11-03 01:49:13 +00001882 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
Sathya Perla5b8821b2011-08-02 19:57:44 +00001883 netdev_for_each_mc_addr(ha, adapter->netdev)
1884 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
1885 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001886
Ajit Khaparde012bd382013-11-18 10:44:24 -06001887 if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) !=
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301888 req->if_flags_mask) {
Ajit Khaparde012bd382013-11-18 10:44:24 -06001889 dev_warn(&adapter->pdev->dev,
1890 "Cannot set rx filter flags 0x%x\n",
1891 req->if_flags_mask);
1892 dev_warn(&adapter->pdev->dev,
1893 "Interface is capable of 0x%x flags only\n",
1894 be_if_cap_flags(adapter));
1895 }
1896 req->if_flags_mask &= cpu_to_le32(be_if_cap_flags(adapter));
1897
Sathya Perla0d1d5872011-08-03 05:19:27 -07001898 status = be_mcc_notify_wait(adapter);
Ajit Khaparde012bd382013-11-18 10:44:24 -06001899
Sathya Perla713d03942009-11-22 22:02:45 +00001900err:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001901 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perlae7b909a2009-11-22 22:01:10 +00001902 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001903}
1904
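/*
 * Illustrative sketch (not part of the upstream driver): be_cmd_rx_filter()
 * above programs interface flags as a mask/value pair and clamps the mask to
 * the capabilities the interface reported, warning when the caller asks for
 * more than that. The flag bits below are invented for the example.
 */
#if 0	/* example only, not compiled with the driver */
#include <stdio.h>

#define IF_FLAG_PROMISC		0x01u	/* invented stand-ins for BE_IF_FLAGS_* */
#define IF_FLAG_MCAST_PROMISC	0x02u
#define IF_FLAG_MULTICAST	0x04u

static unsigned int clamp_flags_mask(unsigned int requested, unsigned int caps)
{
	if ((requested & caps) != requested)
		fprintf(stderr, "cannot set flags 0x%x, capable of 0x%x only\n",
			requested, caps);

	return requested & caps;
}

int main(void)
{
	unsigned int caps = IF_FLAG_MULTICAST | IF_FLAG_MCAST_PROMISC;
	unsigned int mask = IF_FLAG_PROMISC | IF_FLAG_MULTICAST;

	/* PROMISC is not in caps, so only MULTICAST survives the clamp */
	printf("effective mask 0x%x\n", clamp_flags_mask(mask, caps));
	return 0;
}
#endif
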
Sathya Perlab31c50a2009-09-17 10:30:13 -07001905/* Uses synchronous mcc */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001906int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001907{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001908 struct be_mcc_wrb *wrb;
1909 struct be_cmd_req_set_flow_control *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001910 int status;
1911
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00001912 if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
1913 CMD_SUBSYSTEM_COMMON))
1914 return -EPERM;
1915
Sathya Perlab31c50a2009-09-17 10:30:13 -07001916 spin_lock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001917
Sathya Perlab31c50a2009-09-17 10:30:13 -07001918 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001919 if (!wrb) {
1920 status = -EBUSY;
1921 goto err;
1922 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001923 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001924
Somnath Kotur106df1e2011-10-27 07:12:13 +00001925 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301926 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
1927 wrb, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001928
1929 req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1930 req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1931
Sathya Perlab31c50a2009-09-17 10:30:13 -07001932 status = be_mcc_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001933
Sathya Perla713d03942009-11-22 22:02:45 +00001934err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07001935 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001936 return status;
1937}
1938
Sathya Perlab31c50a2009-09-17 10:30:13 -07001939/* Uses sycn mcc */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001940int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001941{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001942 struct be_mcc_wrb *wrb;
1943 struct be_cmd_req_get_flow_control *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001944 int status;
1945
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00001946 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
1947 CMD_SUBSYSTEM_COMMON))
1948 return -EPERM;
1949
Sathya Perlab31c50a2009-09-17 10:30:13 -07001950 spin_lock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001951
Sathya Perlab31c50a2009-09-17 10:30:13 -07001952 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001953 if (!wrb) {
1954 status = -EBUSY;
1955 goto err;
1956 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001957 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001958
Somnath Kotur106df1e2011-10-27 07:12:13 +00001959 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301960 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
1961 wrb, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001962
Sathya Perlab31c50a2009-09-17 10:30:13 -07001963 status = be_mcc_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001964 if (!status) {
1965 struct be_cmd_resp_get_flow_control *resp =
1966 embedded_payload(wrb);
1967 *tx_fc = le16_to_cpu(resp->tx_flow_control);
1968 *rx_fc = le16_to_cpu(resp->rx_flow_control);
1969 }
1970
Sathya Perla713d03942009-11-22 22:02:45 +00001971err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07001972 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001973 return status;
1974}
1975
Sathya Perlab31c50a2009-09-17 10:30:13 -07001976/* Uses mbox */
Sathya Perla3abcded2010-10-03 22:12:27 -07001977int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00001978 u32 *mode, u32 *caps, u16 *asic_rev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001979{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001980 struct be_mcc_wrb *wrb;
1981 struct be_cmd_req_query_fw_cfg *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001982 int status;
1983
Ivan Vecera29849612010-12-14 05:43:19 +00001984 if (mutex_lock_interruptible(&adapter->mbox_lock))
1985 return -1;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001986
Sathya Perlab31c50a2009-09-17 10:30:13 -07001987 wrb = wrb_from_mbox(adapter);
1988 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001989
Somnath Kotur106df1e2011-10-27 07:12:13 +00001990 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301991 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
1992 sizeof(*req), wrb, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001993
Sathya Perlab31c50a2009-09-17 10:30:13 -07001994 status = be_mbox_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001995 if (!status) {
1996 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
1997 *port_num = le32_to_cpu(resp->phys_port);
Ajit Khaparde3486be22010-07-23 02:04:54 +00001998 *mode = le32_to_cpu(resp->function_mode);
Sathya Perla3abcded2010-10-03 22:12:27 -07001999 *caps = le32_to_cpu(resp->function_caps);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00002000 *asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002001 }
2002
Ivan Vecera29849612010-12-14 05:43:19 +00002003 mutex_unlock(&adapter->mbox_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002004 return status;
2005}
sarveshwarb14074ea2009-08-05 13:05:24 -07002006
Sathya Perlab31c50a2009-09-17 10:30:13 -07002007/* Uses mbox */
sarveshwarb14074ea2009-08-05 13:05:24 -07002008int be_cmd_reset_function(struct be_adapter *adapter)
2009{
Sathya Perlab31c50a2009-09-17 10:30:13 -07002010 struct be_mcc_wrb *wrb;
2011 struct be_cmd_req_hdr *req;
sarveshwarb14074ea2009-08-05 13:05:24 -07002012 int status;
2013
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00002014 if (lancer_chip(adapter)) {
2015 status = lancer_wait_ready(adapter);
2016 if (!status) {
2017 iowrite32(SLI_PORT_CONTROL_IP_MASK,
2018 adapter->db + SLIPORT_CONTROL_OFFSET);
2019 status = lancer_test_and_set_rdy_state(adapter);
2020 }
2021 if (status) {
2022 dev_err(&adapter->pdev->dev,
2023 				"Adapter in non-recoverable error\n");
2024 }
2025 return status;
2026 }
2027
Ivan Vecera29849612010-12-14 05:43:19 +00002028 if (mutex_lock_interruptible(&adapter->mbox_lock))
2029 return -1;
sarveshwarb14074ea2009-08-05 13:05:24 -07002030
Sathya Perlab31c50a2009-09-17 10:30:13 -07002031 wrb = wrb_from_mbox(adapter);
2032 req = embedded_payload(wrb);
sarveshwarb14074ea2009-08-05 13:05:24 -07002033
Somnath Kotur106df1e2011-10-27 07:12:13 +00002034 be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302035 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
2036 NULL);
sarveshwarb14074ea2009-08-05 13:05:24 -07002037
Sathya Perlab31c50a2009-09-17 10:30:13 -07002038 status = be_mbox_notify_wait(adapter);
sarveshwarb14074ea2009-08-05 13:05:24 -07002039
Ivan Vecera29849612010-12-14 05:43:19 +00002040 mutex_unlock(&adapter->mbox_lock);
sarveshwarb14074ea2009-08-05 13:05:24 -07002041 return status;
2042}
Ajit Khaparde84517482009-09-04 03:12:16 +00002043
Suresh Reddy594ad542013-04-25 23:03:20 +00002044int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302045 u32 rss_hash_opts, u16 table_size, u8 *rss_hkey)
Sathya Perla3abcded2010-10-03 22:12:27 -07002046{
2047 struct be_mcc_wrb *wrb;
2048 struct be_cmd_req_rss_config *req;
Sathya Perla3abcded2010-10-03 22:12:27 -07002049 int status;
2050
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302051 if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
2052 return 0;
2053
Kalesh APb51aa362014-05-09 13:29:19 +05302054 spin_lock_bh(&adapter->mcc_lock);
Sathya Perla3abcded2010-10-03 22:12:27 -07002055
Kalesh APb51aa362014-05-09 13:29:19 +05302056 wrb = wrb_from_mccq(adapter);
2057 if (!wrb) {
2058 status = -EBUSY;
2059 goto err;
2060 }
Sathya Perla3abcded2010-10-03 22:12:27 -07002061 req = embedded_payload(wrb);
2062
Somnath Kotur106df1e2011-10-27 07:12:13 +00002063 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302064 OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
Sathya Perla3abcded2010-10-03 22:12:27 -07002065
2066 req->if_id = cpu_to_le32(adapter->if_handle);
Suresh Reddy594ad542013-04-25 23:03:20 +00002067 req->enable_rss = cpu_to_le16(rss_hash_opts);
Sathya Perla3abcded2010-10-03 22:12:27 -07002068 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
Suresh Reddy594ad542013-04-25 23:03:20 +00002069
Kalesh APb51aa362014-05-09 13:29:19 +05302070 if (!BEx_chip(adapter))
Suresh Reddy594ad542013-04-25 23:03:20 +00002071 req->hdr.version = 1;
2072
Sathya Perla3abcded2010-10-03 22:12:27 -07002073 memcpy(req->cpu_table, rsstable, table_size);
Venkata Duvvurue2557872014-04-21 15:38:00 +05302074 memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
Sathya Perla3abcded2010-10-03 22:12:27 -07002075 be_dws_cpu_to_le(req->hash, sizeof(req->hash));
2076
Kalesh APb51aa362014-05-09 13:29:19 +05302077 status = be_mcc_notify_wait(adapter);
2078err:
2079 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla3abcded2010-10-03 22:12:27 -07002080 return status;
2081}
2082
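/*
 * Illustrative sketch (not part of the upstream driver): be_cmd_rss_config()
 * above takes an indirection table of RX-queue ids plus a hash key, with the
 * table size encoded as a log2 just like the RX ring length. The table length
 * and queue count below are assumptions; one common way to build such a table
 * is to stripe the available queues round-robin, as shown here.
 */
#if 0	/* example only, not compiled with the driver */
#include <stdio.h>
#include <stdint.h>

#define RSS_TABLE_LEN	128	/* assumed indirection-table length */

/* fill the indirection table by striping the available RX queue ids */
static void fill_rss_table(uint8_t *table, int table_len, int num_rx_queues)
{
	int i;

	for (i = 0; i < table_len; i++)
		table[i] = (uint8_t)(i % num_rx_queues);
}

int main(void)
{
	uint8_t table[RSS_TABLE_LEN];

	fill_rss_table(table, RSS_TABLE_LEN, 4);

	/* entries cycle 0, 1, 2, 3, 0, 1, ... across the table */
	printf("table[0]=%d table[5]=%d\n", table[0], table[5]);
	return 0;
}
#endif
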
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002083/* Uses sync mcc */
2084int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302085 u8 bcn, u8 sts, u8 state)
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002086{
2087 struct be_mcc_wrb *wrb;
2088 struct be_cmd_req_enable_disable_beacon *req;
2089 int status;
2090
2091 spin_lock_bh(&adapter->mcc_lock);
2092
2093 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00002094 if (!wrb) {
2095 status = -EBUSY;
2096 goto err;
2097 }
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002098 req = embedded_payload(wrb);
2099
Somnath Kotur106df1e2011-10-27 07:12:13 +00002100 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302101 OPCODE_COMMON_ENABLE_DISABLE_BEACON,
2102 sizeof(*req), wrb, NULL);
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002103
2104 req->port_num = port_num;
2105 req->beacon_state = state;
2106 req->beacon_duration = bcn;
2107 req->status_duration = sts;
2108
2109 status = be_mcc_notify_wait(adapter);
2110
Sathya Perla713d03942009-11-22 22:02:45 +00002111err:
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002112 spin_unlock_bh(&adapter->mcc_lock);
2113 return status;
2114}
2115
2116/* Uses sync mcc */
2117int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
2118{
2119 struct be_mcc_wrb *wrb;
2120 struct be_cmd_req_get_beacon_state *req;
2121 int status;
2122
2123 spin_lock_bh(&adapter->mcc_lock);
2124
2125 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00002126 if (!wrb) {
2127 status = -EBUSY;
2128 goto err;
2129 }
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002130 req = embedded_payload(wrb);
2131
Somnath Kotur106df1e2011-10-27 07:12:13 +00002132 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302133 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
2134 wrb, NULL);
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002135
2136 req->port_num = port_num;
2137
2138 status = be_mcc_notify_wait(adapter);
2139 if (!status) {
2140 struct be_cmd_resp_get_beacon_state *resp =
2141 embedded_payload(wrb);
2142 *state = resp->beacon_state;
2143 }
2144
Sathya Perla713d03942009-11-22 22:02:45 +00002145err:
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002146 spin_unlock_bh(&adapter->mcc_lock);
2147 return status;
2148}
2149
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002150int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002151 u32 data_size, u32 data_offset,
2152 const char *obj_name, u32 *data_written,
2153 u8 *change_status, u8 *addn_status)
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002154{
2155 struct be_mcc_wrb *wrb;
2156 struct lancer_cmd_req_write_object *req;
2157 struct lancer_cmd_resp_write_object *resp;
2158 void *ctxt = NULL;
2159 int status;
2160
2161 spin_lock_bh(&adapter->mcc_lock);
2162 adapter->flash_status = 0;
2163
2164 wrb = wrb_from_mccq(adapter);
2165 if (!wrb) {
2166 status = -EBUSY;
2167 goto err_unlock;
2168 }
2169
2170 req = embedded_payload(wrb);
2171
Somnath Kotur106df1e2011-10-27 07:12:13 +00002172 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302173 OPCODE_COMMON_WRITE_OBJECT,
2174 sizeof(struct lancer_cmd_req_write_object), wrb,
2175 NULL);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002176
2177 ctxt = &req->context;
2178 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302179 write_length, ctxt, data_size);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002180
2181 if (data_size == 0)
2182 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302183 eof, ctxt, 1);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002184 else
2185 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302186 eof, ctxt, 0);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002187
2188 be_dws_cpu_to_le(ctxt, sizeof(req->context));
2189 req->write_offset = cpu_to_le32(data_offset);
2190 strcpy(req->object_name, obj_name);
2191 req->descriptor_count = cpu_to_le32(1);
2192 req->buf_len = cpu_to_le32(data_size);
2193 req->addr_low = cpu_to_le32((cmd->dma +
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302194 sizeof(struct lancer_cmd_req_write_object))
2195 & 0xFFFFFFFF);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002196 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
2197 sizeof(struct lancer_cmd_req_write_object)));
2198
2199 be_mcc_notify(adapter);
2200 spin_unlock_bh(&adapter->mcc_lock);
2201
Suresh Reddy5eeff632014-01-06 13:02:24 +05302202 if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
Somnath Kotur701962d2013-05-02 03:36:34 +00002203 msecs_to_jiffies(60000)))
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002204 status = -1;
2205 else
2206 status = adapter->flash_status;
2207
2208 resp = embedded_payload(wrb);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002209 if (!status) {
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002210 *data_written = le32_to_cpu(resp->actual_write_len);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002211 *change_status = resp->change_status;
2212 } else {
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002213 *addn_status = resp->additional_status;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002214 }
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002215
2216 return status;
2217
2218err_unlock:
2219 spin_unlock_bh(&adapter->mcc_lock);
2220 return status;
2221}
2222
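/*
 * Illustrative sketch (not part of the upstream driver):
 * lancer_cmd_write_object() above hands the data buffer's DMA address to the
 * firmware as two 32-bit halves (addr_low/addr_high), offset past the request
 * header. The helpers and the header size below are my own stand-ins for
 * upper_32_bits() and the real request layout.
 */
#if 0	/* example only, not compiled with the driver */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

static uint32_t lower_32(uint64_t addr)
{
	return (uint32_t)(addr & 0xFFFFFFFFu);
}

static uint32_t upper_32(uint64_t addr)
{
	return (uint32_t)(addr >> 32);
}

int main(void)
{
	uint64_t dma = 0x0000001234abcd00ull;	/* arbitrary DMA address */
	uint64_t hdr_len = 64;			/* assumed request-header size */
	uint64_t data = dma + hdr_len;		/* payload follows the header */

	printf("addr_low=0x%08" PRIx32 " addr_high=0x%08" PRIx32 "\n",
	       lower_32(data), upper_32(data));
	return 0;
}
#endif
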
Padmanabh Ratnakarde49bd52011-11-16 02:02:43 +00002223int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302224 u32 data_size, u32 data_offset, const char *obj_name,
2225 u32 *data_read, u32 *eof, u8 *addn_status)
Padmanabh Ratnakarde49bd52011-11-16 02:02:43 +00002226{
2227 struct be_mcc_wrb *wrb;
2228 struct lancer_cmd_req_read_object *req;
2229 struct lancer_cmd_resp_read_object *resp;
2230 int status;
2231
2232 spin_lock_bh(&adapter->mcc_lock);
2233
2234 wrb = wrb_from_mccq(adapter);
2235 if (!wrb) {
2236 status = -EBUSY;
2237 goto err_unlock;
2238 }
2239
2240 req = embedded_payload(wrb);
2241
2242 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302243 OPCODE_COMMON_READ_OBJECT,
2244 sizeof(struct lancer_cmd_req_read_object), wrb,
2245 NULL);
Padmanabh Ratnakarde49bd52011-11-16 02:02:43 +00002246
2247 req->desired_read_len = cpu_to_le32(data_size);
2248 req->read_offset = cpu_to_le32(data_offset);
2249 strcpy(req->object_name, obj_name);
2250 req->descriptor_count = cpu_to_le32(1);
2251 req->buf_len = cpu_to_le32(data_size);
2252 req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
2253 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
2254
2255 status = be_mcc_notify_wait(adapter);
2256
2257 resp = embedded_payload(wrb);
2258 if (!status) {
2259 *data_read = le32_to_cpu(resp->actual_read_len);
2260 *eof = le32_to_cpu(resp->eof);
2261 } else {
2262 *addn_status = resp->additional_status;
2263 }
2264
2265err_unlock:
2266 spin_unlock_bh(&adapter->mcc_lock);
2267 return status;
2268}
2269
Ajit Khaparde84517482009-09-04 03:12:16 +00002270int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302271 u32 flash_type, u32 flash_opcode, u32 buf_size)
Ajit Khaparde84517482009-09-04 03:12:16 +00002272{
Sathya Perlab31c50a2009-09-17 10:30:13 -07002273 struct be_mcc_wrb *wrb;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002274 struct be_cmd_write_flashrom *req;
Ajit Khaparde84517482009-09-04 03:12:16 +00002275 int status;
2276
Sathya Perlab31c50a2009-09-17 10:30:13 -07002277 spin_lock_bh(&adapter->mcc_lock);
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07002278 adapter->flash_status = 0;
Sathya Perlab31c50a2009-09-17 10:30:13 -07002279
2280 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00002281 if (!wrb) {
2282 status = -EBUSY;
Dan Carpenter2892d9c2010-05-26 04:46:35 +00002283 goto err_unlock;
Sathya Perla713d03942009-11-22 22:02:45 +00002284 }
2285 req = cmd->va;
Sathya Perlab31c50a2009-09-17 10:30:13 -07002286
Somnath Kotur106df1e2011-10-27 07:12:13 +00002287 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302288 OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb,
2289 cmd);
Ajit Khaparde84517482009-09-04 03:12:16 +00002290
2291 req->params.op_type = cpu_to_le32(flash_type);
2292 req->params.op_code = cpu_to_le32(flash_opcode);
2293 req->params.data_buf_size = cpu_to_le32(buf_size);
2294
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07002295 be_mcc_notify(adapter);
2296 spin_unlock_bh(&adapter->mcc_lock);
2297
Suresh Reddy5eeff632014-01-06 13:02:24 +05302298 if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2299 msecs_to_jiffies(40000)))
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07002300 status = -1;
2301 else
2302 status = adapter->flash_status;
Ajit Khaparde84517482009-09-04 03:12:16 +00002303
Dan Carpenter2892d9c2010-05-26 04:46:35 +00002304 return status;
2305
2306err_unlock:
2307 spin_unlock_bh(&adapter->mcc_lock);
Ajit Khaparde84517482009-09-04 03:12:16 +00002308 return status;
2309}
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002310
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002311int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05302312 u16 optype, int offset)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002313{
2314 struct be_mcc_wrb *wrb;
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00002315 struct be_cmd_read_flash_crc *req;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002316 int status;
2317
2318 spin_lock_bh(&adapter->mcc_lock);
2319
2320 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00002321 if (!wrb) {
2322 status = -EBUSY;
2323 goto err;
2324 }
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002325 req = embedded_payload(wrb);
2326
Somnath Kotur106df1e2011-10-27 07:12:13 +00002327 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00002328 OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
2329 wrb, NULL);
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002330
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05302331 req->params.op_type = cpu_to_le32(optype);
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002332 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002333 req->params.offset = cpu_to_le32(offset);
2334 req->params.data_buf_size = cpu_to_le32(0x4);
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002335
2336 status = be_mcc_notify_wait(adapter);
2337 if (!status)
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00002338 memcpy(flashed_crc, req->crc, 4);
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002339
Sathya Perla713d03942009-11-22 22:02:45 +00002340err:
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002341 spin_unlock_bh(&adapter->mcc_lock);
2342 return status;
2343}
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002344
Dan Carpenterc196b022010-05-26 04:47:39 +00002345int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302346 struct be_dma_mem *nonemb_cmd)
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002347{
2348 struct be_mcc_wrb *wrb;
2349 struct be_cmd_req_acpi_wol_magic_config *req;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002350 int status;
2351
2352 spin_lock_bh(&adapter->mcc_lock);
2353
2354 wrb = wrb_from_mccq(adapter);
2355 if (!wrb) {
2356 status = -EBUSY;
2357 goto err;
2358 }
2359 req = nonemb_cmd->va;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002360
Somnath Kotur106df1e2011-10-27 07:12:13 +00002361 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302362 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
2363 wrb, nonemb_cmd);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002364 memcpy(req->magic_mac, mac, ETH_ALEN);
2365
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002366 status = be_mcc_notify_wait(adapter);
2367
2368err:
2369 spin_unlock_bh(&adapter->mcc_lock);
2370 return status;
2371}
Suresh Rff33a6e2009-12-03 16:15:52 -08002372
Sarveshwar Bandifced9992009-12-23 04:41:44 +00002373int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
2374 u8 loopback_type, u8 enable)
2375{
2376 struct be_mcc_wrb *wrb;
2377 struct be_cmd_req_set_lmode *req;
2378 int status;
2379
2380 spin_lock_bh(&adapter->mcc_lock);
2381
2382 wrb = wrb_from_mccq(adapter);
2383 if (!wrb) {
2384 status = -EBUSY;
2385 goto err;
2386 }
2387
2388 req = embedded_payload(wrb);
2389
Somnath Kotur106df1e2011-10-27 07:12:13 +00002390 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302391 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
2392 wrb, NULL);
Sarveshwar Bandifced9992009-12-23 04:41:44 +00002393
2394 req->src_port = port_num;
2395 req->dest_port = port_num;
2396 req->loopback_type = loopback_type;
2397 req->loopback_state = enable;
2398
2399 status = be_mcc_notify_wait(adapter);
2400err:
2401 spin_unlock_bh(&adapter->mcc_lock);
2402 return status;
2403}
2404
Suresh Rff33a6e2009-12-03 16:15:52 -08002405int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302406 u32 loopback_type, u32 pkt_size, u32 num_pkts,
2407 u64 pattern)
Suresh Rff33a6e2009-12-03 16:15:52 -08002408{
2409 struct be_mcc_wrb *wrb;
2410 struct be_cmd_req_loopback_test *req;
Suresh Reddy5eeff632014-01-06 13:02:24 +05302411 struct be_cmd_resp_loopback_test *resp;
Suresh Rff33a6e2009-12-03 16:15:52 -08002412 int status;
2413
2414 spin_lock_bh(&adapter->mcc_lock);
2415
2416 wrb = wrb_from_mccq(adapter);
2417 if (!wrb) {
2418 status = -EBUSY;
2419 goto err;
2420 }
2421
2422 req = embedded_payload(wrb);
2423
Somnath Kotur106df1e2011-10-27 07:12:13 +00002424 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302425 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb,
2426 NULL);
Suresh Rff33a6e2009-12-03 16:15:52 -08002427
Suresh Reddy5eeff632014-01-06 13:02:24 +05302428 req->hdr.timeout = cpu_to_le32(15);
Suresh Rff33a6e2009-12-03 16:15:52 -08002429 req->pattern = cpu_to_le64(pattern);
2430 req->src_port = cpu_to_le32(port_num);
2431 req->dest_port = cpu_to_le32(port_num);
2432 req->pkt_size = cpu_to_le32(pkt_size);
2433 req->num_pkts = cpu_to_le32(num_pkts);
2434 req->loopback_type = cpu_to_le32(loopback_type);
2435
Suresh Reddy5eeff632014-01-06 13:02:24 +05302436 be_mcc_notify(adapter);
Suresh Rff33a6e2009-12-03 16:15:52 -08002437
Suresh Reddy5eeff632014-01-06 13:02:24 +05302438 spin_unlock_bh(&adapter->mcc_lock);
2439
2440 wait_for_completion(&adapter->et_cmd_compl);
2441 resp = embedded_payload(wrb);
2442 status = le32_to_cpu(resp->status);
2443
2444 return status;
Suresh Rff33a6e2009-12-03 16:15:52 -08002445err:
2446 spin_unlock_bh(&adapter->mcc_lock);
2447 return status;
2448}
2449
2450int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302451 u32 byte_cnt, struct be_dma_mem *cmd)
Suresh Rff33a6e2009-12-03 16:15:52 -08002452{
2453 struct be_mcc_wrb *wrb;
2454 struct be_cmd_req_ddrdma_test *req;
Suresh Rff33a6e2009-12-03 16:15:52 -08002455 int status;
2456 int i, j = 0;
2457
2458 spin_lock_bh(&adapter->mcc_lock);
2459
2460 wrb = wrb_from_mccq(adapter);
2461 if (!wrb) {
2462 status = -EBUSY;
2463 goto err;
2464 }
2465 req = cmd->va;
Somnath Kotur106df1e2011-10-27 07:12:13 +00002466 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302467 OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb,
2468 cmd);
Suresh Rff33a6e2009-12-03 16:15:52 -08002469
2470 req->pattern = cpu_to_le64(pattern);
2471 req->byte_count = cpu_to_le32(byte_cnt);
2472 for (i = 0; i < byte_cnt; i++) {
2473 req->snd_buff[i] = (u8)(pattern >> (j*8));
2474 j++;
2475 if (j > 7)
2476 j = 0;
2477 }
2478
2479 status = be_mcc_notify_wait(adapter);
2480
2481 if (!status) {
2482 struct be_cmd_resp_ddrdma_test *resp;
2483 resp = cmd->va;
2484 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
2485 resp->snd_err) {
2486 status = -1;
2487 }
2488 }
2489
2490err:
2491 spin_unlock_bh(&adapter->mcc_lock);
2492 return status;
2493}
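
/* Illustrative sketch, not part of the driver: the snd_buff fill loop above
 * simply repeats the 64-bit test pattern byte by byte, so byte i of the
 * buffer carries bits (i % 8) * 8 .. (i % 8) * 8 + 7 of 'pattern'. An
 * equivalent stand-alone formulation of that fill:
 */
static void __maybe_unused be_example_fill_pattern(u8 *buf, u32 byte_cnt,
						   u64 pattern)
{
	u32 i;

	for (i = 0; i < byte_cnt; i++)
		buf[i] = (u8)(pattern >> ((i % 8) * 8));
}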
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08002494
Dan Carpenterc196b022010-05-26 04:47:39 +00002495int be_cmd_get_seeprom_data(struct be_adapter *adapter,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302496 struct be_dma_mem *nonemb_cmd)
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08002497{
2498 struct be_mcc_wrb *wrb;
2499 struct be_cmd_req_seeprom_read *req;
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08002500 int status;
2501
2502 spin_lock_bh(&adapter->mcc_lock);
2503
2504 wrb = wrb_from_mccq(adapter);
Ajit Khapardee45ff012011-02-04 17:18:28 +00002505 if (!wrb) {
2506 status = -EBUSY;
2507 goto err;
2508 }
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08002509 req = nonemb_cmd->va;
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08002510
Somnath Kotur106df1e2011-10-27 07:12:13 +00002511 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302512 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
2513 nonemb_cmd);
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08002514
2515 status = be_mcc_notify_wait(adapter);
2516
Ajit Khapardee45ff012011-02-04 17:18:28 +00002517err:
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08002518 spin_unlock_bh(&adapter->mcc_lock);
2519 return status;
2520}
Ajit Khapardeee3cb622010-07-01 03:51:00 +00002521
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002522int be_cmd_get_phy_info(struct be_adapter *adapter)
Ajit Khapardeee3cb622010-07-01 03:51:00 +00002523{
2524 struct be_mcc_wrb *wrb;
2525 struct be_cmd_req_get_phy_info *req;
Sathya Perla306f1342011-08-02 19:57:45 +00002526 struct be_dma_mem cmd;
Ajit Khapardeee3cb622010-07-01 03:51:00 +00002527 int status;
2528
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002529 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
2530 CMD_SUBSYSTEM_COMMON))
2531 return -EPERM;
2532
Ajit Khapardeee3cb622010-07-01 03:51:00 +00002533 spin_lock_bh(&adapter->mcc_lock);
2534
2535 wrb = wrb_from_mccq(adapter);
2536 if (!wrb) {
2537 status = -EBUSY;
2538 goto err;
2539 }
Sathya Perla306f1342011-08-02 19:57:45 +00002540 cmd.size = sizeof(struct be_cmd_req_get_phy_info);
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302541 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
Sathya Perla306f1342011-08-02 19:57:45 +00002542 if (!cmd.va) {
2543 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2544 status = -ENOMEM;
2545 goto err;
2546 }
Ajit Khapardeee3cb622010-07-01 03:51:00 +00002547
Sathya Perla306f1342011-08-02 19:57:45 +00002548 req = cmd.va;
Ajit Khapardeee3cb622010-07-01 03:51:00 +00002549
Somnath Kotur106df1e2011-10-27 07:12:13 +00002550 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302551 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
2552 wrb, &cmd);
Ajit Khapardeee3cb622010-07-01 03:51:00 +00002553
2554 status = be_mcc_notify_wait(adapter);
Sathya Perla306f1342011-08-02 19:57:45 +00002555 if (!status) {
2556 struct be_phy_info *resp_phy_info =
2557 cmd.va + sizeof(struct be_cmd_req_hdr);
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002558 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
2559 adapter->phy.interface_type =
Sathya Perla306f1342011-08-02 19:57:45 +00002560 le16_to_cpu(resp_phy_info->interface_type);
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002561 adapter->phy.auto_speeds_supported =
2562 le16_to_cpu(resp_phy_info->auto_speeds_supported);
2563 adapter->phy.fixed_speeds_supported =
2564 le16_to_cpu(resp_phy_info->fixed_speeds_supported);
2565 adapter->phy.misc_params =
2566 le32_to_cpu(resp_phy_info->misc_params);
Vasundhara Volam68cb7e42013-08-06 09:27:18 +05302567
2568 if (BE2_chip(adapter)) {
2569 adapter->phy.fixed_speeds_supported =
2570 BE_SUPPORTED_SPEED_10GBPS |
2571 BE_SUPPORTED_SPEED_1GBPS;
2572 }
Sathya Perla306f1342011-08-02 19:57:45 +00002573 }
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302574 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
Ajit Khapardeee3cb622010-07-01 03:51:00 +00002575err:
2576 spin_unlock_bh(&adapter->mcc_lock);
2577 return status;
2578}
Ajit Khapardee1d18732010-07-23 01:52:13 +00002579
2580int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2581{
2582 struct be_mcc_wrb *wrb;
2583 struct be_cmd_req_set_qos *req;
2584 int status;
2585
2586 spin_lock_bh(&adapter->mcc_lock);
2587
2588 wrb = wrb_from_mccq(adapter);
2589 if (!wrb) {
2590 status = -EBUSY;
2591 goto err;
2592 }
2593
2594 req = embedded_payload(wrb);
2595
Somnath Kotur106df1e2011-10-27 07:12:13 +00002596 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302597 OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
Ajit Khapardee1d18732010-07-23 01:52:13 +00002598
2599 req->hdr.domain = domain;
Ajit Khaparde6bff57a2011-02-11 13:33:02 +00002600 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2601 req->max_bps_nic = cpu_to_le32(bps);
Ajit Khapardee1d18732010-07-23 01:52:13 +00002602
2603 status = be_mcc_notify_wait(adapter);
2604
2605err:
2606 spin_unlock_bh(&adapter->mcc_lock);
2607 return status;
2608}
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00002609
2610int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2611{
2612 struct be_mcc_wrb *wrb;
2613 struct be_cmd_req_cntl_attribs *req;
2614 struct be_cmd_resp_cntl_attribs *resp;
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00002615 int status;
2616 int payload_len = max(sizeof(*req), sizeof(*resp));
2617 struct mgmt_controller_attrib *attribs;
2618 struct be_dma_mem attribs_cmd;
2619
Suresh Reddyd98ef502013-04-25 00:56:55 +00002620 if (mutex_lock_interruptible(&adapter->mbox_lock))
2621 return -1;
2622
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00002623 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2624 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2625 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302626 &attribs_cmd.dma);
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00002627 if (!attribs_cmd.va) {
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302628 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
Suresh Reddyd98ef502013-04-25 00:56:55 +00002629 status = -ENOMEM;
2630 goto err;
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00002631 }
2632
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00002633 wrb = wrb_from_mbox(adapter);
2634 if (!wrb) {
2635 status = -EBUSY;
2636 goto err;
2637 }
2638 req = attribs_cmd.va;
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00002639
Somnath Kotur106df1e2011-10-27 07:12:13 +00002640 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302641 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len,
2642 wrb, &attribs_cmd);
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00002643
2644 status = be_mbox_notify_wait(adapter);
2645 if (!status) {
Joe Perches43d620c2011-06-16 19:08:06 +00002646 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00002647 adapter->hba_port_num = attribs->hba_attribs.phy_port;
2648 }
2649
2650err:
2651 mutex_unlock(&adapter->mbox_lock);
Suresh Reddyd98ef502013-04-25 00:56:55 +00002652 if (attribs_cmd.va)
2653 pci_free_consistent(adapter->pdev, attribs_cmd.size,
2654 attribs_cmd.va, attribs_cmd.dma);
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00002655 return status;
2656}
Sathya Perla2e588f82011-03-11 02:49:26 +00002657
2658/* Uses mbox */
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002659int be_cmd_req_native_mode(struct be_adapter *adapter)
Sathya Perla2e588f82011-03-11 02:49:26 +00002660{
2661 struct be_mcc_wrb *wrb;
2662 struct be_cmd_req_set_func_cap *req;
2663 int status;
2664
2665 if (mutex_lock_interruptible(&adapter->mbox_lock))
2666 return -1;
2667
2668 wrb = wrb_from_mbox(adapter);
2669 if (!wrb) {
2670 status = -EBUSY;
2671 goto err;
2672 }
2673
2674 req = embedded_payload(wrb);
2675
Somnath Kotur106df1e2011-10-27 07:12:13 +00002676 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302677 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP,
2678 sizeof(*req), wrb, NULL);
Sathya Perla2e588f82011-03-11 02:49:26 +00002679
2680 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2681 CAPABILITY_BE3_NATIVE_ERX_API);
2682 req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
2683
2684 status = be_mbox_notify_wait(adapter);
2685 if (!status) {
2686 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2687 adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2688 CAPABILITY_BE3_NATIVE_ERX_API;
Sathya Perlad3791422012-09-28 04:39:44 +00002689 if (!adapter->be3_native)
2690 dev_warn(&adapter->pdev->dev,
2691 "adapter not in advanced mode\n");
Sathya Perla2e588f82011-03-11 02:49:26 +00002692 }
2693err:
2694 mutex_unlock(&adapter->mbox_lock);
2695 return status;
2696}
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002697
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002698/* Get privilege(s) for a function */
2699int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
2700 u32 domain)
2701{
2702 struct be_mcc_wrb *wrb;
2703 struct be_cmd_req_get_fn_privileges *req;
2704 int status;
2705
2706 spin_lock_bh(&adapter->mcc_lock);
2707
2708 wrb = wrb_from_mccq(adapter);
2709 if (!wrb) {
2710 status = -EBUSY;
2711 goto err;
2712 }
2713
2714 req = embedded_payload(wrb);
2715
2716 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2717 OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
2718 wrb, NULL);
2719
2720 req->hdr.domain = domain;
2721
2722 status = be_mcc_notify_wait(adapter);
2723 if (!status) {
2724 struct be_cmd_resp_get_fn_privileges *resp =
2725 embedded_payload(wrb);
2726 *privilege = le32_to_cpu(resp->privilege_mask);
Suresh Reddy02308d72014-01-15 13:23:36 +05302727
2728 /* In UMC mode FW does not return right privileges.
2729 * Override with correct privilege equivalent to PF.
2730 */
2731 if (BEx_chip(adapter) && be_is_mc(adapter) &&
2732 be_physfn(adapter))
2733 *privilege = MAX_PRIVILEGES;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002734 }
2735
2736err:
2737 spin_unlock_bh(&adapter->mcc_lock);
2738 return status;
2739}
2740
Sathya Perla04a06022013-07-23 15:25:00 +05302741/* Set privilege(s) for a function */
2742int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
2743 u32 domain)
2744{
2745 struct be_mcc_wrb *wrb;
2746 struct be_cmd_req_set_fn_privileges *req;
2747 int status;
2748
2749 spin_lock_bh(&adapter->mcc_lock);
2750
2751 wrb = wrb_from_mccq(adapter);
2752 if (!wrb) {
2753 status = -EBUSY;
2754 goto err;
2755 }
2756
2757 req = embedded_payload(wrb);
2758 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2759 OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
2760 wrb, NULL);
2761 req->hdr.domain = domain;
2762 if (lancer_chip(adapter))
2763 req->privileges_lancer = cpu_to_le32(privileges);
2764 else
2765 req->privileges = cpu_to_le32(privileges);
2766
2767 status = be_mcc_notify_wait(adapter);
2768err:
2769 spin_unlock_bh(&adapter->mcc_lock);
2770 return status;
2771}
2772
Sathya Perla5a712c12013-07-23 15:24:59 +05302773/* pmac_id_valid: true => pmac_id is supplied and MAC address is requested.
	2774	 * pmac_id_valid: false => an active pmac_id or the permanent MAC is returned.
	2775	 * If a pmac_id is returned, pmac_id_valid is set to true.
2776 */
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002777int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
Suresh Reddyb188f092014-01-15 13:23:39 +05302778 bool *pmac_id_valid, u32 *pmac_id, u32 if_handle,
2779 u8 domain)
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002780{
2781 struct be_mcc_wrb *wrb;
2782 struct be_cmd_req_get_mac_list *req;
2783 int status;
2784 int mac_count;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002785 struct be_dma_mem get_mac_list_cmd;
2786 int i;
2787
2788 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
2789 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
2790 get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302791 get_mac_list_cmd.size,
2792 &get_mac_list_cmd.dma);
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002793
2794 if (!get_mac_list_cmd.va) {
2795 dev_err(&adapter->pdev->dev,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302796 "Memory allocation failure during GET_MAC_LIST\n");
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002797 return -ENOMEM;
2798 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002799
2800 spin_lock_bh(&adapter->mcc_lock);
2801
2802 wrb = wrb_from_mccq(adapter);
2803 if (!wrb) {
2804 status = -EBUSY;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002805 goto out;
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002806 }
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002807
2808 req = get_mac_list_cmd.va;
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002809
2810 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlabf591f52013-05-08 02:05:48 +00002811 OPCODE_COMMON_GET_MAC_LIST,
2812 get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002813 req->hdr.domain = domain;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002814 req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
Sathya Perla5a712c12013-07-23 15:24:59 +05302815 if (*pmac_id_valid) {
2816 req->mac_id = cpu_to_le32(*pmac_id);
Suresh Reddyb188f092014-01-15 13:23:39 +05302817 req->iface_id = cpu_to_le16(if_handle);
Sathya Perla5a712c12013-07-23 15:24:59 +05302818 req->perm_override = 0;
2819 } else {
2820 req->perm_override = 1;
2821 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002822
2823 status = be_mcc_notify_wait(adapter);
2824 if (!status) {
2825 struct be_cmd_resp_get_mac_list *resp =
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002826 get_mac_list_cmd.va;
Sathya Perla5a712c12013-07-23 15:24:59 +05302827
2828 if (*pmac_id_valid) {
2829 memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
2830 ETH_ALEN);
2831 goto out;
2832 }
2833
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002834 mac_count = resp->true_mac_count + resp->pseudo_mac_count;
	2835	 /* The MAC list returned could contain one or more active mac_ids
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002836	 * or one or more true or pseudo permanent MAC addresses.
	2837	 * If an active mac_id is present, return the first active
	2838	 * mac_id found.
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002839 */
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002840 for (i = 0; i < mac_count; i++) {
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002841 struct get_list_macaddr *mac_entry;
2842 u16 mac_addr_size;
2843 u32 mac_id;
2844
2845 mac_entry = &resp->macaddr_list[i];
2846 mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
2847 /* mac_id is a 32 bit value and mac_addr size
2848 * is 6 bytes
2849 */
2850 if (mac_addr_size == sizeof(u32)) {
Sathya Perla5a712c12013-07-23 15:24:59 +05302851 *pmac_id_valid = true;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002852 mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
2853 *pmac_id = le32_to_cpu(mac_id);
2854 goto out;
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002855 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002856 }
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002857 /* If no active mac_id found, return first mac addr */
Sathya Perla5a712c12013-07-23 15:24:59 +05302858 *pmac_id_valid = false;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002859 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302860 ETH_ALEN);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002861 }
2862
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002863out:
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002864 spin_unlock_bh(&adapter->mcc_lock);
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002865 pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302866 get_mac_list_cmd.va, get_mac_list_cmd.dma);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002867 return status;
2868}
2869
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302870int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
2871 u8 *mac, u32 if_handle, bool active, u32 domain)
Sathya Perla5a712c12013-07-23 15:24:59 +05302872{
Sathya Perla5a712c12013-07-23 15:24:59 +05302873
Suresh Reddyb188f092014-01-15 13:23:39 +05302874 if (!active)
2875 be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id,
2876 if_handle, domain);
Sathya Perla3175d8c2013-07-23 15:25:03 +05302877 if (BEx_chip(adapter))
Sathya Perla5a712c12013-07-23 15:24:59 +05302878 return be_cmd_mac_addr_query(adapter, mac, false,
Suresh Reddyb188f092014-01-15 13:23:39 +05302879 if_handle, curr_pmac_id);
Sathya Perla3175d8c2013-07-23 15:25:03 +05302880 else
2881 /* Fetch the MAC address using pmac_id */
2882 return be_cmd_get_mac_from_list(adapter, mac, &active,
Suresh Reddyb188f092014-01-15 13:23:39 +05302883 &curr_pmac_id,
2884 if_handle, domain);
Sathya Perla5a712c12013-07-23 15:24:59 +05302885}
2886
Sathya Perla95046b92013-07-23 15:25:02 +05302887int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
2888{
2889 int status;
2890 bool pmac_valid = false;
2891
2892 memset(mac, 0, ETH_ALEN);
2893
Sathya Perla3175d8c2013-07-23 15:25:03 +05302894 if (BEx_chip(adapter)) {
2895 if (be_physfn(adapter))
2896 status = be_cmd_mac_addr_query(adapter, mac, true, 0,
2897 0);
2898 else
2899 status = be_cmd_mac_addr_query(adapter, mac, false,
2900 adapter->if_handle, 0);
2901 } else {
Sathya Perla95046b92013-07-23 15:25:02 +05302902 status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
Suresh Reddyb188f092014-01-15 13:23:39 +05302903 NULL, adapter->if_handle, 0);
Sathya Perla3175d8c2013-07-23 15:25:03 +05302904 }
2905
Sathya Perla95046b92013-07-23 15:25:02 +05302906 return status;
2907}
2908
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002909/* Uses synchronous MCCQ */
2910int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
2911 u8 mac_count, u32 domain)
2912{
2913 struct be_mcc_wrb *wrb;
2914 struct be_cmd_req_set_mac_list *req;
2915 int status;
2916 struct be_dma_mem cmd;
2917
2918 memset(&cmd, 0, sizeof(struct be_dma_mem));
2919 cmd.size = sizeof(struct be_cmd_req_set_mac_list);
2920 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302921 &cmd.dma, GFP_KERNEL);
Joe Perchesd0320f72013-03-14 13:07:21 +00002922 if (!cmd.va)
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002923 return -ENOMEM;
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002924
2925 spin_lock_bh(&adapter->mcc_lock);
2926
2927 wrb = wrb_from_mccq(adapter);
2928 if (!wrb) {
2929 status = -EBUSY;
2930 goto err;
2931 }
2932
2933 req = cmd.va;
2934 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302935 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
2936 wrb, &cmd);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002937
2938 req->hdr.domain = domain;
2939 req->mac_count = mac_count;
2940 if (mac_count)
2941 memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
2942
2943 status = be_mcc_notify_wait(adapter);
2944
2945err:
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302946 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002947 spin_unlock_bh(&adapter->mcc_lock);
2948 return status;
2949}
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00002950
Sathya Perla3175d8c2013-07-23 15:25:03 +05302951/* Wrapper to delete any active MACs and provision the new MAC.
2952 * Changes to MAC_LIST are allowed iff none of the MAC addresses in the
2953 * current list are active.
2954 */
2955int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
2956{
2957 bool active_mac = false;
2958 u8 old_mac[ETH_ALEN];
2959 u32 pmac_id;
2960 int status;
2961
2962 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
Suresh Reddyb188f092014-01-15 13:23:39 +05302963 &pmac_id, if_id, dom);
2964
Sathya Perla3175d8c2013-07-23 15:25:03 +05302965 if (!status && active_mac)
2966 be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
2967
2968 return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
2969}
2970
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002971int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
Ajit Khapardea77dcb82013-08-30 15:01:16 -05002972 u32 domain, u16 intf_id, u16 hsw_mode)
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002973{
2974 struct be_mcc_wrb *wrb;
2975 struct be_cmd_req_set_hsw_config *req;
2976 void *ctxt;
2977 int status;
2978
2979 spin_lock_bh(&adapter->mcc_lock);
2980
2981 wrb = wrb_from_mccq(adapter);
2982 if (!wrb) {
2983 status = -EBUSY;
2984 goto err;
2985 }
2986
2987 req = embedded_payload(wrb);
2988 ctxt = &req->context;
2989
2990 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302991 OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
2992 NULL);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002993
2994 req->hdr.domain = domain;
2995 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
2996 if (pvid) {
2997 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
2998 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
2999 }
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003000 if (!BEx_chip(adapter) && hsw_mode) {
3001 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
3002 ctxt, adapter->hba_port_num);
3003 AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
3004 AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type,
3005 ctxt, hsw_mode);
3006 }
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003007
3008 be_dws_cpu_to_le(req->context, sizeof(req->context));
3009 status = be_mcc_notify_wait(adapter);
3010
3011err:
3012 spin_unlock_bh(&adapter->mcc_lock);
3013 return status;
3014}
3015
3016/* Get Hyper switch config */
3017int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003018 u32 domain, u16 intf_id, u8 *mode)
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003019{
3020 struct be_mcc_wrb *wrb;
3021 struct be_cmd_req_get_hsw_config *req;
3022 void *ctxt;
3023 int status;
3024 u16 vid;
3025
3026 spin_lock_bh(&adapter->mcc_lock);
3027
3028 wrb = wrb_from_mccq(adapter);
3029 if (!wrb) {
3030 status = -EBUSY;
3031 goto err;
3032 }
3033
3034 req = embedded_payload(wrb);
3035 ctxt = &req->context;
3036
3037 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303038 OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
3039 NULL);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003040
3041 req->hdr.domain = domain;
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003042 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
3043 ctxt, intf_id);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003044 AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003045
Vasundhara Volam2c07c1d2014-01-15 13:23:32 +05303046 if (!BEx_chip(adapter) && mode) {
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003047 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
3048 ctxt, adapter->hba_port_num);
3049 AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
3050 }
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003051 be_dws_cpu_to_le(req->context, sizeof(req->context));
3052
3053 status = be_mcc_notify_wait(adapter);
3054 if (!status) {
3055 struct be_cmd_resp_get_hsw_config *resp =
3056 embedded_payload(wrb);
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303057 be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003058 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303059 pvid, &resp->context);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003060 if (pvid)
3061 *pvid = le16_to_cpu(vid);
3062 if (mode)
3063 *mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3064 port_fwd_type, &resp->context);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003065 }
3066
3067err:
3068 spin_unlock_bh(&adapter->mcc_lock);
3069 return status;
3070}
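
/* Illustrative sketch, not part of the driver: a caller that wants to change
 * only the PVID of an interface can read the current hypervisor-switch
 * configuration (PVID and port-forwarding mode) first and then write it back
 * with the new PVID, keeping the reported mode unchanged. The domain value 0
 * below is just an example.
 */
static int __maybe_unused be_example_update_pvid(struct be_adapter *adapter,
						 u16 intf_id, u16 new_pvid)
{
	u16 old_pvid = 0;
	u8 mode = 0;
	int status;

	status = be_cmd_get_hsw_config(adapter, &old_pvid, 0, intf_id, &mode);
	if (status)
		return status;

	return be_cmd_set_hsw_config(adapter, new_pvid, 0, intf_id, mode);
}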
3071
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003072int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
3073{
3074 struct be_mcc_wrb *wrb;
3075 struct be_cmd_req_acpi_wol_magic_config_v1 *req;
Suresh Reddy76a9e082014-01-15 13:23:40 +05303076 int status = 0;
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003077 struct be_dma_mem cmd;
3078
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003079 if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
3080 CMD_SUBSYSTEM_ETH))
3081 return -EPERM;
3082
Suresh Reddy76a9e082014-01-15 13:23:40 +05303083 if (be_is_wol_excluded(adapter))
3084 return status;
3085
Suresh Reddyd98ef502013-04-25 00:56:55 +00003086 if (mutex_lock_interruptible(&adapter->mbox_lock))
3087 return -1;
3088
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003089 memset(&cmd, 0, sizeof(struct be_dma_mem));
3090 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303091 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003092 if (!cmd.va) {
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303093 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
Suresh Reddyd98ef502013-04-25 00:56:55 +00003094 status = -ENOMEM;
3095 goto err;
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003096 }
3097
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003098 wrb = wrb_from_mbox(adapter);
3099 if (!wrb) {
3100 status = -EBUSY;
3101 goto err;
3102 }
3103
3104 req = cmd.va;
3105
3106 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
3107 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
Suresh Reddy76a9e082014-01-15 13:23:40 +05303108 sizeof(*req), wrb, &cmd);
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003109
3110 req->hdr.version = 1;
3111 req->query_options = BE_GET_WOL_CAP;
3112
3113 status = be_mbox_notify_wait(adapter);
3114 if (!status) {
3115 struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
3116 resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va;
3117
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003118 adapter->wol_cap = resp->wol_settings;
Suresh Reddy76a9e082014-01-15 13:23:40 +05303119 if (adapter->wol_cap & BE_WOL_CAP)
3120 adapter->wol_en = true;
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003121 }
3122err:
3123 mutex_unlock(&adapter->mbox_lock);
Suresh Reddyd98ef502013-04-25 00:56:55 +00003124 if (cmd.va)
3125 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003126 return status;
	3128}
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05303129
3130int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
3131{
3132 struct be_dma_mem extfat_cmd;
3133 struct be_fat_conf_params *cfgs;
3134 int status;
3135 int i, j;
3136
3137 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3138 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3139 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3140 &extfat_cmd.dma);
3141 if (!extfat_cmd.va)
3142 return -ENOMEM;
3143
3144 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3145 if (status)
3146 goto err;
3147
3148 cfgs = (struct be_fat_conf_params *)
3149 (extfat_cmd.va + sizeof(struct be_cmd_resp_hdr));
3150 for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
3151 u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
3152 for (j = 0; j < num_modes; j++) {
3153 if (cfgs->module[i].trace_lvl[j].mode == MODE_UART)
3154 cfgs->module[i].trace_lvl[j].dbg_lvl =
3155 cpu_to_le32(level);
3156 }
3157 }
3158
3159 status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
3160err:
3161 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3162 extfat_cmd.dma);
3163 return status;
3164}
3165
3166int be_cmd_get_fw_log_level(struct be_adapter *adapter)
3167{
3168 struct be_dma_mem extfat_cmd;
3169 struct be_fat_conf_params *cfgs;
3170 int status, j;
3171 int level = 0;
3172
3173 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3174 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3175 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3176 &extfat_cmd.dma);
3177
3178 if (!extfat_cmd.va) {
3179 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3180 __func__);
3181 goto err;
3182 }
3183
3184 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3185 if (!status) {
3186 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3187 sizeof(struct be_cmd_resp_hdr));
3188 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3189 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3190 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3191 }
3192 }
3193 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3194 extfat_cmd.dma);
3195err:
3196 return level;
3197}
3198
Somnath Kotur941a77d2012-05-17 22:59:03 +00003199int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
3200 struct be_dma_mem *cmd)
3201{
3202 struct be_mcc_wrb *wrb;
3203 struct be_cmd_req_get_ext_fat_caps *req;
3204 int status;
3205
3206 if (mutex_lock_interruptible(&adapter->mbox_lock))
3207 return -1;
3208
3209 wrb = wrb_from_mbox(adapter);
3210 if (!wrb) {
3211 status = -EBUSY;
3212 goto err;
3213 }
3214
3215 req = cmd->va;
3216 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3217 OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
3218 cmd->size, wrb, cmd);
3219 req->parameter_type = cpu_to_le32(1);
3220
3221 status = be_mbox_notify_wait(adapter);
3222err:
3223 mutex_unlock(&adapter->mbox_lock);
3224 return status;
3225}
3226
3227int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
3228 struct be_dma_mem *cmd,
3229 struct be_fat_conf_params *configs)
3230{
3231 struct be_mcc_wrb *wrb;
3232 struct be_cmd_req_set_ext_fat_caps *req;
3233 int status;
3234
3235 spin_lock_bh(&adapter->mcc_lock);
3236
3237 wrb = wrb_from_mccq(adapter);
3238 if (!wrb) {
3239 status = -EBUSY;
3240 goto err;
3241 }
3242
3243 req = cmd->va;
3244 memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
3245 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3246 OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
3247 cmd->size, wrb, cmd);
3248
3249 status = be_mcc_notify_wait(adapter);
3250err:
3251 spin_unlock_bh(&adapter->mcc_lock);
3252 return status;
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003253}
Parav Pandit6a4ab662012-03-26 14:27:12 +00003254
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00003255int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
3256{
3257 struct be_mcc_wrb *wrb;
3258 struct be_cmd_req_get_port_name *req;
3259 int status;
3260
3261 if (!lancer_chip(adapter)) {
3262 *port_name = adapter->hba_port_num + '0';
3263 return 0;
3264 }
3265
3266 spin_lock_bh(&adapter->mcc_lock);
3267
3268 wrb = wrb_from_mccq(adapter);
3269 if (!wrb) {
3270 status = -EBUSY;
3271 goto err;
3272 }
3273
3274 req = embedded_payload(wrb);
3275
3276 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3277 OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
3278 NULL);
3279 req->hdr.version = 1;
3280
3281 status = be_mcc_notify_wait(adapter);
3282 if (!status) {
3283 struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
3284 *port_name = resp->port_name[adapter->hba_port_num];
3285 } else {
3286 *port_name = adapter->hba_port_num + '0';
3287 }
3288err:
3289 spin_unlock_bh(&adapter->mcc_lock);
3290 return status;
3291}
3292
Vasundhara Volam150d58c2013-08-27 16:57:31 +05303293static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003294{
Vasundhara Volam150d58c2013-08-27 16:57:31 +05303295 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003296 int i;
3297
3298 for (i = 0; i < desc_count; i++) {
Vasundhara Volam150d58c2013-08-27 16:57:31 +05303299 if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
3300 hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1)
3301 return (struct be_nic_res_desc *)hdr;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003302
Vasundhara Volam150d58c2013-08-27 16:57:31 +05303303 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3304 hdr = (void *)hdr + hdr->desc_len;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003305 }
Vasundhara Volam150d58c2013-08-27 16:57:31 +05303306 return NULL;
3307}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003308
Vasundhara Volam150d58c2013-08-27 16:57:31 +05303309static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
3310 u32 desc_count)
3311{
3312 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3313 struct be_pcie_res_desc *pcie;
3314 int i;
3315
3316 for (i = 0; i < desc_count; i++) {
3317 if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
3318 hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) {
3319 pcie = (struct be_pcie_res_desc *)hdr;
3320 if (pcie->pf_num == devfn)
3321 return pcie;
3322 }
3323
3324 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3325 hdr = (void *)hdr + hdr->desc_len;
3326 }
Wei Yang950e2952013-05-22 15:58:22 +00003327 return NULL;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003328}
3329
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303330static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count)
3331{
3332 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3333 int i;
3334
3335 for (i = 0; i < desc_count; i++) {
3336 if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1)
3337 return (struct be_port_res_desc *)hdr;
3338
3339 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3340 hdr = (void *)hdr + hdr->desc_len;
3341 }
3342 return NULL;
3343}
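
/* Illustrative sketch, not part of the driver: the three lookup helpers
 * above all walk the same variable-length descriptor list returned by the
 * firmware. Each entry starts with a be_res_desc_hdr and the next entry
 * begins desc_len bytes later, with a zero desc_len treated as the legacy
 * v0 descriptor size. A generic walk over such a buffer looks like this:
 */
static struct be_res_desc_hdr * __maybe_unused
be_example_find_desc(u8 *buf, u32 desc_count, u8 wanted_type)
{
	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
	int i;

	for (i = 0; i < desc_count; i++) {
		if (hdr->desc_type == wanted_type)
			return hdr;

		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
		hdr = (void *)hdr + hdr->desc_len;
	}
	return NULL;
}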
3344
Sathya Perla92bf14a2013-08-27 16:57:32 +05303345static void be_copy_nic_desc(struct be_resources *res,
3346 struct be_nic_res_desc *desc)
3347{
3348 res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count);
3349 res->max_vlans = le16_to_cpu(desc->vlan_count);
3350 res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
3351 res->max_tx_qs = le16_to_cpu(desc->txq_count);
3352 res->max_rss_qs = le16_to_cpu(desc->rssq_count);
3353 res->max_rx_qs = le16_to_cpu(desc->rq_count);
3354 res->max_evt_qs = le16_to_cpu(desc->eq_count);
3355 /* Clear flags that driver is not interested in */
3356 res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
3357 BE_IF_CAP_FLAGS_WANT;
3358 /* Need 1 RXQ as the default RXQ */
3359 if (res->max_rss_qs && res->max_rss_qs == res->max_rx_qs)
3360 res->max_rss_qs -= 1;
3361}
3362
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003363/* Uses Mbox */
Sathya Perla92bf14a2013-08-27 16:57:32 +05303364int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003365{
3366 struct be_mcc_wrb *wrb;
3367 struct be_cmd_req_get_func_config *req;
3368 int status;
3369 struct be_dma_mem cmd;
3370
Suresh Reddyd98ef502013-04-25 00:56:55 +00003371 if (mutex_lock_interruptible(&adapter->mbox_lock))
3372 return -1;
3373
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003374 memset(&cmd, 0, sizeof(struct be_dma_mem));
3375 cmd.size = sizeof(struct be_cmd_resp_get_func_config);
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303376 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003377 if (!cmd.va) {
3378 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
Suresh Reddyd98ef502013-04-25 00:56:55 +00003379 status = -ENOMEM;
3380 goto err;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003381 }
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003382
3383 wrb = wrb_from_mbox(adapter);
3384 if (!wrb) {
3385 status = -EBUSY;
3386 goto err;
3387 }
3388
3389 req = cmd.va;
3390
3391 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3392 OPCODE_COMMON_GET_FUNC_CONFIG,
3393 cmd.size, wrb, &cmd);
3394
Kalesh AP28710c52013-04-28 22:21:13 +00003395 if (skyhawk_chip(adapter))
3396 req->hdr.version = 1;
3397
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003398 status = be_mbox_notify_wait(adapter);
3399 if (!status) {
3400 struct be_cmd_resp_get_func_config *resp = cmd.va;
3401 u32 desc_count = le32_to_cpu(resp->desc_count);
Vasundhara Volam150d58c2013-08-27 16:57:31 +05303402 struct be_nic_res_desc *desc;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003403
Vasundhara Volam150d58c2013-08-27 16:57:31 +05303404 desc = be_get_nic_desc(resp->func_param, desc_count);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003405 if (!desc) {
3406 status = -EINVAL;
3407 goto err;
3408 }
3409
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00003410 adapter->pf_number = desc->pf_num;
Sathya Perla92bf14a2013-08-27 16:57:32 +05303411 be_copy_nic_desc(res, desc);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003412 }
3413err:
3414 mutex_unlock(&adapter->mbox_lock);
Suresh Reddyd98ef502013-04-25 00:56:55 +00003415 if (cmd.va)
3416 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003417 return status;
3418}
3419
Vasundhara Volama05f99d2013-04-21 23:28:17 +00003420/* Uses mbox */
Jingoo Han4188e7d2013-08-05 18:02:02 +09003421static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303422 u8 domain, struct be_dma_mem *cmd)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003423{
3424 struct be_mcc_wrb *wrb;
3425 struct be_cmd_req_get_profile_config *req;
3426 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003427
Vasundhara Volama05f99d2013-04-21 23:28:17 +00003428 if (mutex_lock_interruptible(&adapter->mbox_lock))
3429 return -1;
3430 wrb = wrb_from_mbox(adapter);
3431
3432 req = cmd->va;
3433 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3434 OPCODE_COMMON_GET_PROFILE_CONFIG,
3435 cmd->size, wrb, cmd);
3436
3437 req->type = ACTIVE_PROFILE_TYPE;
3438 req->hdr.domain = domain;
3439 if (!lancer_chip(adapter))
3440 req->hdr.version = 1;
3441
3442 status = be_mbox_notify_wait(adapter);
3443
3444 mutex_unlock(&adapter->mbox_lock);
3445 return status;
3446}
3447
3448/* Uses sync mcc */
Jingoo Han4188e7d2013-08-05 18:02:02 +09003449static int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303450 u8 domain, struct be_dma_mem *cmd)
Vasundhara Volama05f99d2013-04-21 23:28:17 +00003451{
3452 struct be_mcc_wrb *wrb;
3453 struct be_cmd_req_get_profile_config *req;
3454 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003455
3456 spin_lock_bh(&adapter->mcc_lock);
3457
3458 wrb = wrb_from_mccq(adapter);
3459 if (!wrb) {
3460 status = -EBUSY;
3461 goto err;
3462 }
3463
Vasundhara Volama05f99d2013-04-21 23:28:17 +00003464 req = cmd->va;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003465 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3466 OPCODE_COMMON_GET_PROFILE_CONFIG,
Vasundhara Volama05f99d2013-04-21 23:28:17 +00003467 cmd->size, wrb, cmd);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003468
3469 req->type = ACTIVE_PROFILE_TYPE;
3470 req->hdr.domain = domain;
Vasundhara Volama05f99d2013-04-21 23:28:17 +00003471 if (!lancer_chip(adapter))
3472 req->hdr.version = 1;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003473
3474 status = be_mcc_notify_wait(adapter);
Vasundhara Volama05f99d2013-04-21 23:28:17 +00003475
3476err:
3477 spin_unlock_bh(&adapter->mcc_lock);
3478 return status;
3479}
3480
	3481/* Uses sync MCC if the MCCQ is already created, otherwise the mbox */
Sathya Perla92bf14a2013-08-27 16:57:32 +05303482int be_cmd_get_profile_config(struct be_adapter *adapter,
3483 struct be_resources *res, u8 domain)
Vasundhara Volama05f99d2013-04-21 23:28:17 +00003484{
Vasundhara Volam150d58c2013-08-27 16:57:31 +05303485 struct be_cmd_resp_get_profile_config *resp;
3486 struct be_pcie_res_desc *pcie;
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303487 struct be_port_res_desc *port;
Vasundhara Volam150d58c2013-08-27 16:57:31 +05303488 struct be_nic_res_desc *nic;
Vasundhara Volama05f99d2013-04-21 23:28:17 +00003489 struct be_queue_info *mccq = &adapter->mcc_obj.q;
3490 struct be_dma_mem cmd;
Vasundhara Volam150d58c2013-08-27 16:57:31 +05303491 u32 desc_count;
Vasundhara Volama05f99d2013-04-21 23:28:17 +00003492 int status;
3493
3494 memset(&cmd, 0, sizeof(struct be_dma_mem));
Vasundhara Volam150d58c2013-08-27 16:57:31 +05303495 cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
3496 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
3497 if (!cmd.va)
Vasundhara Volama05f99d2013-04-21 23:28:17 +00003498 return -ENOMEM;
Vasundhara Volama05f99d2013-04-21 23:28:17 +00003499
3500 if (!mccq->created)
3501 status = be_cmd_get_profile_config_mbox(adapter, domain, &cmd);
3502 else
3503 status = be_cmd_get_profile_config_mccq(adapter, domain, &cmd);
Vasundhara Volam150d58c2013-08-27 16:57:31 +05303504 if (status)
3505 goto err;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003506
Vasundhara Volam150d58c2013-08-27 16:57:31 +05303507 resp = cmd.va;
3508 desc_count = le32_to_cpu(resp->desc_count);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003509
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303510 pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
3511 desc_count);
Vasundhara Volam150d58c2013-08-27 16:57:31 +05303512 if (pcie)
Sathya Perla92bf14a2013-08-27 16:57:32 +05303513 res->max_vfs = le16_to_cpu(pcie->num_vfs);
Vasundhara Volam150d58c2013-08-27 16:57:31 +05303514
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303515 port = be_get_port_desc(resp->func_param, desc_count);
3516 if (port)
3517 adapter->mc_type = port->mc_type;
3518
Vasundhara Volam150d58c2013-08-27 16:57:31 +05303519 nic = be_get_nic_desc(resp->func_param, desc_count);
Sathya Perla92bf14a2013-08-27 16:57:32 +05303520 if (nic)
3521 be_copy_nic_desc(res, nic);
3522
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003523err:
Vasundhara Volama05f99d2013-04-21 23:28:17 +00003524 if (cmd.va)
Vasundhara Volam150d58c2013-08-27 16:57:31 +05303525 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003526 return status;
3527}
3528
Sathya Perlaa4018012014-03-27 10:46:18 +05303529int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
3530 int size, u8 version, u8 domain)
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00003531{
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00003532 struct be_cmd_req_set_profile_config *req;
Sathya Perlaa4018012014-03-27 10:46:18 +05303533 struct be_mcc_wrb *wrb;
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00003534 int status;
3535
3536 spin_lock_bh(&adapter->mcc_lock);
3537
3538 wrb = wrb_from_mccq(adapter);
3539 if (!wrb) {
3540 status = -EBUSY;
3541 goto err;
3542 }
3543
3544 req = embedded_payload(wrb);
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00003545 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3546 OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),
3547 wrb, NULL);
Sathya Perlaa4018012014-03-27 10:46:18 +05303548 req->hdr.version = version;
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00003549 req->hdr.domain = domain;
3550 req->desc_count = cpu_to_le32(1);
Sathya Perlaa4018012014-03-27 10:46:18 +05303551 memcpy(req->desc, desc, size);
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00003552
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00003553 status = be_mcc_notify_wait(adapter);
3554err:
3555 spin_unlock_bh(&adapter->mcc_lock);
3556 return status;
3557}
3558
Sathya Perlaa4018012014-03-27 10:46:18 +05303559/* Mark all fields invalid */
3560void be_reset_nic_desc(struct be_nic_res_desc *nic)
3561{
3562 memset(nic, 0, sizeof(*nic));
3563 nic->unicast_mac_count = 0xFFFF;
3564 nic->mcc_count = 0xFFFF;
3565 nic->vlan_count = 0xFFFF;
3566 nic->mcast_mac_count = 0xFFFF;
3567 nic->txq_count = 0xFFFF;
3568 nic->rq_count = 0xFFFF;
3569 nic->rssq_count = 0xFFFF;
3570 nic->lro_count = 0xFFFF;
3571 nic->cq_count = 0xFFFF;
3572 nic->toe_conn_count = 0xFFFF;
3573 nic->eq_count = 0xFFFF;
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05303574 nic->iface_count = 0xFFFF;
Sathya Perlaa4018012014-03-27 10:46:18 +05303575 nic->link_param = 0xFF;
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05303576 nic->channel_id_param = cpu_to_le16(0xF000);
Sathya Perlaa4018012014-03-27 10:46:18 +05303577 nic->acpi_params = 0xFF;
3578 nic->wol_param = 0x0F;
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05303579 nic->tunnel_iface_count = 0xFFFF;
3580 nic->direct_tenant_iface_count = 0xFFFF;
Sathya Perlaa4018012014-03-27 10:46:18 +05303581 nic->bw_max = 0xFFFFFFFF;
3582}
3583
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05303584int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
3585 u8 domain)
Sathya Perlaa4018012014-03-27 10:46:18 +05303586{
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05303587 struct be_nic_res_desc nic_desc;
3588 u32 bw_percent;
3589 u16 version = 0;
Sathya Perlaa4018012014-03-27 10:46:18 +05303590
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05303591 if (BE3_chip(adapter))
3592 return be_cmd_set_qos(adapter, max_rate / 10, domain);
3593
3594 be_reset_nic_desc(&nic_desc);
3595 nic_desc.pf_num = adapter->pf_number;
3596 nic_desc.vf_num = domain;
3597 if (lancer_chip(adapter)) {
Sathya Perlaa4018012014-03-27 10:46:18 +05303598 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
3599 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
3600 nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
3601 (1 << NOSV_SHIFT);
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05303602 nic_desc.bw_max = cpu_to_le32(max_rate / 10);
Sathya Perlaa4018012014-03-27 10:46:18 +05303603 } else {
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05303604 version = 1;
3605 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
3606 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
3607 nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
3608 bw_percent = max_rate ? (max_rate * 100) / link_speed : 100;
3609 nic_desc.bw_max = cpu_to_le32(bw_percent);
Sathya Perlaa4018012014-03-27 10:46:18 +05303610 }
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05303611
3612 return be_cmd_set_profile_config(adapter, &nic_desc,
3613 nic_desc.hdr.desc_len,
3614 version, domain);
Sathya Perlaa4018012014-03-27 10:46:18 +05303615}
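
/* Worked example for the v1 descriptor path in be_cmd_config_qos() above
 * (illustrative numbers only): with link_speed = 10000 Mb/s and
 * max_rate = 2500 Mb/s, bw_max is programmed as (2500 * 100) / 10000 = 25,
 * i.e. 25% of the link, and max_rate = 0 falls back to 100%. On Lancer the
 * rate is instead passed directly in units of 10 Mb/s (2500 / 10 = 250).
 */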
3616
3617int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
3618{
3619 struct be_mcc_wrb *wrb;
3620 struct be_cmd_req_manage_iface_filters *req;
3621 int status;
3622
3623 if (iface == 0xFFFFFFFF)
3624 return -1;
3625
3626 spin_lock_bh(&adapter->mcc_lock);
3627
3628 wrb = wrb_from_mccq(adapter);
3629 if (!wrb) {
3630 status = -EBUSY;
3631 goto err;
3632 }
3633 req = embedded_payload(wrb);
3634
3635 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3636 OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req),
3637 wrb, NULL);
3638 req->op = op;
3639 req->target_iface_id = cpu_to_le32(iface);
3640
3641 status = be_mcc_notify_wait(adapter);
3642err:
3643 spin_unlock_bh(&adapter->mcc_lock);
3644 return status;
3645}
3646
3647int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port)
3648{
3649 struct be_port_res_desc port_desc;
3650
3651 memset(&port_desc, 0, sizeof(port_desc));
3652 port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1;
3653 port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
3654 port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
3655 port_desc.link_num = adapter->hba_port_num;
3656 if (port) {
3657 port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) |
3658 (1 << RCVID_SHIFT);
3659 port_desc.nv_port = swab16(port);
3660 } else {
3661 port_desc.nv_flags = NV_TYPE_DISABLED;
3662 port_desc.nv_port = 0;
3663 }
3664
3665 return be_cmd_set_profile_config(adapter, &port_desc,
3666 RESOURCE_DESC_SIZE_V1, 1, 0);
3667}
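
/* Illustrative sketch, not part of the driver: the port argument is in
 * network byte order and a value of 0 disables the offload, so enabling the
 * IANA-assigned VXLAN UDP port (4789) and later disabling it would look
 * roughly like this:
 *
 *	be_cmd_set_vxlan_port(adapter, cpu_to_be16(4789));	enable
 *	be_cmd_set_vxlan_port(adapter, 0);			disable
 */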
3668
Sathya Perla4c876612013-02-03 20:30:11 +00003669int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
3670 int vf_num)
3671{
3672 struct be_mcc_wrb *wrb;
3673 struct be_cmd_req_get_iface_list *req;
3674 struct be_cmd_resp_get_iface_list *resp;
3675 int status;
3676
3677 spin_lock_bh(&adapter->mcc_lock);
3678
3679 wrb = wrb_from_mccq(adapter);
3680 if (!wrb) {
3681 status = -EBUSY;
3682 goto err;
3683 }
3684 req = embedded_payload(wrb);
3685
3686 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3687 OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
3688 wrb, NULL);
3689 req->hdr.domain = vf_num + 1;
3690
3691 status = be_mcc_notify_wait(adapter);
3692 if (!status) {
3693 resp = (struct be_cmd_resp_get_iface_list *)req;
3694 vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
3695 }
3696
3697err:
3698 spin_unlock_bh(&adapter->mcc_lock);
3699 return status;
3700}
3701
Somnath Kotur5c510812013-05-30 02:52:23 +00003702static int lancer_wait_idle(struct be_adapter *adapter)
3703{
3704#define SLIPORT_IDLE_TIMEOUT 30
3705 u32 reg_val;
3706 int status = 0, i;
3707
3708 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3709 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3710 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3711 break;
3712
3713 ssleep(1);
3714 }
3715
3716 if (i == SLIPORT_IDLE_TIMEOUT)
3717 status = -1;
3718
3719 return status;
3720}
3721
3722int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask)
3723{
3724 int status = 0;
3725
3726 status = lancer_wait_idle(adapter);
3727 if (status)
3728 return status;
3729
3730 iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET);
3731
3732 return status;
3733}
3734
	3735/* Check whether a dump image is present */
3736bool dump_present(struct be_adapter *adapter)
3737{
3738 u32 sliport_status = 0;
3739
3740 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3741 return !!(sliport_status & SLIPORT_STATUS_DIP_MASK);
3742}
3743
3744int lancer_initiate_dump(struct be_adapter *adapter)
3745{
3746 int status;
3747
	3748	 /* trigger a firmware reset and a diagnostic dump */
3749 status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK |
3750 PHYSDEV_CONTROL_DD_MASK);
3751 if (status < 0) {
3752 dev_err(&adapter->pdev->dev, "Firmware reset failed\n");
3753 return status;
3754 }
3755
3756 status = lancer_wait_idle(adapter);
3757 if (status)
3758 return status;
3759
3760 if (!dump_present(adapter)) {
3761 dev_err(&adapter->pdev->dev, "Dump image not present\n");
3762 return -1;
3763 }
3764
3765 return 0;
3766}
3767
Padmanabh Ratnakardcf7ebb2012-10-20 06:03:49 +00003768/* Uses sync mcc */
3769int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
3770{
3771 struct be_mcc_wrb *wrb;
3772 struct be_cmd_enable_disable_vf *req;
3773 int status;
3774
Vasundhara Volam05998632013-10-01 15:59:59 +05303775 if (BEx_chip(adapter))
Padmanabh Ratnakardcf7ebb2012-10-20 06:03:49 +00003776 return 0;
3777
3778 spin_lock_bh(&adapter->mcc_lock);
3779
3780 wrb = wrb_from_mccq(adapter);
3781 if (!wrb) {
3782 status = -EBUSY;
3783 goto err;
3784 }
3785
3786 req = embedded_payload(wrb);
3787
3788 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3789 OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
3790 wrb, NULL);
3791
3792 req->hdr.domain = domain;
3793 req->enable = 1;
3794 status = be_mcc_notify_wait(adapter);
3795err:
3796 spin_unlock_bh(&adapter->mcc_lock);
3797 return status;
3798}
3799
Somnath Kotur68c45a22013-03-14 02:42:07 +00003800int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
3801{
3802 struct be_mcc_wrb *wrb;
3803 struct be_cmd_req_intr_set *req;
3804 int status;
3805
3806 if (mutex_lock_interruptible(&adapter->mbox_lock))
3807 return -1;
3808
3809 wrb = wrb_from_mbox(adapter);
3810
3811 req = embedded_payload(wrb);
3812
3813 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3814 OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
3815 wrb, NULL);
3816
3817 req->intr_enabled = intr_enable;
3818
3819 status = be_mbox_notify_wait(adapter);
3820
3821 mutex_unlock(&adapter->mbox_lock);
3822 return status;
3823}
3824
Vasundhara Volam542963b2014-01-15 13:23:33 +05303825/* Uses MBOX */
3826int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id)
3827{
3828 struct be_cmd_req_get_active_profile *req;
3829 struct be_mcc_wrb *wrb;
3830 int status;
3831
3832 if (mutex_lock_interruptible(&adapter->mbox_lock))
3833 return -1;
3834
3835 wrb = wrb_from_mbox(adapter);
3836 if (!wrb) {
3837 status = -EBUSY;
3838 goto err;
3839 }
3840
3841 req = embedded_payload(wrb);
3842
3843 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3844 OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req),
3845 wrb, NULL);
3846
3847 status = be_mbox_notify_wait(adapter);
3848 if (!status) {
3849 struct be_cmd_resp_get_active_profile *resp =
3850 embedded_payload(wrb);
3851 *profile_id = le16_to_cpu(resp->active_profile_id);
3852 }
3853
3854err:
3855 mutex_unlock(&adapter->mbox_lock);
3856 return status;
3857}
3858
Suresh Reddybdce2ad2014-03-11 18:53:04 +05303859int be_cmd_set_logical_link_config(struct be_adapter *adapter,
3860 int link_state, u8 domain)
3861{
3862 struct be_mcc_wrb *wrb;
3863 struct be_cmd_req_set_ll_link *req;
3864 int status;
3865
3866 if (BEx_chip(adapter) || lancer_chip(adapter))
3867 return 0;
3868
3869 spin_lock_bh(&adapter->mcc_lock);
3870
3871 wrb = wrb_from_mccq(adapter);
3872 if (!wrb) {
3873 status = -EBUSY;
3874 goto err;
3875 }
3876
3877 req = embedded_payload(wrb);
3878
3879 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3880 OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG,
3881 sizeof(*req), wrb, NULL);
3882
3883 req->hdr.version = 1;
3884 req->hdr.domain = domain;
3885
3886 if (link_state == IFLA_VF_LINK_STATE_ENABLE)
3887 req->link_config |= 1;
3888
3889 if (link_state == IFLA_VF_LINK_STATE_AUTO)
3890 req->link_config |= 1 << PLINK_TRACK_SHIFT;
3891
3892 status = be_mcc_notify_wait(adapter);
3893err:
3894 spin_unlock_bh(&adapter->mcc_lock);
3895 return status;
3896}
3897
Parav Pandit6a4ab662012-03-26 14:27:12 +00003898int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303899 int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
Parav Pandit6a4ab662012-03-26 14:27:12 +00003900{
3901 struct be_adapter *adapter = netdev_priv(netdev_handle);
3902 struct be_mcc_wrb *wrb;
3903 struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) wrb_payload;
3904 struct be_cmd_req_hdr *req;
3905 struct be_cmd_resp_hdr *resp;
3906 int status;
3907
3908 spin_lock_bh(&adapter->mcc_lock);
3909
3910 wrb = wrb_from_mccq(adapter);
3911 if (!wrb) {
3912 status = -EBUSY;
3913 goto err;
3914 }
3915 req = embedded_payload(wrb);
3916 resp = embedded_payload(wrb);
3917
3918 be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
3919 hdr->opcode, wrb_payload_size, wrb, NULL);
3920 memcpy(req, wrb_payload, wrb_payload_size);
3921 be_dws_cpu_to_le(req, wrb_payload_size);
3922
3923 status = be_mcc_notify_wait(adapter);
3924 if (cmd_status)
3925 *cmd_status = (status & 0xffff);
3926 if (ext_status)
3927 *ext_status = 0;
3928 memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
3929 be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
3930err:
3931 spin_unlock_bh(&adapter->mcc_lock);
3932 return status;
3933}
3934EXPORT_SYMBOL(be_roce_mcc_cmd);