/*
 * Copyright (C) 2005 - 2015 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"

static char *be_port_misconfig_evt_desc[] = {
	"A valid SFP module detected",
	"Optics faulted/ incorrectly installed/ not installed.",
	"Optics of two types installed.",
	"Incompatible optics.",
	"Unknown port SFP status"
};

static char *be_port_misconfig_remedy_desc[] = {
	"",
	"Reseat optics. If issue not resolved, replace",
	"Remove one optic or install matching pair of optics",
	"Replace with compatible optics for card to function",
	""
};

static struct be_cmd_priv_map cmd_priv_map[] = {
	{
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_SET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_ETH_GET_PPORT_STATS,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_PHY_DETAILS,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	}
};

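/* Returns false only if the cmd (identified by opcode/subsystem) is listed in
 * cmd_priv_map and none of its required privileges are held by this function
 */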
static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
{
	int i;
	int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
	u32 cmd_privileges = adapter->cmd_privileges;

	for (i = 0; i < num_entries; i++)
		if (opcode == cmd_priv_map[i].opcode &&
		    subsystem == cmd_priv_map[i].subsystem)
			if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
				return false;

	return true;
}

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

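/* Ring the MCC doorbell to tell the FW that one more WRB has been posted */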
static void be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_ANY))
		return;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian)
 */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	u32 flags;

	if (compl->flags != 0) {
		flags = le32_to_cpu(compl->flags);
		if (flags & CQE_FLAGS_VALID_MASK) {
			compl->flags = flags;
			return true;
		}
	}
	return false;
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
{
	unsigned long addr;

	addr = tag1;
	addr = ((addr << 16) << 16) | tag0;
	return (void *)addr;
}

static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
{
	if (base_status == MCC_STATUS_NOT_SUPPORTED ||
	    base_status == MCC_STATUS_ILLEGAL_REQUEST ||
	    addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
	    addl_status == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
	    (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
	     (base_status == MCC_STATUS_ILLEGAL_FIELD ||
	      addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
		return true;
	else
		return false;
}

/* Place holder for all the async MCC cmds wherein the caller is not in a busy
 * loop (has not issued be_mcc_notify_wait())
 */
static void be_async_cmd_process(struct be_adapter *adapter,
				 struct be_mcc_compl *compl,
				 struct be_cmd_resp_hdr *resp_hdr)
{
	enum mcc_base_status base_status = base_status(compl->status);
	u8 opcode = 0, subsystem = 0;

	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
		complete(&adapter->et_cmd_compl);
		return;
	}

	if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
	     opcode == OPCODE_COMMON_WRITE_OBJECT) &&
	    subsystem == CMD_SUBSYSTEM_COMMON) {
		adapter->flash_status = compl->status;
		complete(&adapter->et_cmd_compl);
		return;
	}

	if ((opcode == OPCODE_ETH_GET_STATISTICS ||
	     opcode == OPCODE_ETH_GET_PPORT_STATS) &&
	    subsystem == CMD_SUBSYSTEM_ETH &&
	    base_status == MCC_STATUS_SUCCESS) {
		be_parse_stats(adapter);
		adapter->stats_cmd_sent = false;
		return;
	}

	if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
	    subsystem == CMD_SUBSYSTEM_COMMON) {
		if (base_status == MCC_STATUS_SUCCESS) {
			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
							(void *)resp_hdr;
			adapter->hwmon_info.be_on_die_temp =
						resp->on_die_temperature;
		} else {
			adapter->be_get_temp_freq = 0;
			adapter->hwmon_info.be_on_die_temp =
						BE_INVALID_DIE_TEMP;
		}
		return;
	}
}

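/* Process a completion that carries the status of a previously issued cmd */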
static int be_mcc_compl_process(struct be_adapter *adapter,
				struct be_mcc_compl *compl)
{
	enum mcc_base_status base_status;
	enum mcc_addl_status addl_status;
	struct be_cmd_resp_hdr *resp_hdr;
	u8 opcode = 0, subsystem = 0;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb
	 */
	be_dws_le_to_cpu(compl, 4);

	base_status = base_status(compl->status);
	addl_status = addl_status(compl->status);

	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	be_async_cmd_process(adapter, compl, resp_hdr);

	if (base_status != MCC_STATUS_SUCCESS &&
	    !be_skip_err_log(opcode, base_status, addl_status)) {
		if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
			dev_warn(&adapter->pdev->dev,
				 "VF is not privileged to issue opcode %d-%d\n",
				 opcode, subsystem);
		} else {
			dev_err(&adapter->pdev->dev,
				"opcode %d-%d failed:status %d-%d\n",
				opcode, subsystem, base_status, addl_status);
		}
	}
	return compl->status;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
					struct be_mcc_compl *compl)
{
	struct be_async_event_link_state *evt =
			(struct be_async_event_link_state *)compl;

	/* When link status changes, link speed must be re-queried from FW */
	adapter->phy.link_speed = -1;

	/* On BEx the FW does not send a separate link status
	 * notification for physical and logical link.
	 * On other chips just process the logical link
	 * status notification
	 */
	if (!BEx_chip(adapter) &&
	    !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
		return;

	/* For the initial link status do not rely on the ASYNC event as
	 * it may not be received in some cases.
	 */
	if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
		be_link_status_update(adapter,
				      evt->port_link_status & LINK_STATUS_MASK);
}

static void be_async_port_misconfig_event_process(struct be_adapter *adapter,
						  struct be_mcc_compl *compl)
{
	struct be_async_event_misconfig_port *evt =
			(struct be_async_event_misconfig_port *)compl;
	u32 sfp_mismatch_evt = le32_to_cpu(evt->event_data_word1);
	struct device *dev = &adapter->pdev->dev;
	u8 port_misconfig_evt;

	port_misconfig_evt =
		((sfp_mismatch_evt >> (adapter->hba_port_num * 8)) & 0xff);

	/* Log an error message that would allow a user to determine
	 * whether the SFPs have an issue
	 */
	dev_info(dev, "Port %c: %s %s", adapter->port_name,
		 be_port_misconfig_evt_desc[port_misconfig_evt],
		 be_port_misconfig_remedy_desc[port_misconfig_evt]);

	if (port_misconfig_evt == INCOMPATIBLE_SFP)
		adapter->flags |= BE_FLAGS_EVT_INCOMPATIBLE_SFP;
}

/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
					       struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_cos_priority *evt =
			(struct be_async_event_grp5_cos_priority *)compl;

	if (evt->valid) {
		adapter->vlan_prio_bmap = evt->available_priority_bmap;
		adapter->recommended_prio &= ~VLAN_PRIO_MASK;
		adapter->recommended_prio =
			evt->reco_default_priority << VLAN_PRIO_SHIFT;
	}
}

/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
					    struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_qos_link_speed *evt =
			(struct be_async_event_grp5_qos_link_speed *)compl;

	if (adapter->phy.link_speed >= 0 &&
	    evt->physical_port == adapter->port_num)
		adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
}

/* Grp5 PVID evt */
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
					     struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_pvid_state *evt =
			(struct be_async_event_grp5_pvid_state *)compl;

	if (evt->enabled) {
		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
		dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
	} else {
		adapter->pvid = 0;
	}
}

#define MGMT_ENABLE_MASK	0x4
static void be_async_grp5_fw_control_process(struct be_adapter *adapter,
					     struct be_mcc_compl *compl)
{
	struct be_async_fw_control *evt = (struct be_async_fw_control *)compl;
	u32 evt_dw1 = le32_to_cpu(evt->event_data_word1);

	if (evt_dw1 & MGMT_ENABLE_MASK) {
		adapter->flags |= BE_FLAGS_OS2BMC;
		adapter->bmc_filt_mask = le32_to_cpu(evt->event_data_word2);
	} else {
		adapter->flags &= ~BE_FLAGS_OS2BMC;
	}
}

static void be_async_grp5_evt_process(struct be_adapter *adapter,
				      struct be_mcc_compl *compl)
{
	u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) &
				ASYNC_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_EVENT_COS_PRIORITY:
		be_async_grp5_cos_priority_process(adapter, compl);
		break;
	case ASYNC_EVENT_QOS_SPEED:
		be_async_grp5_qos_speed_process(adapter, compl);
		break;
	case ASYNC_EVENT_PVID_STATE:
		be_async_grp5_pvid_state_process(adapter, compl);
		break;
	/* Async event to disable/enable os2bmc and/or mac-learning */
	case ASYNC_EVENT_FW_CONTROL:
		be_async_grp5_fw_control_process(adapter, compl);
		break;
	default:
		break;
	}
}

static void be_async_dbg_evt_process(struct be_adapter *adapter,
				     struct be_mcc_compl *cmp)
{
	u8 event_type = 0;
	struct be_async_event_qnq *evt = (struct be_async_event_qnq *)cmp;

	event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
			ASYNC_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_DEBUG_EVENT_TYPE_QNQ:
		if (evt->valid)
			adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
		adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
		break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
			 event_type);
		break;
	}
}

static void be_async_sliport_evt_process(struct be_adapter *adapter,
					 struct be_mcc_compl *cmp)
{
	u8 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
			ASYNC_EVENT_TYPE_MASK;

	if (event_type == ASYNC_EVENT_PORT_MISCONFIG)
		be_async_port_misconfig_event_process(adapter, cmp);
}

static inline bool is_link_state_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_LINK_STATE;
}

static inline bool is_grp5_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_GRP_5;
}

static inline bool is_dbg_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_QNQ;
}

static inline bool is_sliport_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
		ASYNC_EVENT_CODE_SLIPORT;
}

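/* Dispatch an async completion to the handler for its event code */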
static void be_mcc_event_process(struct be_adapter *adapter,
				 struct be_mcc_compl *compl)
{
	if (is_link_state_evt(compl->flags))
		be_async_link_state_process(adapter, compl);
	else if (is_grp5_evt(compl->flags))
		be_async_grp5_evt_process(adapter, compl);
	else if (is_dbg_evt(compl->flags))
		be_async_dbg_evt_process(adapter, compl);
	else if (is_sliport_evt(compl->flags))
		be_async_sliport_evt_process(adapter, compl);
}

static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	adapter->mcc_obj.rearm_cq = false;
	be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

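/* Drain the MCC CQ: dispatch async events, process cmd completions and
 * notify the CQ of the number of entries consumed
 */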
int be_process_mcc(struct be_adapter *adapter)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock(&adapter->mcc_cq_lock);

	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			be_mcc_event_process(adapter, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	if (num)
		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);

	spin_unlock(&adapter->mcc_cq_lock);
	return status;
}

/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout	120000 /* 12s timeout */
	int i, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	for (i = 0; i < mcc_timeout; i++) {
		if (be_check_error(adapter, BE_ERROR_ANY))
			return -EIO;

		local_bh_disable();
		status = be_process_mcc(adapter);
		local_bh_enable();

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "FW not responding\n");
		be_set_error(adapter, BE_ERROR_FW);
		return -EIO;
	}
	return status;
}

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	int status;
	struct be_mcc_wrb *wrb;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
	u16 index = mcc_obj->q.head;
	struct be_cmd_resp_hdr *resp;

	index_dec(&index, mcc_obj->q.len);
	wrb = queue_index_node(&mcc_obj->q, index);

	resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);

	be_mcc_notify(adapter);

	status = be_mcc_wait_compl(adapter);
	if (status == -EIO)
		goto out;

	status = (resp->base_status |
		  ((resp->addl_status & CQE_ADDL_STATUS_MASK) <<
		   CQE_ADDL_STATUS_SHIFT));
out:
	return status;
}

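/* Poll the mailbox doorbell until the FW sets the ready bit; flags a FW
 * error if there is no response for ~4 seconds
 */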
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	do {
		if (be_check_error(adapter, BE_ERROR_ANY))
			return -EIO;

		ready = ioread32(db);
		if (ready == 0xffffffff)
			return -1;

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "FW not responding\n");
			be_set_error(adapter, BE_ERROR_FW);
			be_detect_error(adapter);
			return -1;
		}

		msleep(1);
		msecs++;
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}

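/* Read the POST stage from the SLIPORT semaphore register (CSR-mapped on
 * BEx, via PCI config space otherwise)
 */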
static u16 be_POST_stage_get(struct be_adapter *adapter)
{
	u32 sem;

	if (BEx_chip(adapter))
		sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
	else
		pci_read_config_dword(adapter->pdev,
				      SLIPORT_SEMAPHORE_OFFSET_SH, &sem);

	return sem & POST_STAGE_MASK;
}

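/* Wait (up to 30s) for the Lancer SLIPORT to report ready; returns -EIO if
 * the port reports an error with the RN bit clear
 */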
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			return 0;

		if (sliport_status & SLIPORT_STATUS_ERR_MASK &&
		    !(sliport_status & SLIPORT_STATUS_RN_MASK))
			return -EIO;

		msleep(1000);
	}

	return sliport_status ? : -1;
}

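/* Wait for FW POST to complete: Lancer polls the SLIPORT ready bit, other
 * chips poll the POST stage for up to 60 seconds
 */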
int be_fw_wait_ready(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;
	struct device *dev = &adapter->pdev->dev;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (status) {
			stage = status;
			goto err;
		}
		return 0;
	}

	do {
		/* There's no means to poll POST state on BE2/3 VFs */
		if (BEx_chip(adapter) && be_virtfn(adapter))
			return 0;

		stage = be_POST_stage_get(adapter);
		if (stage == POST_STAGE_ARMFW_RDY)
			return 0;

		dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
		if (msleep_interruptible(2000)) {
			dev_err(dev, "Waiting for POST aborted\n");
			return -EINTR;
		}
		timeout += 2;
	} while (timeout < 60);

err:
	dev_err(dev, "POST timeout; stage=%#x\n", stage);
	return -ETIMEDOUT;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
{
	wrb->tag0 = addr & 0xFFFFFFFF;
	wrb->tag1 = upper_32_bits(addr);
}

/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				   u8 subsystem, u8 opcode, int cmd_len,
				   struct be_mcc_wrb *wrb,
				   struct be_dma_mem *mem)
{
	struct be_sge *sge;

	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;
	fill_wrb_tags(wrb, (ulong) req_hdr);
	wrb->payload_length = cmd_len;
	if (mem) {
		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
			MCC_WRB_SGE_CNT_SHIFT;
		sge = nonembedded_sgl(wrb);
		sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
		sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
		sge->len = cpu_to_le32(mem->size);
	} else
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	be_dws_cpu_to_le(wrb, 8);
}

static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
				      struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (!mccq->created)
		return NULL;

	if (atomic_read(&mccq->used) >= mccq->len)
		return NULL;

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

static bool use_mcc(struct be_adapter *adapter)
{
	return adapter->mcc_obj.q.created;
}

/* Must be used only in process context */
static int be_cmd_lock(struct be_adapter *adapter)
{
	if (use_mcc(adapter)) {
		spin_lock_bh(&adapter->mcc_lock);
		return 0;
	} else {
		return mutex_lock_interruptible(&adapter->mbox_lock);
	}
}

/* Must be used only in process context */
static void be_cmd_unlock(struct be_adapter *adapter)
{
	if (use_mcc(adapter))
		spin_unlock_bh(&adapter->mcc_lock);
	else
		return mutex_unlock(&adapter->mbox_lock);
}

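/* Copy the caller's WRB into a free MCCQ WRB (or the mailbox WRB when the
 * MCCQ is not yet created) and refresh the tags for embedded cmds
 */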
static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
				      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;

	if (use_mcc(adapter)) {
		dest_wrb = wrb_from_mccq(adapter);
		if (!dest_wrb)
			return NULL;
	} else {
		dest_wrb = wrb_from_mbox(adapter);
	}

	memcpy(dest_wrb, wrb, sizeof(*wrb));
	if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
		fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));

	return dest_wrb;
}

/* Must be used only in process context */
static int be_cmd_notify_wait(struct be_adapter *adapter,
			      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;
	int status;

	status = be_cmd_lock(adapter);
	if (status)
		return status;

	dest_wrb = be_cmd_copy(adapter, wrb);
	if (!dest_wrb)
		return -EBUSY;

	if (use_mcc(adapter))
		status = be_mcc_notify_wait(adapter);
	else
		status = be_mbox_notify_wait(adapter);

	if (!status)
		memcpy(wrb, dest_wrb, sizeof(*wrb));

	be_cmd_unlock(adapter);
	return status;
}

/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

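/* Uses Mbox */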
int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eqo->q.dma_mem;
	int status, ver = 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
			       NULL);

	/* Support for EQ_CREATEv2 available only SH-R onwards */
	if (!(BEx_chip(adapter) || lancer_chip(adapter)))
		ver = 2;

	req->hdr.version = ver;
	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe*/
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
		      __ilog2_u32(eqo->q.len / 256));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);

		eqo->q.id = le16_to_cpu(resp->eq_id);
		eqo->msix_idx =
			(ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
		eqo->q.created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Use MCC */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			  bool permanent, u32 if_handle, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
			       NULL);
	req->type = MAC_ADDRESS_TYPE_NETWORK;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16)if_handle);
		req->pmac_id = cpu_to_le32(pmac_id);
		req->permanent = 0;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);

		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		    u32 if_id, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
			       NULL);

	req->hdr.domain = domain;
	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);

		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);

	if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
		status = -EPERM;

	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	if (pmac_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req),
			       wrb, NULL);

	req->hdr.domain = dom;
	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
		     struct be_queue_info *eq, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	if (BEx_chip(adapter)) {
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
			      coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
			      ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
	} else {
		req->hdr.version = 2;
		req->page_size = 1; /* 1 for 4K */

		/* coalesce-wm field in this cmd is not relevant to Lancer.
		 * Lancer uses COMMON_MODIFY_CQ to set this field
		 */
		if (!lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
				      ctxt, coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
			      no_delay);
		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);

		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */

	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}

static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
				  struct be_queue_info *mccq,
				  struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_ext_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (BEx_chip(adapter)) {
		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
			      be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
	} else {
		req->hdr.version = 1;
		req->cq_id = cpu_to_le16(cq->id);

		AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt,
			      be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id,
			      ctxt, cq->id);
		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid,
			      ctxt, 1);
	}

	/* Subscribe to Link State, Sliport Event and Group 5 Events
	 * (bits 1, 5 and 17 set)
	 */
	req->async_event_bitmap[0] =
			cpu_to_le32(BIT(ASYNC_EVENT_CODE_LINK_STATE) |
				    BIT(ASYNC_EVENT_CODE_GRP_5) |
				    BIT(ASYNC_EVENT_CODE_QNQ) |
				    BIT(ASYNC_EVENT_CODE_SLIPORT));

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);

		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&adapter->mbox_lock);

	return status;
}

static int be_cmd_mccq_org_create(struct be_adapter *adapter,
				  struct be_queue_info *mccq,
				  struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
		      be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);

		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_mccq_create(struct be_adapter *adapter,
		       struct be_queue_info *mccq, struct be_queue_info *cq)
{
	int status;

	status = be_cmd_mccq_ext_create(adapter, mccq, cq);
	if (status && BEx_chip(adapter)) {
		dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
			 "or newer to avoid conflicting priorities between NIC "
			 "and FCoE traffic");
		status = be_cmd_mccq_org_create(adapter, mccq, cq);
	}
	return status;
}

Vasundhara Volam94d73aa2013-04-21 23:28:14 +00001284int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001285{
Sathya Perla77071332013-08-27 16:57:34 +05301286 struct be_mcc_wrb wrb = {0};
Sathya Perlab31c50a2009-09-17 10:30:13 -07001287 struct be_cmd_req_eth_tx_create *req;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00001288 struct be_queue_info *txq = &txo->q;
1289 struct be_queue_info *cq = &txo->cq;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001290 struct be_dma_mem *q_mem = &txq->dma_mem;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00001291 int status, ver = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001292
Sathya Perla77071332013-08-27 16:57:34 +05301293 req = embedded_payload(&wrb);
Somnath Kotur106df1e2011-10-27 07:12:13 +00001294 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301295 OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001296
Padmanabh Ratnakar8b7756c2011-03-07 03:08:52 +00001297 if (lancer_chip(adapter)) {
1298 req->hdr.version = 1;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00001299 } else if (BEx_chip(adapter)) {
1300 if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
1301 req->hdr.version = 2;
1302	} else { /* For Skyhawk */
1303 req->hdr.version = 2;
Padmanabh Ratnakar8b7756c2011-03-07 03:08:52 +00001304 }
1305
Vasundhara Volam81b02652013-10-01 15:59:57 +05301306 if (req->hdr.version > 0)
1307 req->if_id = cpu_to_le16(adapter->if_handle);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001308 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1309 req->ulp_num = BE_ULP1_NUM;
1310 req->type = BE_ETH_TX_RING_TYPE_STANDARD;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00001311 req->cq_id = cpu_to_le16(cq->id);
1312 req->queue_size = be_encoded_q_len(txq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001313 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00001314 ver = req->hdr.version;
1315
Sathya Perla77071332013-08-27 16:57:34 +05301316 status = be_cmd_notify_wait(adapter, &wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001317 if (!status) {
Sathya Perla77071332013-08-27 16:57:34 +05301318 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05301319
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001320 txq->id = le16_to_cpu(resp->cid);
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00001321 if (ver == 2)
1322 txo->db_offset = le32_to_cpu(resp->db_offset);
1323 else
1324 txo->db_offset = DB_TXULP1_OFFSET;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001325 txq->created = true;
1326 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001327
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001328 return status;
1329}
1330
Sathya Perla482c9e72011-06-29 23:33:17 +00001331/* Uses MCC */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001332int be_cmd_rxq_create(struct be_adapter *adapter,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301333 struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
1334 u32 if_id, u32 rss, u8 *rss_id)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001335{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001336 struct be_mcc_wrb *wrb;
1337 struct be_cmd_req_eth_rx_create *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001338 struct be_dma_mem *q_mem = &rxq->dma_mem;
1339 int status;
1340
Sathya Perla482c9e72011-06-29 23:33:17 +00001341 spin_lock_bh(&adapter->mcc_lock);
Sathya Perlab31c50a2009-09-17 10:30:13 -07001342
Sathya Perla482c9e72011-06-29 23:33:17 +00001343 wrb = wrb_from_mccq(adapter);
1344 if (!wrb) {
1345 status = -EBUSY;
1346 goto err;
1347 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001348 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001349
Somnath Kotur106df1e2011-10-27 07:12:13 +00001350 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301351 OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001352
1353 req->cq_id = cpu_to_le16(cq_id);
1354 req->frag_size = fls(frag_size) - 1;
1355 req->num_pages = 2;
1356 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1357 req->interface_id = cpu_to_le32(if_id);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001358 req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001359 req->rss_queue = cpu_to_le32(rss);
1360
Sathya Perla482c9e72011-06-29 23:33:17 +00001361 status = be_mcc_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001362 if (!status) {
1363 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05301364
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001365 rxq->id = le16_to_cpu(resp->id);
1366 rxq->created = true;
Sathya Perla3abcded2010-10-03 22:12:27 -07001367 *rss_id = resp->rss_id;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001368 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001369
Sathya Perla482c9e72011-06-29 23:33:17 +00001370err:
1371 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001372 return status;
1373}
1374
Sathya Perlab31c50a2009-09-17 10:30:13 -07001375/* Generic destroyer function for all types of queues
1376 * Uses Mbox
1377 */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001378int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301379 int queue_type)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001380{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001381 struct be_mcc_wrb *wrb;
1382 struct be_cmd_req_q_destroy *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001383 u8 subsys = 0, opcode = 0;
1384 int status;
1385
Ivan Vecera29849612010-12-14 05:43:19 +00001386 if (mutex_lock_interruptible(&adapter->mbox_lock))
1387 return -1;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001388
Sathya Perlab31c50a2009-09-17 10:30:13 -07001389 wrb = wrb_from_mbox(adapter);
1390 req = embedded_payload(wrb);
1391
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001392 switch (queue_type) {
1393 case QTYPE_EQ:
1394 subsys = CMD_SUBSYSTEM_COMMON;
1395 opcode = OPCODE_COMMON_EQ_DESTROY;
1396 break;
1397 case QTYPE_CQ:
1398 subsys = CMD_SUBSYSTEM_COMMON;
1399 opcode = OPCODE_COMMON_CQ_DESTROY;
1400 break;
1401 case QTYPE_TXQ:
1402 subsys = CMD_SUBSYSTEM_ETH;
1403 opcode = OPCODE_ETH_TX_DESTROY;
1404 break;
1405 case QTYPE_RXQ:
1406 subsys = CMD_SUBSYSTEM_ETH;
1407 opcode = OPCODE_ETH_RX_DESTROY;
1408 break;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001409 case QTYPE_MCCQ:
1410 subsys = CMD_SUBSYSTEM_COMMON;
1411 opcode = OPCODE_COMMON_MCC_DESTROY;
1412 break;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001413 default:
Sathya Perla5f0b8492009-07-27 22:52:56 +00001414 BUG();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001415 }
Ajit Khaparded744b442009-12-03 06:12:06 +00001416
Somnath Kotur106df1e2011-10-27 07:12:13 +00001417 be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301418 NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001419 req->id = cpu_to_le16(q->id);
1420
Sathya Perlab31c50a2009-09-17 10:30:13 -07001421 status = be_mbox_notify_wait(adapter);
Padmanabh Ratnakaraa790db2012-10-20 06:03:25 +00001422 q->created = false;
Sathya Perla5f0b8492009-07-27 22:52:56 +00001423
Ivan Vecera29849612010-12-14 05:43:19 +00001424 mutex_unlock(&adapter->mbox_lock);
Sathya Perla482c9e72011-06-29 23:33:17 +00001425 return status;
1426}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001427
Sathya Perla482c9e72011-06-29 23:33:17 +00001428/* Uses MCC */
1429int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1430{
1431 struct be_mcc_wrb *wrb;
1432 struct be_cmd_req_q_destroy *req;
1433 int status;
1434
1435 spin_lock_bh(&adapter->mcc_lock);
1436
1437 wrb = wrb_from_mccq(adapter);
1438 if (!wrb) {
1439 status = -EBUSY;
1440 goto err;
1441 }
1442 req = embedded_payload(wrb);
1443
Somnath Kotur106df1e2011-10-27 07:12:13 +00001444 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301445 OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
Sathya Perla482c9e72011-06-29 23:33:17 +00001446 req->id = cpu_to_le16(q->id);
1447
1448 status = be_mcc_notify_wait(adapter);
Padmanabh Ratnakaraa790db2012-10-20 06:03:25 +00001449 q->created = false;
Sathya Perla482c9e72011-06-29 23:33:17 +00001450
1451err:
1452 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001453 return status;
1454}
1455
Sathya Perlab31c50a2009-09-17 10:30:13 -07001456/* Create an rx filtering policy configuration on an i/f
Sathya Perlabea50982013-08-27 16:57:33 +05301457 * Will use MBOX only if MCCQ has not been created.
Sathya Perlab31c50a2009-09-17 10:30:13 -07001458 */
Sathya Perla73d540f2009-10-14 20:20:42 +00001459int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00001460 u32 *if_handle, u32 domain)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001461{
Sathya Perlabea50982013-08-27 16:57:33 +05301462 struct be_mcc_wrb wrb = {0};
Sathya Perlab31c50a2009-09-17 10:30:13 -07001463 struct be_cmd_req_if_create *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001464 int status;
1465
Sathya Perlabea50982013-08-27 16:57:33 +05301466 req = embedded_payload(&wrb);
Somnath Kotur106df1e2011-10-27 07:12:13 +00001467 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301468 OPCODE_COMMON_NTWK_INTERFACE_CREATE,
1469 sizeof(*req), &wrb, NULL);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001470 req->hdr.domain = domain;
Sathya Perla73d540f2009-10-14 20:20:42 +00001471 req->capability_flags = cpu_to_le32(cap_flags);
1472 req->enable_flags = cpu_to_le32(en_flags);
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00001473 req->pmac_invalid = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001474
Sathya Perlabea50982013-08-27 16:57:33 +05301475 status = be_cmd_notify_wait(adapter, &wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001476 if (!status) {
Sathya Perlabea50982013-08-27 16:57:33 +05301477 struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05301478
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001479 *if_handle = le32_to_cpu(resp->interface_id);
Sathya Perlab5bb9772013-07-23 15:25:01 +05301480
1481 /* Hack to retrieve VF's pmac-id on BE3 */
Kalesh AP18c57c72015-05-06 05:30:38 -04001482 if (BE3_chip(adapter) && be_virtfn(adapter))
Sathya Perlab5bb9772013-07-23 15:25:01 +05301483 adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001484 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001485 return status;
1486}
1487
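/*
 * Illustrative sketch (not part of the driver): a hypothetical caller that
 * creates an interface with a minimal capability/enable flag set and tears
 * it down again. The flag choice, domain 0 and the "demo" name are
 * assumptions for illustration only.
 */
static int be_demo_if_setup(struct be_adapter *adapter)
{
	u32 flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_MULTICAST;
	u32 if_handle;
	int status;

	/* cap_flags and en_flags are kept identical in this sketch */
	status = be_cmd_if_create(adapter, flags, flags, &if_handle, 0);
	if (status)
		return status;

	/* ... use the interface ... */

	return be_cmd_if_destroy(adapter, if_handle, 0);
}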
Sathya Perlaf9449ab2011-10-24 02:45:01 +00001488/* Uses MCCQ */
Sathya Perla30128032011-11-10 19:17:57 +00001489int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001490{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001491 struct be_mcc_wrb *wrb;
1492 struct be_cmd_req_if_destroy *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001493 int status;
1494
Sathya Perla30128032011-11-10 19:17:57 +00001495 if (interface_id == -1)
Sathya Perlaf9449ab2011-10-24 02:45:01 +00001496 return 0;
Sathya Perlab31c50a2009-09-17 10:30:13 -07001497
Sathya Perlaf9449ab2011-10-24 02:45:01 +00001498 spin_lock_bh(&adapter->mcc_lock);
1499
1500 wrb = wrb_from_mccq(adapter);
1501 if (!wrb) {
1502 status = -EBUSY;
1503 goto err;
1504 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001505 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001506
Somnath Kotur106df1e2011-10-27 07:12:13 +00001507 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301508 OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
1509 sizeof(*req), wrb, NULL);
Ajit Khaparde658681f2011-02-11 13:34:46 +00001510 req->hdr.domain = domain;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001511 req->interface_id = cpu_to_le32(interface_id);
Sathya Perlab31c50a2009-09-17 10:30:13 -07001512
Sathya Perlaf9449ab2011-10-24 02:45:01 +00001513 status = be_mcc_notify_wait(adapter);
1514err:
1515 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001516 return status;
1517}
1518
1519/* Get stats is a non-embedded command: the request is not embedded inside
1520 * the WRB but is a separate DMA memory block.
Sathya Perlab31c50a2009-09-17 10:30:13 -07001521 * Uses asynchronous MCC
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001522 */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001523int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001524{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001525 struct be_mcc_wrb *wrb;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00001526 struct be_cmd_req_hdr *hdr;
Sathya Perla713d03942009-11-22 22:02:45 +00001527 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001528
Sathya Perlab31c50a2009-09-17 10:30:13 -07001529 spin_lock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001530
Sathya Perlab31c50a2009-09-17 10:30:13 -07001531 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001532 if (!wrb) {
1533 status = -EBUSY;
1534 goto err;
1535 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00001536 hdr = nonemb_cmd->va;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001537
Somnath Kotur106df1e2011-10-27 07:12:13 +00001538 be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301539 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
1540 nonemb_cmd);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00001541
Sathya Perlaca34fe32012-11-06 17:48:56 +00001542	/* BE2 supports only v0 of the cmd; BE3 and Lancer use v1, later chips v2 */
Ajit Khaparde61000862013-10-03 16:16:33 -05001543 if (BE2_chip(adapter))
1544 hdr->version = 0;
1545 if (BE3_chip(adapter) || lancer_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00001546 hdr->version = 1;
Ajit Khaparde61000862013-10-03 16:16:33 -05001547 else
1548 hdr->version = 2;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00001549
Sathya Perlab31c50a2009-09-17 10:30:13 -07001550 be_mcc_notify(adapter);
Ajit Khapardeb2aebe62011-02-20 11:41:39 +00001551 adapter->stats_cmd_sent = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001552
Sathya Perla713d03942009-11-22 22:02:45 +00001553err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07001554 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla713d03942009-11-22 22:02:45 +00001555 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001556}
1557
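/*
 * Illustrative sketch (not part of the driver): the non-embedded command
 * pattern used by be_cmd_get_stats(). A hypothetical caller allocates the
 * DMA block and fires the asynchronous command; the response is parsed from
 * nonemb_cmd->va only when the MCC completion arrives. Using the v1 request
 * size is an assumption - the real choice depends on the chip generation.
 */
static int be_demo_fire_stats_cmd(struct be_adapter *adapter,
				  struct be_dma_mem *nonemb_cmd)
{
	nonemb_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	nonemb_cmd->va = pci_alloc_consistent(adapter->pdev, nonemb_cmd->size,
					      &nonemb_cmd->dma);
	if (!nonemb_cmd->va)
		return -ENOMEM;
	memset(nonemb_cmd->va, 0, nonemb_cmd->size);

	/* Returns as soon as the request is posted; does not wait */
	return be_cmd_get_stats(adapter, nonemb_cmd);
}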
Selvin Xavier005d5692011-05-16 07:36:35 +00001558/* Lancer Stats */
1559int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301560 struct be_dma_mem *nonemb_cmd)
Selvin Xavier005d5692011-05-16 07:36:35 +00001561{
Selvin Xavier005d5692011-05-16 07:36:35 +00001562 struct be_mcc_wrb *wrb;
1563 struct lancer_cmd_req_pport_stats *req;
Selvin Xavier005d5692011-05-16 07:36:35 +00001564 int status = 0;
1565
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00001566 if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
1567 CMD_SUBSYSTEM_ETH))
1568 return -EPERM;
1569
Selvin Xavier005d5692011-05-16 07:36:35 +00001570 spin_lock_bh(&adapter->mcc_lock);
1571
1572 wrb = wrb_from_mccq(adapter);
1573 if (!wrb) {
1574 status = -EBUSY;
1575 goto err;
1576 }
1577 req = nonemb_cmd->va;
Selvin Xavier005d5692011-05-16 07:36:35 +00001578
Somnath Kotur106df1e2011-10-27 07:12:13 +00001579 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301580 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
1581 wrb, nonemb_cmd);
Selvin Xavier005d5692011-05-16 07:36:35 +00001582
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +00001583 req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
Selvin Xavier005d5692011-05-16 07:36:35 +00001584 req->cmd_params.params.reset_stats = 0;
1585
Selvin Xavier005d5692011-05-16 07:36:35 +00001586 be_mcc_notify(adapter);
1587 adapter->stats_cmd_sent = true;
1588
1589err:
1590 spin_unlock_bh(&adapter->mcc_lock);
1591 return status;
1592}
1593
Sathya Perla323ff712012-09-28 04:39:43 +00001594static int be_mac_to_link_speed(int mac_speed)
1595{
1596 switch (mac_speed) {
1597 case PHY_LINK_SPEED_ZERO:
1598 return 0;
1599 case PHY_LINK_SPEED_10MBPS:
1600 return 10;
1601 case PHY_LINK_SPEED_100MBPS:
1602 return 100;
1603 case PHY_LINK_SPEED_1GBPS:
1604 return 1000;
1605 case PHY_LINK_SPEED_10GBPS:
1606 return 10000;
Vasundhara Volamb971f842013-08-06 09:27:15 +05301607 case PHY_LINK_SPEED_20GBPS:
1608 return 20000;
1609 case PHY_LINK_SPEED_25GBPS:
1610 return 25000;
1611 case PHY_LINK_SPEED_40GBPS:
1612 return 40000;
Sathya Perla323ff712012-09-28 04:39:43 +00001613 }
1614 return 0;
1615}
1616
1617/* Uses synchronous mcc
1618 * Returns link_speed in Mbps
1619 */
1620int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
1621 u8 *link_status, u32 dom)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001622{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001623 struct be_mcc_wrb *wrb;
1624 struct be_cmd_req_link_status *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001625 int status;
1626
Sathya Perlab31c50a2009-09-17 10:30:13 -07001627 spin_lock_bh(&adapter->mcc_lock);
1628
Ajit Khapardeb236916a2011-12-30 12:15:40 +00001629 if (link_status)
1630 *link_status = LINK_DOWN;
1631
Sathya Perlab31c50a2009-09-17 10:30:13 -07001632 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001633 if (!wrb) {
1634 status = -EBUSY;
1635 goto err;
1636 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001637 req = embedded_payload(wrb);
Sathya Perlaa8f447bd2009-06-18 00:10:27 +00001638
Padmanabh Ratnakar57cd80d2012-02-03 09:49:46 +00001639 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301640 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
1641 sizeof(*req), wrb, NULL);
Padmanabh Ratnakar57cd80d2012-02-03 09:49:46 +00001642
Sathya Perlaca34fe32012-11-06 17:48:56 +00001643	/* version 1 of the cmd is supported by all chips except BE2 */
1644 if (!BE2_chip(adapter))
Padmanabh Ratnakardaad6162011-11-16 02:03:45 +00001645 req->hdr.version = 1;
1646
Padmanabh Ratnakar57cd80d2012-02-03 09:49:46 +00001647 req->hdr.domain = dom;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001648
Sathya Perlab31c50a2009-09-17 10:30:13 -07001649 status = be_mcc_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001650 if (!status) {
1651 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05301652
Sathya Perla323ff712012-09-28 04:39:43 +00001653 if (link_speed) {
1654 *link_speed = resp->link_speed ?
1655 le16_to_cpu(resp->link_speed) * 10 :
1656 be_mac_to_link_speed(resp->mac_speed);
1657
1658 if (!resp->logical_link_status)
1659 *link_speed = 0;
Sarveshwar Bandi0388f252009-10-28 04:15:20 -07001660 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +00001661 if (link_status)
1662 *link_status = resp->logical_link_status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001663 }
1664
Sathya Perla713d03942009-11-22 22:02:45 +00001665err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07001666 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001667 return status;
1668}
1669
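/*
 * Illustrative sketch (not part of the driver): a hypothetical caller of
 * be_cmd_link_status_query() that logs the current link state. The message
 * wording is an assumption.
 */
static void be_demo_report_link(struct be_adapter *adapter)
{
	u8 link_status = LINK_DOWN;
	u16 link_speed = 0;

	/* Synchronous MCC command; domain 0 queries the function's own port */
	if (!be_cmd_link_status_query(adapter, &link_speed, &link_status, 0))
		dev_info(&adapter->pdev->dev, "link %s, speed %u Mbps\n",
			 link_status ? "up" : "down", link_speed);
}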
Ajit Khaparde609ff3b2011-02-20 11:42:07 +00001670/* Uses asynchronous mcc; does not wait for the response */
1671int be_cmd_get_die_temperature(struct be_adapter *adapter)
1672{
1673 struct be_mcc_wrb *wrb;
1674 struct be_cmd_req_get_cntl_addnl_attribs *req;
Vasundhara Volam117affe2013-08-06 09:27:20 +05301675 int status = 0;
Ajit Khaparde609ff3b2011-02-20 11:42:07 +00001676
1677 spin_lock_bh(&adapter->mcc_lock);
1678
1679 wrb = wrb_from_mccq(adapter);
1680 if (!wrb) {
1681 status = -EBUSY;
1682 goto err;
1683 }
1684 req = embedded_payload(wrb);
1685
Somnath Kotur106df1e2011-10-27 07:12:13 +00001686 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301687 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
1688 sizeof(*req), wrb, NULL);
Ajit Khaparde609ff3b2011-02-20 11:42:07 +00001689
Somnath Kotur3de09452011-09-30 07:25:05 +00001690 be_mcc_notify(adapter);
Ajit Khaparde609ff3b2011-02-20 11:42:07 +00001691
1692err:
1693 spin_unlock_bh(&adapter->mcc_lock);
1694 return status;
1695}
1696
Somnath Kotur311fddc2011-03-16 21:22:43 +00001697/* Uses synchronous mcc */
1698int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1699{
1700 struct be_mcc_wrb *wrb;
1701 struct be_cmd_req_get_fat *req;
1702 int status;
1703
1704 spin_lock_bh(&adapter->mcc_lock);
1705
1706 wrb = wrb_from_mccq(adapter);
1707 if (!wrb) {
1708 status = -EBUSY;
1709 goto err;
1710 }
1711 req = embedded_payload(wrb);
1712
Somnath Kotur106df1e2011-10-27 07:12:13 +00001713 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301714 OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb,
1715 NULL);
Somnath Kotur311fddc2011-03-16 21:22:43 +00001716 req->fat_operation = cpu_to_le32(QUERY_FAT);
1717 status = be_mcc_notify_wait(adapter);
1718 if (!status) {
1719 struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05301720
Somnath Kotur311fddc2011-03-16 21:22:43 +00001721 if (log_size && resp->log_size)
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001722 *log_size = le32_to_cpu(resp->log_size) -
1723 sizeof(u32);
Somnath Kotur311fddc2011-03-16 21:22:43 +00001724 }
1725err:
1726 spin_unlock_bh(&adapter->mcc_lock);
1727 return status;
1728}
1729
Vasundhara Volamc5f156d2014-09-02 09:56:54 +05301730int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
Somnath Kotur311fddc2011-03-16 21:22:43 +00001731{
1732 struct be_dma_mem get_fat_cmd;
1733 struct be_mcc_wrb *wrb;
1734 struct be_cmd_req_get_fat *req;
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001735 u32 offset = 0, total_size, buf_size,
1736 log_offset = sizeof(u32), payload_len;
Vasundhara Volamc5f156d2014-09-02 09:56:54 +05301737 int status = 0;
Somnath Kotur311fddc2011-03-16 21:22:43 +00001738
1739 if (buf_len == 0)
Vasundhara Volamc5f156d2014-09-02 09:56:54 +05301740 return -EIO;
Somnath Kotur311fddc2011-03-16 21:22:43 +00001741
1742 total_size = buf_len;
1743
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001744 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1745 get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301746 get_fat_cmd.size,
1747 &get_fat_cmd.dma);
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001748 if (!get_fat_cmd.va) {
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001749 dev_err(&adapter->pdev->dev,
Kalesh APcd3307aa2014-09-19 15:47:02 +05301750 "Memory allocation failure while reading FAT data\n");
Vasundhara Volamc5f156d2014-09-02 09:56:54 +05301751 return -ENOMEM;
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001752 }
1753
Somnath Kotur311fddc2011-03-16 21:22:43 +00001754 spin_lock_bh(&adapter->mcc_lock);
1755
Somnath Kotur311fddc2011-03-16 21:22:43 +00001756 while (total_size) {
1757 buf_size = min(total_size, (u32)60*1024);
1758 total_size -= buf_size;
1759
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001760 wrb = wrb_from_mccq(adapter);
1761 if (!wrb) {
1762 status = -EBUSY;
Somnath Kotur311fddc2011-03-16 21:22:43 +00001763 goto err;
1764 }
1765 req = get_fat_cmd.va;
Somnath Kotur311fddc2011-03-16 21:22:43 +00001766
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001767 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
Somnath Kotur106df1e2011-10-27 07:12:13 +00001768 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301769 OPCODE_COMMON_MANAGE_FAT, payload_len,
1770 wrb, &get_fat_cmd);
Somnath Kotur311fddc2011-03-16 21:22:43 +00001771
1772 req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1773 req->read_log_offset = cpu_to_le32(log_offset);
1774 req->read_log_length = cpu_to_le32(buf_size);
1775 req->data_buffer_size = cpu_to_le32(buf_size);
1776
1777 status = be_mcc_notify_wait(adapter);
1778 if (!status) {
1779 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
Kalesh AP03d28ff2014-09-19 15:46:56 +05301780
Somnath Kotur311fddc2011-03-16 21:22:43 +00001781 memcpy(buf + offset,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301782 resp->data_buffer,
1783 le32_to_cpu(resp->read_log_length));
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001784 } else {
Somnath Kotur311fddc2011-03-16 21:22:43 +00001785			dev_err(&adapter->pdev->dev, "FAT table retrieval error\n");
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001786 goto err;
1787 }
Somnath Kotur311fddc2011-03-16 21:22:43 +00001788 offset += buf_size;
1789 log_offset += buf_size;
1790 }
1791err:
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001792 pci_free_consistent(adapter->pdev, get_fat_cmd.size,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301793 get_fat_cmd.va, get_fat_cmd.dma);
Somnath Kotur311fddc2011-03-16 21:22:43 +00001794 spin_unlock_bh(&adapter->mcc_lock);
Vasundhara Volamc5f156d2014-09-02 09:56:54 +05301795 return status;
Somnath Kotur311fddc2011-03-16 21:22:43 +00001796}
1797
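/*
 * Illustrative sketch (not part of the driver): the usual two-step use of
 * the FAT log commands - query the size first, then allocate a buffer and
 * pull the data. The vzalloc()-based buffer handling is an assumption.
 */
static void *be_demo_read_fat_log(struct be_adapter *adapter, u32 *size)
{
	void *buf;

	*size = 0;
	if (be_cmd_get_reg_len(adapter, size) || !*size)
		return NULL;

	buf = vzalloc(*size);
	if (!buf)
		return NULL;

	if (be_cmd_get_regs(adapter, *size, buf)) {
		vfree(buf);
		return NULL;
	}
	return buf;	/* caller must vfree() the buffer */
}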
Sathya Perla04b71172011-09-27 13:30:27 -04001798/* Uses synchronous mcc */
Kalesh APe97e3cd2014-07-17 16:20:26 +05301799int be_cmd_get_fw_ver(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001800{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001801 struct be_mcc_wrb *wrb;
1802 struct be_cmd_req_get_fw_version *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001803 int status;
1804
Sathya Perla04b71172011-09-27 13:30:27 -04001805 spin_lock_bh(&adapter->mcc_lock);
Sathya Perlab31c50a2009-09-17 10:30:13 -07001806
Sathya Perla04b71172011-09-27 13:30:27 -04001807 wrb = wrb_from_mccq(adapter);
1808 if (!wrb) {
1809 status = -EBUSY;
1810 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001811 }
1812
Sathya Perla04b71172011-09-27 13:30:27 -04001813 req = embedded_payload(wrb);
Sathya Perla04b71172011-09-27 13:30:27 -04001814
Somnath Kotur106df1e2011-10-27 07:12:13 +00001815 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301816 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
1817 NULL);
Sathya Perla04b71172011-09-27 13:30:27 -04001818 status = be_mcc_notify_wait(adapter);
1819 if (!status) {
1820 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
Sathya Perlaacbafeb2014-09-02 09:56:46 +05301821
Vasundhara Volam242eb472014-09-12 17:39:15 +05301822 strlcpy(adapter->fw_ver, resp->firmware_version_string,
1823 sizeof(adapter->fw_ver));
1824 strlcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
1825 sizeof(adapter->fw_on_flash));
Sathya Perla04b71172011-09-27 13:30:27 -04001826 }
1827err:
1828 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001829 return status;
1830}
1831
Sathya Perlab31c50a2009-09-17 10:30:13 -07001832/* Set the EQ delay interval of the given EQs to the specified values
1833 * Uses async mcc
1834 */
Kalesh APb502ae82014-09-19 15:46:51 +05301835static int __be_cmd_modify_eqd(struct be_adapter *adapter,
1836 struct be_set_eqd *set_eqd, int num)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001837{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001838 struct be_mcc_wrb *wrb;
1839 struct be_cmd_req_modify_eq_delay *req;
Sathya Perla2632baf2013-10-01 16:00:00 +05301840 int status = 0, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001841
Sathya Perlab31c50a2009-09-17 10:30:13 -07001842 spin_lock_bh(&adapter->mcc_lock);
1843
1844 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001845 if (!wrb) {
1846 status = -EBUSY;
1847 goto err;
1848 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001849 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001850
Somnath Kotur106df1e2011-10-27 07:12:13 +00001851 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301852 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
1853 NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001854
Sathya Perla2632baf2013-10-01 16:00:00 +05301855 req->num_eq = cpu_to_le32(num);
1856 for (i = 0; i < num; i++) {
1857 req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
1858 req->set_eqd[i].phase = 0;
1859 req->set_eqd[i].delay_multiplier =
1860 cpu_to_le32(set_eqd[i].delay_multiplier);
1861 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001862
Sathya Perlab31c50a2009-09-17 10:30:13 -07001863 be_mcc_notify(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001864err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07001865 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla713d03942009-11-22 22:02:45 +00001866 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001867}
1868
Kalesh AP93676702014-09-12 17:39:20 +05301869int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
1870 int num)
1871{
1872 int num_eqs, i = 0;
1873
Suresh Reddyc8ba4ad02015-03-20 06:28:24 -04001874 while (num) {
1875 num_eqs = min(num, 8);
1876 __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
1877 i += num_eqs;
1878 num -= num_eqs;
Kalesh AP93676702014-09-12 17:39:20 +05301879 }
1880
1881 return 0;
1882}
1883
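/*
 * Illustrative sketch (not part of the driver): programming a fixed
 * interrupt delay on two EQs. The EQ ids and the delay_multiplier value are
 * arbitrary example values; the real driver derives them from its EQ delay
 * settings elsewhere.
 */
static void be_demo_set_static_eqd(struct be_adapter *adapter)
{
	struct be_set_eqd eqd[2] = {};
	int i;

	for (i = 0; i < 2; i++) {
		eqd[i].eq_id = i;		/* hypothetical EQ ids */
		eqd[i].delay_multiplier = 32;	/* arbitrary example value */
	}
	be_cmd_modify_eqd(adapter, eqd, ARRAY_SIZE(eqd));
}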
Sathya Perlab31c50a2009-09-17 10:30:13 -07001884/* Uses synchronous mcc */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001885int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
Vasundhara Volam435452a2015-03-20 06:28:23 -04001886 u32 num, u32 domain)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001887{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001888 struct be_mcc_wrb *wrb;
1889 struct be_cmd_req_vlan_config *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001890 int status;
1891
Sathya Perlab31c50a2009-09-17 10:30:13 -07001892 spin_lock_bh(&adapter->mcc_lock);
1893
1894 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001895 if (!wrb) {
1896 status = -EBUSY;
1897 goto err;
1898 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001899 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001900
Somnath Kotur106df1e2011-10-27 07:12:13 +00001901 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301902 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
1903 wrb, NULL);
Vasundhara Volam435452a2015-03-20 06:28:23 -04001904 req->hdr.domain = domain;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001905
1906 req->interface_id = if_id;
Ajit Khaparde012bd382013-11-18 10:44:24 -06001907 req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001908 req->num_vlan = num;
Kalesh AP4d567d92014-05-09 13:29:17 +05301909 memcpy(req->normal_vlan, vtag_array,
1910 req->num_vlan * sizeof(vtag_array[0]));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001911
Sathya Perlab31c50a2009-09-17 10:30:13 -07001912 status = be_mcc_notify_wait(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001913err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07001914 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001915 return status;
1916}
1917
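/*
 * Illustrative sketch (not part of the driver): programming a small VLAN
 * filter table on the adapter's own interface. The VLAN ids are arbitrary
 * example values; domain 0 addresses the calling function itself.
 */
static int be_demo_set_vlans(struct be_adapter *adapter)
{
	u16 vids[2] = { 100, 200 };	/* hypothetical VLAN ids */

	return be_cmd_vlan_config(adapter, adapter->if_handle, vids,
				  ARRAY_SIZE(vids), 0);
}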
Sathya Perlaac34b742015-02-06 08:18:40 -05001918static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001919{
Sathya Perla6ac7b682009-06-18 00:05:54 +00001920 struct be_mcc_wrb *wrb;
Sathya Perla5b8821b2011-08-02 19:57:44 +00001921 struct be_dma_mem *mem = &adapter->rx_filter;
1922 struct be_cmd_req_rx_filter *req = mem->va;
Sathya Perlae7b909a2009-11-22 22:01:10 +00001923 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001924
Sathya Perla8788fdc2009-07-27 22:52:03 +00001925 spin_lock_bh(&adapter->mcc_lock);
Sathya Perla6ac7b682009-06-18 00:05:54 +00001926
Sathya Perlab31c50a2009-09-17 10:30:13 -07001927 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001928 if (!wrb) {
1929 status = -EBUSY;
1930 goto err;
1931 }
Sathya Perla5b8821b2011-08-02 19:57:44 +00001932 memset(req, 0, sizeof(*req));
Somnath Kotur106df1e2011-10-27 07:12:13 +00001933 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301934 OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
1935 wrb, mem);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001936
Sathya Perla5b8821b2011-08-02 19:57:44 +00001937 req->if_id = cpu_to_le32(adapter->if_handle);
Sathya Perlaac34b742015-02-06 08:18:40 -05001938 req->if_flags_mask = cpu_to_le32(flags);
1939 req->if_flags = (value == ON) ? req->if_flags_mask : 0;
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001940
Sathya Perlaac34b742015-02-06 08:18:40 -05001941 if (flags & BE_IF_FLAGS_MULTICAST) {
Sathya Perla5b8821b2011-08-02 19:57:44 +00001942 struct netdev_hw_addr *ha;
1943 int i = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001944
Padmanabh Ratnakar1610c792011-11-03 01:49:27 +00001945 /* Reset mcast promisc mode if already set by setting mask
1946 * and not setting flags field
1947 */
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001948 req->if_flags_mask |=
1949 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
Sathya Perla92bf14a2013-08-27 16:57:32 +05301950 be_if_cap_flags(adapter));
Padmanabh Ratnakar016f97b2011-11-03 01:49:13 +00001951 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
Sathya Perla5b8821b2011-08-02 19:57:44 +00001952 netdev_for_each_mc_addr(ha, adapter->netdev)
1953 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
1954 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001955
Sathya Perla0d1d5872011-08-03 05:19:27 -07001956 status = be_mcc_notify_wait(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001957err:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001958 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perlae7b909a2009-11-22 22:01:10 +00001959 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001960}
1961
Sathya Perlaac34b742015-02-06 08:18:40 -05001962int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1963{
1964 struct device *dev = &adapter->pdev->dev;
1965
1966 if ((flags & be_if_cap_flags(adapter)) != flags) {
1967 dev_warn(dev, "Cannot set rx filter flags 0x%x\n", flags);
1968 dev_warn(dev, "Interface is capable of 0x%x flags only\n",
1969 be_if_cap_flags(adapter));
1970 }
1971 flags &= be_if_cap_flags(adapter);
1972
1973 return __be_cmd_rx_filter(adapter, flags, value);
1974}
1975
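/*
 * Illustrative sketch (not part of the driver): toggling multicast
 * filtering through the be_cmd_rx_filter() wrapper. Flags not advertised by
 * the interface are masked off above, so a caller may pass a superset.
 */
static void be_demo_set_mcast_filter(struct be_adapter *adapter, bool enable)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, enable ? ON : OFF);
}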
Sathya Perlab31c50a2009-09-17 10:30:13 -07001976/* Uses synchronous mcc */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001977int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001978{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001979 struct be_mcc_wrb *wrb;
1980 struct be_cmd_req_set_flow_control *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001981 int status;
1982
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00001983 if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
1984 CMD_SUBSYSTEM_COMMON))
1985 return -EPERM;
1986
Sathya Perlab31c50a2009-09-17 10:30:13 -07001987 spin_lock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001988
Sathya Perlab31c50a2009-09-17 10:30:13 -07001989 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001990 if (!wrb) {
1991 status = -EBUSY;
1992 goto err;
1993 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001994 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001995
Somnath Kotur106df1e2011-10-27 07:12:13 +00001996 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301997 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
1998 wrb, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001999
Suresh Reddyb29812c2014-09-12 17:39:17 +05302000 req->hdr.version = 1;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002001 req->tx_flow_control = cpu_to_le16((u16)tx_fc);
2002 req->rx_flow_control = cpu_to_le16((u16)rx_fc);
2003
Sathya Perlab31c50a2009-09-17 10:30:13 -07002004 status = be_mcc_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002005
Sathya Perla713d03942009-11-22 22:02:45 +00002006err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07002007 spin_unlock_bh(&adapter->mcc_lock);
Suresh Reddyb29812c2014-09-12 17:39:17 +05302008
2009 if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
2010 return -EOPNOTSUPP;
2011
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002012 return status;
2013}
2014
Sathya Perlab31c50a2009-09-17 10:30:13 -07002015/* Uses sync mcc */
Sathya Perla8788fdc2009-07-27 22:52:03 +00002016int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002017{
Sathya Perlab31c50a2009-09-17 10:30:13 -07002018 struct be_mcc_wrb *wrb;
2019 struct be_cmd_req_get_flow_control *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002020 int status;
2021
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002022 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
2023 CMD_SUBSYSTEM_COMMON))
2024 return -EPERM;
2025
Sathya Perlab31c50a2009-09-17 10:30:13 -07002026 spin_lock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002027
Sathya Perlab31c50a2009-09-17 10:30:13 -07002028 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00002029 if (!wrb) {
2030 status = -EBUSY;
2031 goto err;
2032 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07002033 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002034
Somnath Kotur106df1e2011-10-27 07:12:13 +00002035 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302036 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
2037 wrb, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002038
Sathya Perlab31c50a2009-09-17 10:30:13 -07002039 status = be_mcc_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002040 if (!status) {
2041 struct be_cmd_resp_get_flow_control *resp =
2042 embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05302043
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002044 *tx_fc = le16_to_cpu(resp->tx_flow_control);
2045 *rx_fc = le16_to_cpu(resp->rx_flow_control);
2046 }
2047
Sathya Perla713d03942009-11-22 22:02:45 +00002048err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07002049 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002050 return status;
2051}
2052
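/*
 * Illustrative sketch (not part of the driver): an ethtool-style pause
 * update built from the two flow-control commands - read the current
 * settings, then write back a modified pair. Forcing RX pause on is just an
 * example policy.
 */
static int be_demo_enable_rx_pause(struct be_adapter *adapter)
{
	u32 tx_fc, rx_fc;
	int status;

	status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
	if (status)
		return status;

	return be_cmd_set_flow_control(adapter, tx_fc, 1);
}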
Sathya Perlab31c50a2009-09-17 10:30:13 -07002053/* Uses mbox */
Kalesh APe97e3cd2014-07-17 16:20:26 +05302054int be_cmd_query_fw_cfg(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002055{
Sathya Perlab31c50a2009-09-17 10:30:13 -07002056 struct be_mcc_wrb *wrb;
2057 struct be_cmd_req_query_fw_cfg *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002058 int status;
2059
Ivan Vecera29849612010-12-14 05:43:19 +00002060 if (mutex_lock_interruptible(&adapter->mbox_lock))
2061 return -1;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002062
Sathya Perlab31c50a2009-09-17 10:30:13 -07002063 wrb = wrb_from_mbox(adapter);
2064 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002065
Somnath Kotur106df1e2011-10-27 07:12:13 +00002066 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302067 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
2068 sizeof(*req), wrb, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002069
Sathya Perlab31c50a2009-09-17 10:30:13 -07002070 status = be_mbox_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002071 if (!status) {
2072 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05302073
Kalesh APe97e3cd2014-07-17 16:20:26 +05302074 adapter->port_num = le32_to_cpu(resp->phys_port);
2075 adapter->function_mode = le32_to_cpu(resp->function_mode);
2076 adapter->function_caps = le32_to_cpu(resp->function_caps);
2077 adapter->asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
Sathya Perlaacbafeb2014-09-02 09:56:46 +05302078 dev_info(&adapter->pdev->dev,
2079 "FW config: function_mode=0x%x, function_caps=0x%x\n",
2080 adapter->function_mode, adapter->function_caps);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002081 }
2082
Ivan Vecera29849612010-12-14 05:43:19 +00002083 mutex_unlock(&adapter->mbox_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002084 return status;
2085}
sarveshwarb14074ea2009-08-05 13:05:24 -07002086
Sathya Perlab31c50a2009-09-17 10:30:13 -07002087/* Uses mbox */
sarveshwarb14074ea2009-08-05 13:05:24 -07002088int be_cmd_reset_function(struct be_adapter *adapter)
2089{
Sathya Perlab31c50a2009-09-17 10:30:13 -07002090 struct be_mcc_wrb *wrb;
2091 struct be_cmd_req_hdr *req;
sarveshwarb14074ea2009-08-05 13:05:24 -07002092 int status;
2093
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00002094 if (lancer_chip(adapter)) {
Sathya Perla9fa465c2015-02-23 04:20:13 -05002095 iowrite32(SLI_PORT_CONTROL_IP_MASK,
2096 adapter->db + SLIPORT_CONTROL_OFFSET);
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00002097 status = lancer_wait_ready(adapter);
Sathya Perla9fa465c2015-02-23 04:20:13 -05002098 if (status)
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00002099 dev_err(&adapter->pdev->dev,
2100 "Adapter in non recoverable error\n");
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00002101 return status;
2102 }
2103
Ivan Vecera29849612010-12-14 05:43:19 +00002104 if (mutex_lock_interruptible(&adapter->mbox_lock))
2105 return -1;
sarveshwarb14074ea2009-08-05 13:05:24 -07002106
Sathya Perlab31c50a2009-09-17 10:30:13 -07002107 wrb = wrb_from_mbox(adapter);
2108 req = embedded_payload(wrb);
sarveshwarb14074ea2009-08-05 13:05:24 -07002109
Somnath Kotur106df1e2011-10-27 07:12:13 +00002110 be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302111 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
2112 NULL);
sarveshwarb14074ea2009-08-05 13:05:24 -07002113
Sathya Perlab31c50a2009-09-17 10:30:13 -07002114 status = be_mbox_notify_wait(adapter);
sarveshwarb14074ea2009-08-05 13:05:24 -07002115
Ivan Vecera29849612010-12-14 05:43:19 +00002116 mutex_unlock(&adapter->mbox_lock);
sarveshwarb14074ea2009-08-05 13:05:24 -07002117 return status;
2118}
Ajit Khaparde84517482009-09-04 03:12:16 +00002119
Suresh Reddy594ad542013-04-25 23:03:20 +00002120int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
Ben Hutchings33cb0fa2014-05-15 02:01:23 +01002121 u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey)
Sathya Perla3abcded2010-10-03 22:12:27 -07002122{
2123 struct be_mcc_wrb *wrb;
2124 struct be_cmd_req_rss_config *req;
Sathya Perla3abcded2010-10-03 22:12:27 -07002125 int status;
2126
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302127 if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
2128 return 0;
2129
Kalesh APb51aa362014-05-09 13:29:19 +05302130 spin_lock_bh(&adapter->mcc_lock);
Sathya Perla3abcded2010-10-03 22:12:27 -07002131
Kalesh APb51aa362014-05-09 13:29:19 +05302132 wrb = wrb_from_mccq(adapter);
2133 if (!wrb) {
2134 status = -EBUSY;
2135 goto err;
2136 }
Sathya Perla3abcded2010-10-03 22:12:27 -07002137 req = embedded_payload(wrb);
2138
Somnath Kotur106df1e2011-10-27 07:12:13 +00002139 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302140 OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
Sathya Perla3abcded2010-10-03 22:12:27 -07002141
2142 req->if_id = cpu_to_le32(adapter->if_handle);
Suresh Reddy594ad542013-04-25 23:03:20 +00002143 req->enable_rss = cpu_to_le16(rss_hash_opts);
Sathya Perla3abcded2010-10-03 22:12:27 -07002144 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
Suresh Reddy594ad542013-04-25 23:03:20 +00002145
Kalesh APb51aa362014-05-09 13:29:19 +05302146 if (!BEx_chip(adapter))
Suresh Reddy594ad542013-04-25 23:03:20 +00002147 req->hdr.version = 1;
2148
Sathya Perla3abcded2010-10-03 22:12:27 -07002149 memcpy(req->cpu_table, rsstable, table_size);
Venkata Duvvurue2557872014-04-21 15:38:00 +05302150 memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
Sathya Perla3abcded2010-10-03 22:12:27 -07002151 be_dws_cpu_to_le(req->hash, sizeof(req->hash));
2152
Kalesh APb51aa362014-05-09 13:29:19 +05302153 status = be_mcc_notify_wait(adapter);
2154err:
2155 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla3abcded2010-10-03 22:12:27 -07002156 return status;
2157}
2158
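/*
 * Illustrative sketch (not part of the driver): reseeding the RSS hash key
 * and spreading a 128-entry indirection table across a given number of RX
 * queues. The table size, the round-robin spread and the caller-supplied
 * hash_opts value are assumptions.
 */
static int be_demo_setup_rss(struct be_adapter *adapter, u32 hash_opts,
			     u16 num_rx_qs)
{
	u8 rss_key[RSS_HASH_KEY_LEN];
	u8 rsstable[128];
	int i;

	if (!num_rx_qs)
		return -EINVAL;

	get_random_bytes(rss_key, sizeof(rss_key));
	for (i = 0; i < ARRAY_SIZE(rsstable); i++)
		rsstable[i] = i % num_rx_qs;	/* round-robin over RX queues */

	return be_cmd_rss_config(adapter, rsstable, hash_opts,
				 ARRAY_SIZE(rsstable), rss_key);
}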
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002159/* Uses sync mcc */
2160int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302161 u8 bcn, u8 sts, u8 state)
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002162{
2163 struct be_mcc_wrb *wrb;
2164 struct be_cmd_req_enable_disable_beacon *req;
2165 int status;
2166
2167 spin_lock_bh(&adapter->mcc_lock);
2168
2169 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00002170 if (!wrb) {
2171 status = -EBUSY;
2172 goto err;
2173 }
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002174 req = embedded_payload(wrb);
2175
Somnath Kotur106df1e2011-10-27 07:12:13 +00002176 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302177 OPCODE_COMMON_ENABLE_DISABLE_BEACON,
2178 sizeof(*req), wrb, NULL);
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002179
2180 req->port_num = port_num;
2181 req->beacon_state = state;
2182 req->beacon_duration = bcn;
2183 req->status_duration = sts;
2184
2185 status = be_mcc_notify_wait(adapter);
2186
Sathya Perla713d03942009-11-22 22:02:45 +00002187err:
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002188 spin_unlock_bh(&adapter->mcc_lock);
2189 return status;
2190}
2191
2192/* Uses sync mcc */
2193int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
2194{
2195 struct be_mcc_wrb *wrb;
2196 struct be_cmd_req_get_beacon_state *req;
2197 int status;
2198
2199 spin_lock_bh(&adapter->mcc_lock);
2200
2201 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00002202 if (!wrb) {
2203 status = -EBUSY;
2204 goto err;
2205 }
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002206 req = embedded_payload(wrb);
2207
Somnath Kotur106df1e2011-10-27 07:12:13 +00002208 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302209 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
2210 wrb, NULL);
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002211
2212 req->port_num = port_num;
2213
2214 status = be_mcc_notify_wait(adapter);
2215 if (!status) {
2216 struct be_cmd_resp_get_beacon_state *resp =
2217 embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05302218
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002219 *state = resp->beacon_state;
2220 }
2221
Sathya Perla713d03942009-11-22 22:02:45 +00002222err:
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002223 spin_unlock_bh(&adapter->mcc_lock);
2224 return status;
2225}
2226
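/*
 * Illustrative sketch (not part of the driver): blinking the port LED for a
 * port-identify operation. Passing 1/0 for the beacon state and zero
 * durations is an assumption standing in for the driver's beacon-state
 * constants.
 */
static int be_demo_blink_led(struct be_adapter *adapter, bool on)
{
	return be_cmd_set_beacon_state(adapter, adapter->hba_port_num,
				       0, 0, on ? 1 : 0);
}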
Mark Leonarde36edd92014-09-12 17:39:18 +05302227/* Uses sync mcc */
2228int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
2229 u8 page_num, u8 *data)
2230{
2231 struct be_dma_mem cmd;
2232 struct be_mcc_wrb *wrb;
2233 struct be_cmd_req_port_type *req;
2234 int status;
2235
2236 if (page_num > TR_PAGE_A2)
2237 return -EINVAL;
2238
2239 cmd.size = sizeof(struct be_cmd_resp_port_type);
2240 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
2241 if (!cmd.va) {
2242 dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
2243 return -ENOMEM;
2244 }
2245 memset(cmd.va, 0, cmd.size);
2246
2247 spin_lock_bh(&adapter->mcc_lock);
2248
2249 wrb = wrb_from_mccq(adapter);
2250 if (!wrb) {
2251 status = -EBUSY;
2252 goto err;
2253 }
2254 req = cmd.va;
2255
2256 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2257 OPCODE_COMMON_READ_TRANSRECV_DATA,
2258 cmd.size, wrb, &cmd);
2259
2260 req->port = cpu_to_le32(adapter->hba_port_num);
2261 req->page_num = cpu_to_le32(page_num);
2262 status = be_mcc_notify_wait(adapter);
2263 if (!status) {
2264 struct be_cmd_resp_port_type *resp = cmd.va;
2265
2266 memcpy(data, resp->page_data, PAGE_DATA_LEN);
2267 }
2268err:
2269 spin_unlock_bh(&adapter->mcc_lock);
2270 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2271 return status;
2272}
2273
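/*
 * Illustrative sketch (not part of the driver): dumping the lower EEPROM
 * page of the installed SFP/QSFP module. The caller-provided buffer must
 * hold at least PAGE_DATA_LEN bytes.
 */
static int be_demo_dump_sfp_page(struct be_adapter *adapter, u8 *buf)
{
	return be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, buf);
}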
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002274int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002275 u32 data_size, u32 data_offset,
2276 const char *obj_name, u32 *data_written,
2277 u8 *change_status, u8 *addn_status)
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002278{
2279 struct be_mcc_wrb *wrb;
2280 struct lancer_cmd_req_write_object *req;
2281 struct lancer_cmd_resp_write_object *resp;
2282 void *ctxt = NULL;
2283 int status;
2284
2285 spin_lock_bh(&adapter->mcc_lock);
2286 adapter->flash_status = 0;
2287
2288 wrb = wrb_from_mccq(adapter);
2289 if (!wrb) {
2290 status = -EBUSY;
2291 goto err_unlock;
2292 }
2293
2294 req = embedded_payload(wrb);
2295
Somnath Kotur106df1e2011-10-27 07:12:13 +00002296 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302297 OPCODE_COMMON_WRITE_OBJECT,
2298 sizeof(struct lancer_cmd_req_write_object), wrb,
2299 NULL);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002300
2301 ctxt = &req->context;
2302 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302303 write_length, ctxt, data_size);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002304
2305 if (data_size == 0)
2306 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302307 eof, ctxt, 1);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002308 else
2309 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302310 eof, ctxt, 0);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002311
2312 be_dws_cpu_to_le(ctxt, sizeof(req->context));
2313 req->write_offset = cpu_to_le32(data_offset);
Vasundhara Volam242eb472014-09-12 17:39:15 +05302314 strlcpy(req->object_name, obj_name, sizeof(req->object_name));
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002315 req->descriptor_count = cpu_to_le32(1);
2316 req->buf_len = cpu_to_le32(data_size);
2317 req->addr_low = cpu_to_le32((cmd->dma +
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302318 sizeof(struct lancer_cmd_req_write_object))
2319 & 0xFFFFFFFF);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002320 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
2321 sizeof(struct lancer_cmd_req_write_object)));
2322
2323 be_mcc_notify(adapter);
2324 spin_unlock_bh(&adapter->mcc_lock);
2325
Suresh Reddy5eeff632014-01-06 13:02:24 +05302326 if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
Somnath Kotur701962d2013-05-02 03:36:34 +00002327 msecs_to_jiffies(60000)))
Kalesh APfd451602014-07-17 16:20:21 +05302328 status = -ETIMEDOUT;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002329 else
2330 status = adapter->flash_status;
2331
2332 resp = embedded_payload(wrb);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002333 if (!status) {
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002334 *data_written = le32_to_cpu(resp->actual_write_len);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002335 *change_status = resp->change_status;
2336 } else {
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002337 *addn_status = resp->additional_status;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002338 }
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002339
2340 return status;
2341
2342err_unlock:
2343 spin_unlock_bh(&adapter->mcc_lock);
2344 return status;
2345}
2346
Ravikumar Nelavelli6809cee2014-09-12 17:39:19 +05302347int be_cmd_query_cable_type(struct be_adapter *adapter)
2348{
2349 u8 page_data[PAGE_DATA_LEN];
2350 int status;
2351
2352 status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
2353 page_data);
2354 if (!status) {
2355 switch (adapter->phy.interface_type) {
2356 case PHY_TYPE_QSFP:
2357 adapter->phy.cable_type =
2358 page_data[QSFP_PLUS_CABLE_TYPE_OFFSET];
2359 break;
2360 case PHY_TYPE_SFP_PLUS_10GB:
2361 adapter->phy.cable_type =
2362 page_data[SFP_PLUS_CABLE_TYPE_OFFSET];
2363 break;
2364 default:
2365 adapter->phy.cable_type = 0;
2366 break;
2367 }
2368 }
2369 return status;
2370}
2371
Vasundhara Volam21252372015-02-06 08:18:42 -05002372int be_cmd_query_sfp_info(struct be_adapter *adapter)
2373{
2374 u8 page_data[PAGE_DATA_LEN];
2375 int status;
2376
2377 status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
2378 page_data);
2379 if (!status) {
2380 strlcpy(adapter->phy.vendor_name, page_data +
2381 SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);
2382 strlcpy(adapter->phy.vendor_pn,
2383 page_data + SFP_VENDOR_PN_OFFSET,
2384 SFP_VENDOR_NAME_LEN - 1);
2385 }
2386
2387 return status;
2388}
2389
Kalesh APf0613382014-08-01 17:47:32 +05302390int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name)
2391{
2392 struct lancer_cmd_req_delete_object *req;
2393 struct be_mcc_wrb *wrb;
2394 int status;
2395
2396 spin_lock_bh(&adapter->mcc_lock);
2397
2398 wrb = wrb_from_mccq(adapter);
2399 if (!wrb) {
2400 status = -EBUSY;
2401 goto err;
2402 }
2403
2404 req = embedded_payload(wrb);
2405
2406 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2407 OPCODE_COMMON_DELETE_OBJECT,
2408 sizeof(*req), wrb, NULL);
2409
Vasundhara Volam242eb472014-09-12 17:39:15 +05302410 strlcpy(req->object_name, obj_name, sizeof(req->object_name));
Kalesh APf0613382014-08-01 17:47:32 +05302411
2412 status = be_mcc_notify_wait(adapter);
2413err:
2414 spin_unlock_bh(&adapter->mcc_lock);
2415 return status;
2416}
2417
Padmanabh Ratnakarde49bd52011-11-16 02:02:43 +00002418int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302419 u32 data_size, u32 data_offset, const char *obj_name,
2420 u32 *data_read, u32 *eof, u8 *addn_status)
Padmanabh Ratnakarde49bd52011-11-16 02:02:43 +00002421{
2422 struct be_mcc_wrb *wrb;
2423 struct lancer_cmd_req_read_object *req;
2424 struct lancer_cmd_resp_read_object *resp;
2425 int status;
2426
2427 spin_lock_bh(&adapter->mcc_lock);
2428
2429 wrb = wrb_from_mccq(adapter);
2430 if (!wrb) {
2431 status = -EBUSY;
2432 goto err_unlock;
2433 }
2434
2435 req = embedded_payload(wrb);
2436
2437 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302438 OPCODE_COMMON_READ_OBJECT,
2439 sizeof(struct lancer_cmd_req_read_object), wrb,
2440 NULL);
Padmanabh Ratnakarde49bd52011-11-16 02:02:43 +00002441
2442 req->desired_read_len = cpu_to_le32(data_size);
2443 req->read_offset = cpu_to_le32(data_offset);
2444 strcpy(req->object_name, obj_name);
2445 req->descriptor_count = cpu_to_le32(1);
2446 req->buf_len = cpu_to_le32(data_size);
2447 req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
2448 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
2449
2450 status = be_mcc_notify_wait(adapter);
2451
2452 resp = embedded_payload(wrb);
2453 if (!status) {
2454 *data_read = le32_to_cpu(resp->actual_read_len);
2455 *eof = le32_to_cpu(resp->eof);
2456 } else {
2457 *addn_status = resp->additional_status;
2458 }
2459
2460err_unlock:
2461 spin_unlock_bh(&adapter->mcc_lock);
2462 return status;
2463}
2464
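/* Issues a WRITE_FLASHROM request and then sleeps on et_cmd_compl until
 * the MCC completion handler signals it (40s timeout). The final status
 * is the one recorded in adapter->flash_status, or -ETIMEDOUT.
 */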
Ajit Khaparde84517482009-09-04 03:12:16 +00002465int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
Vasundhara Volam70a7b522015-02-06 08:18:39 -05002466 u32 flash_type, u32 flash_opcode, u32 img_offset,
2467 u32 buf_size)
Ajit Khaparde84517482009-09-04 03:12:16 +00002468{
Sathya Perlab31c50a2009-09-17 10:30:13 -07002469 struct be_mcc_wrb *wrb;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002470 struct be_cmd_write_flashrom *req;
Ajit Khaparde84517482009-09-04 03:12:16 +00002471 int status;
2472
Sathya Perlab31c50a2009-09-17 10:30:13 -07002473 spin_lock_bh(&adapter->mcc_lock);
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07002474 adapter->flash_status = 0;
Sathya Perlab31c50a2009-09-17 10:30:13 -07002475
2476 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00002477 if (!wrb) {
2478 status = -EBUSY;
Dan Carpenter2892d9c2010-05-26 04:46:35 +00002479 goto err_unlock;
Sathya Perla713d03942009-11-22 22:02:45 +00002480 }
2481 req = cmd->va;
Sathya Perlab31c50a2009-09-17 10:30:13 -07002482
Somnath Kotur106df1e2011-10-27 07:12:13 +00002483 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302484 OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb,
2485 cmd);
Ajit Khaparde84517482009-09-04 03:12:16 +00002486
2487 req->params.op_type = cpu_to_le32(flash_type);
Vasundhara Volam70a7b522015-02-06 08:18:39 -05002488 if (flash_type == OPTYPE_OFFSET_SPECIFIED)
2489 req->params.offset = cpu_to_le32(img_offset);
2490
Ajit Khaparde84517482009-09-04 03:12:16 +00002491 req->params.op_code = cpu_to_le32(flash_opcode);
2492 req->params.data_buf_size = cpu_to_le32(buf_size);
2493
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07002494 be_mcc_notify(adapter);
2495 spin_unlock_bh(&adapter->mcc_lock);
2496
Suresh Reddy5eeff632014-01-06 13:02:24 +05302497 if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2498 msecs_to_jiffies(40000)))
Kalesh APfd451602014-07-17 16:20:21 +05302499 status = -ETIMEDOUT;
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07002500 else
2501 status = adapter->flash_status;
Ajit Khaparde84517482009-09-04 03:12:16 +00002502
Dan Carpenter2892d9c2010-05-26 04:46:35 +00002503 return status;
2504
2505err_unlock:
2506 spin_unlock_bh(&adapter->mcc_lock);
Ajit Khaparde84517482009-09-04 03:12:16 +00002507 return status;
2508}
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002509
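/* Reads the 4-byte CRC of the given flash image (FLASHROM_OPER_REPORT)
 * into flashed_crc.
 */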
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002510int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
Vasundhara Volam70a7b522015-02-06 08:18:39 -05002511 u16 img_optype, u32 img_offset, u32 crc_offset)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002512{
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00002513 struct be_cmd_read_flash_crc *req;
Vasundhara Volam70a7b522015-02-06 08:18:39 -05002514 struct be_mcc_wrb *wrb;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002515 int status;
2516
2517 spin_lock_bh(&adapter->mcc_lock);
2518
2519 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00002520 if (!wrb) {
2521 status = -EBUSY;
2522 goto err;
2523 }
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002524 req = embedded_payload(wrb);
2525
Somnath Kotur106df1e2011-10-27 07:12:13 +00002526 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00002527 OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
2528 wrb, NULL);
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002529
Vasundhara Volam70a7b522015-02-06 08:18:39 -05002530 req->params.op_type = cpu_to_le32(img_optype);
2531 if (img_optype == OPTYPE_OFFSET_SPECIFIED)
2532 req->params.offset = cpu_to_le32(img_offset + crc_offset);
2533 else
2534 req->params.offset = cpu_to_le32(crc_offset);
2535
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002536 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002537 req->params.data_buf_size = cpu_to_le32(0x4);
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002538
2539 status = be_mcc_notify_wait(adapter);
2540 if (!status)
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00002541 memcpy(flashed_crc, req->crc, 4);
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002542
Sathya Perla713d03942009-11-22 22:02:45 +00002543err:
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002544 spin_unlock_bh(&adapter->mcc_lock);
2545 return status;
2546}
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002547
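/* Programs the magic-packet WoL MAC address using the non-embedded
 * ACPI_WOL_MAGIC_CONFIG command.
 */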
Dan Carpenterc196b022010-05-26 04:47:39 +00002548int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302549 struct be_dma_mem *nonemb_cmd)
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002550{
2551 struct be_mcc_wrb *wrb;
2552 struct be_cmd_req_acpi_wol_magic_config *req;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002553 int status;
2554
2555 spin_lock_bh(&adapter->mcc_lock);
2556
2557 wrb = wrb_from_mccq(adapter);
2558 if (!wrb) {
2559 status = -EBUSY;
2560 goto err;
2561 }
2562 req = nonemb_cmd->va;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002563
Somnath Kotur106df1e2011-10-27 07:12:13 +00002564 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302565 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
2566 wrb, nonemb_cmd);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002567 memcpy(req->magic_mac, mac, ETH_ALEN);
2568
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002569 status = be_mcc_notify_wait(adapter);
2570
2571err:
2572 spin_unlock_bh(&adapter->mcc_lock);
2573 return status;
2574}
Suresh Rff33a6e2009-12-03 16:15:52 -08002575
Sarveshwar Bandifced9992009-12-23 04:41:44 +00002576int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
2577 u8 loopback_type, u8 enable)
2578{
2579 struct be_mcc_wrb *wrb;
2580 struct be_cmd_req_set_lmode *req;
2581 int status;
2582
2583 spin_lock_bh(&adapter->mcc_lock);
2584
2585 wrb = wrb_from_mccq(adapter);
2586 if (!wrb) {
2587 status = -EBUSY;
2588 goto err;
2589 }
2590
2591 req = embedded_payload(wrb);
2592
Somnath Kotur106df1e2011-10-27 07:12:13 +00002593 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302594 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
2595 wrb, NULL);
Sarveshwar Bandifced9992009-12-23 04:41:44 +00002596
2597 req->src_port = port_num;
2598 req->dest_port = port_num;
2599 req->loopback_type = loopback_type;
2600 req->loopback_state = enable;
2601
2602 status = be_mcc_notify_wait(adapter);
2603err:
2604 spin_unlock_bh(&adapter->mcc_lock);
2605 return status;
2606}
2607
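/* Runs a FW loopback test on the given port. The command completes
 * asynchronously: the caller sleeps on et_cmd_compl until the MCC
 * completion handler wakes it, then reads the result from the response.
 */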
Suresh Rff33a6e2009-12-03 16:15:52 -08002608int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302609 u32 loopback_type, u32 pkt_size, u32 num_pkts,
2610 u64 pattern)
Suresh Rff33a6e2009-12-03 16:15:52 -08002611{
2612 struct be_mcc_wrb *wrb;
2613 struct be_cmd_req_loopback_test *req;
Suresh Reddy5eeff632014-01-06 13:02:24 +05302614 struct be_cmd_resp_loopback_test *resp;
Suresh Rff33a6e2009-12-03 16:15:52 -08002615 int status;
2616
2617 spin_lock_bh(&adapter->mcc_lock);
2618
2619 wrb = wrb_from_mccq(adapter);
2620 if (!wrb) {
2621 status = -EBUSY;
2622 goto err;
2623 }
2624
2625 req = embedded_payload(wrb);
2626
Somnath Kotur106df1e2011-10-27 07:12:13 +00002627 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302628 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb,
2629 NULL);
Suresh Rff33a6e2009-12-03 16:15:52 -08002630
Suresh Reddy5eeff632014-01-06 13:02:24 +05302631 req->hdr.timeout = cpu_to_le32(15);
Suresh Rff33a6e2009-12-03 16:15:52 -08002632 req->pattern = cpu_to_le64(pattern);
2633 req->src_port = cpu_to_le32(port_num);
2634 req->dest_port = cpu_to_le32(port_num);
2635 req->pkt_size = cpu_to_le32(pkt_size);
2636 req->num_pkts = cpu_to_le32(num_pkts);
2637 req->loopback_type = cpu_to_le32(loopback_type);
2638
Suresh Reddy5eeff632014-01-06 13:02:24 +05302639 be_mcc_notify(adapter);
Suresh Rff33a6e2009-12-03 16:15:52 -08002640
Suresh Reddy5eeff632014-01-06 13:02:24 +05302641 spin_unlock_bh(&adapter->mcc_lock);
2642
2643 wait_for_completion(&adapter->et_cmd_compl);
2644 resp = embedded_payload(wrb);
2645 status = le32_to_cpu(resp->status);
2646
2647 return status;
Suresh Rff33a6e2009-12-03 16:15:52 -08002648err:
2649 spin_unlock_bh(&adapter->mcc_lock);
2650 return status;
2651}
2652
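/* DDR DMA test: fills the send buffer with 'pattern', asks FW to DMA it
 * back, and fails (-1) if the received data does not match or a send
 * error is reported.
 */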
2653int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302654 u32 byte_cnt, struct be_dma_mem *cmd)
Suresh Rff33a6e2009-12-03 16:15:52 -08002655{
2656 struct be_mcc_wrb *wrb;
2657 struct be_cmd_req_ddrdma_test *req;
Suresh Rff33a6e2009-12-03 16:15:52 -08002658 int status;
2659 int i, j = 0;
2660
2661 spin_lock_bh(&adapter->mcc_lock);
2662
2663 wrb = wrb_from_mccq(adapter);
2664 if (!wrb) {
2665 status = -EBUSY;
2666 goto err;
2667 }
2668 req = cmd->va;
Somnath Kotur106df1e2011-10-27 07:12:13 +00002669 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302670 OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb,
2671 cmd);
Suresh Rff33a6e2009-12-03 16:15:52 -08002672
2673 req->pattern = cpu_to_le64(pattern);
2674 req->byte_count = cpu_to_le32(byte_cnt);
2675 for (i = 0; i < byte_cnt; i++) {
2676 req->snd_buff[i] = (u8)(pattern >> (j*8));
2677 j++;
2678 if (j > 7)
2679 j = 0;
2680 }
2681
2682 status = be_mcc_notify_wait(adapter);
2683
2684 if (!status) {
2685 struct be_cmd_resp_ddrdma_test *resp;
Kalesh AP03d28ff2014-09-19 15:46:56 +05302686
Suresh Rff33a6e2009-12-03 16:15:52 -08002687 resp = cmd->va;
2688 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
Kalesh APcd3307aa2014-09-19 15:47:02 +05302689 resp->snd_err) {
Suresh Rff33a6e2009-12-03 16:15:52 -08002690 status = -1;
2691 }
2692 }
2693
2694err:
2695 spin_unlock_bh(&adapter->mcc_lock);
2696 return status;
2697}
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08002698
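/* Reads the SEEPROM contents into the caller-provided non-embedded buffer. */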
Dan Carpenterc196b022010-05-26 04:47:39 +00002699int be_cmd_get_seeprom_data(struct be_adapter *adapter,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302700 struct be_dma_mem *nonemb_cmd)
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08002701{
2702 struct be_mcc_wrb *wrb;
2703 struct be_cmd_req_seeprom_read *req;
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08002704 int status;
2705
2706 spin_lock_bh(&adapter->mcc_lock);
2707
2708 wrb = wrb_from_mccq(adapter);
Ajit Khapardee45ff012011-02-04 17:18:28 +00002709 if (!wrb) {
2710 status = -EBUSY;
2711 goto err;
2712 }
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08002713 req = nonemb_cmd->va;
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08002714
Somnath Kotur106df1e2011-10-27 07:12:13 +00002715 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302716 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
2717 nonemb_cmd);
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08002718
2719 status = be_mcc_notify_wait(adapter);
2720
Ajit Khapardee45ff012011-02-04 17:18:28 +00002721err:
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08002722 spin_unlock_bh(&adapter->mcc_lock);
2723 return status;
2724}
Ajit Khapardeee3cb622010-07-01 03:51:00 +00002725
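/* Queries PHY details (PHY type, interface type, supported speeds) and
 * caches them in adapter->phy. For BE2 chips the fixed speeds are
 * overridden to 10G + 1G.
 */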
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002726int be_cmd_get_phy_info(struct be_adapter *adapter)
Ajit Khapardeee3cb622010-07-01 03:51:00 +00002727{
2728 struct be_mcc_wrb *wrb;
2729 struct be_cmd_req_get_phy_info *req;
Sathya Perla306f1342011-08-02 19:57:45 +00002730 struct be_dma_mem cmd;
Ajit Khapardeee3cb622010-07-01 03:51:00 +00002731 int status;
2732
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002733 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
2734 CMD_SUBSYSTEM_COMMON))
2735 return -EPERM;
2736
Ajit Khapardeee3cb622010-07-01 03:51:00 +00002737 spin_lock_bh(&adapter->mcc_lock);
2738
2739 wrb = wrb_from_mccq(adapter);
2740 if (!wrb) {
2741 status = -EBUSY;
2742 goto err;
2743 }
Sathya Perla306f1342011-08-02 19:57:45 +00002744 cmd.size = sizeof(struct be_cmd_req_get_phy_info);
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302745 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
Sathya Perla306f1342011-08-02 19:57:45 +00002746 if (!cmd.va) {
2747 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2748 status = -ENOMEM;
2749 goto err;
2750 }
Ajit Khapardeee3cb622010-07-01 03:51:00 +00002751
Sathya Perla306f1342011-08-02 19:57:45 +00002752 req = cmd.va;
Ajit Khapardeee3cb622010-07-01 03:51:00 +00002753
Somnath Kotur106df1e2011-10-27 07:12:13 +00002754 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302755 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
2756 wrb, &cmd);
Ajit Khapardeee3cb622010-07-01 03:51:00 +00002757
2758 status = be_mcc_notify_wait(adapter);
Sathya Perla306f1342011-08-02 19:57:45 +00002759 if (!status) {
2760 struct be_phy_info *resp_phy_info =
2761 cmd.va + sizeof(struct be_cmd_req_hdr);
Kalesh AP03d28ff2014-09-19 15:46:56 +05302762
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002763 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
2764 adapter->phy.interface_type =
Sathya Perla306f1342011-08-02 19:57:45 +00002765 le16_to_cpu(resp_phy_info->interface_type);
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002766 adapter->phy.auto_speeds_supported =
2767 le16_to_cpu(resp_phy_info->auto_speeds_supported);
2768 adapter->phy.fixed_speeds_supported =
2769 le16_to_cpu(resp_phy_info->fixed_speeds_supported);
2770 adapter->phy.misc_params =
2771 le32_to_cpu(resp_phy_info->misc_params);
Vasundhara Volam68cb7e42013-08-06 09:27:18 +05302772
2773 if (BE2_chip(adapter)) {
2774 adapter->phy.fixed_speeds_supported =
2775 BE_SUPPORTED_SPEED_10GBPS |
2776 BE_SUPPORTED_SPEED_1GBPS;
2777 }
Sathya Perla306f1342011-08-02 19:57:45 +00002778 }
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302779 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
Ajit Khapardeee3cb622010-07-01 03:51:00 +00002780err:
2781 spin_unlock_bh(&adapter->mcc_lock);
2782 return status;
2783}
Ajit Khapardee1d18732010-07-23 01:52:13 +00002784
Lad, Prabhakarbc0ee162015-02-05 15:24:43 +00002785static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
Ajit Khapardee1d18732010-07-23 01:52:13 +00002786{
2787 struct be_mcc_wrb *wrb;
2788 struct be_cmd_req_set_qos *req;
2789 int status;
2790
2791 spin_lock_bh(&adapter->mcc_lock);
2792
2793 wrb = wrb_from_mccq(adapter);
2794 if (!wrb) {
2795 status = -EBUSY;
2796 goto err;
2797 }
2798
2799 req = embedded_payload(wrb);
2800
Somnath Kotur106df1e2011-10-27 07:12:13 +00002801 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302802 OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
Ajit Khapardee1d18732010-07-23 01:52:13 +00002803
2804 req->hdr.domain = domain;
Ajit Khaparde6bff57a2011-02-11 13:33:02 +00002805 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2806 req->max_bps_nic = cpu_to_le32(bps);
Ajit Khapardee1d18732010-07-23 01:52:13 +00002807
2808 status = be_mcc_notify_wait(adapter);
2809
2810err:
2811 spin_unlock_bh(&adapter->mcc_lock);
2812 return status;
2813}
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00002814
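/* Uses mbox; caches the HBA physical port number from the controller
 * attributes.
 */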
2815int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2816{
2817 struct be_mcc_wrb *wrb;
2818 struct be_cmd_req_cntl_attribs *req;
2819 struct be_cmd_resp_cntl_attribs *resp;
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00002820 int status;
2821 int payload_len = max(sizeof(*req), sizeof(*resp));
2822 struct mgmt_controller_attrib *attribs;
2823 struct be_dma_mem attribs_cmd;
2824
Suresh Reddyd98ef502013-04-25 00:56:55 +00002825 if (mutex_lock_interruptible(&adapter->mbox_lock))
2826 return -1;
2827
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00002828 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2829 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2830 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302831 &attribs_cmd.dma);
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00002832 if (!attribs_cmd.va) {
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302833 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
Suresh Reddyd98ef502013-04-25 00:56:55 +00002834 status = -ENOMEM;
2835 goto err;
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00002836 }
2837
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00002838 wrb = wrb_from_mbox(adapter);
2839 if (!wrb) {
2840 status = -EBUSY;
2841 goto err;
2842 }
2843 req = attribs_cmd.va;
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00002844
Somnath Kotur106df1e2011-10-27 07:12:13 +00002845 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302846 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len,
2847 wrb, &attribs_cmd);
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00002848
2849 status = be_mbox_notify_wait(adapter);
2850 if (!status) {
Joe Perches43d620c2011-06-16 19:08:06 +00002851 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00002852 adapter->hba_port_num = attribs->hba_attribs.phy_port;
2853 }
2854
2855err:
2856 mutex_unlock(&adapter->mbox_lock);
Suresh Reddyd98ef502013-04-25 00:56:55 +00002857 if (attribs_cmd.va)
2858 pci_free_consistent(adapter->pdev, attribs_cmd.size,
2859 attribs_cmd.va, attribs_cmd.dma);
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00002860 return status;
2861}
Sathya Perla2e588f82011-03-11 02:49:26 +00002862
2863/* Uses mbox */
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002864int be_cmd_req_native_mode(struct be_adapter *adapter)
Sathya Perla2e588f82011-03-11 02:49:26 +00002865{
2866 struct be_mcc_wrb *wrb;
2867 struct be_cmd_req_set_func_cap *req;
2868 int status;
2869
2870 if (mutex_lock_interruptible(&adapter->mbox_lock))
2871 return -1;
2872
2873 wrb = wrb_from_mbox(adapter);
2874 if (!wrb) {
2875 status = -EBUSY;
2876 goto err;
2877 }
2878
2879 req = embedded_payload(wrb);
2880
Somnath Kotur106df1e2011-10-27 07:12:13 +00002881 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302882 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP,
2883 sizeof(*req), wrb, NULL);
Sathya Perla2e588f82011-03-11 02:49:26 +00002884
2885 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2886 CAPABILITY_BE3_NATIVE_ERX_API);
2887 req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
2888
2889 status = be_mbox_notify_wait(adapter);
2890 if (!status) {
2891 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05302892
Sathya Perla2e588f82011-03-11 02:49:26 +00002893 adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2894 CAPABILITY_BE3_NATIVE_ERX_API;
Sathya Perlad3791422012-09-28 04:39:44 +00002895 if (!adapter->be3_native)
2896 dev_warn(&adapter->pdev->dev,
2897 "adapter not in advanced mode\n");
Sathya Perla2e588f82011-03-11 02:49:26 +00002898 }
2899err:
2900 mutex_unlock(&adapter->mbox_lock);
2901 return status;
2902}
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002903
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002904/* Get privilege(s) for a function */
2905int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
2906 u32 domain)
2907{
2908 struct be_mcc_wrb *wrb;
2909 struct be_cmd_req_get_fn_privileges *req;
2910 int status;
2911
2912 spin_lock_bh(&adapter->mcc_lock);
2913
2914 wrb = wrb_from_mccq(adapter);
2915 if (!wrb) {
2916 status = -EBUSY;
2917 goto err;
2918 }
2919
2920 req = embedded_payload(wrb);
2921
2922 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2923 OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
2924 wrb, NULL);
2925
2926 req->hdr.domain = domain;
2927
2928 status = be_mcc_notify_wait(adapter);
2929 if (!status) {
2930 struct be_cmd_resp_get_fn_privileges *resp =
2931 embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05302932
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002933 *privilege = le32_to_cpu(resp->privilege_mask);
Suresh Reddy02308d72014-01-15 13:23:36 +05302934
		/* In UMC mode the FW does not return the right privileges.
		 * Override with the correct privileges, equivalent to a PF's.
		 */
2938 if (BEx_chip(adapter) && be_is_mc(adapter) &&
2939 be_physfn(adapter))
2940 *privilege = MAX_PRIVILEGES;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002941 }
2942
2943err:
2944 spin_unlock_bh(&adapter->mcc_lock);
2945 return status;
2946}
2947
Sathya Perla04a06022013-07-23 15:25:00 +05302948/* Set privilege(s) for a function */
2949int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
2950 u32 domain)
2951{
2952 struct be_mcc_wrb *wrb;
2953 struct be_cmd_req_set_fn_privileges *req;
2954 int status;
2955
2956 spin_lock_bh(&adapter->mcc_lock);
2957
2958 wrb = wrb_from_mccq(adapter);
2959 if (!wrb) {
2960 status = -EBUSY;
2961 goto err;
2962 }
2963
2964 req = embedded_payload(wrb);
2965 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2966 OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
2967 wrb, NULL);
2968 req->hdr.domain = domain;
2969 if (lancer_chip(adapter))
2970 req->privileges_lancer = cpu_to_le32(privileges);
2971 else
2972 req->privileges = cpu_to_le32(privileges);
2973
2974 status = be_mcc_notify_wait(adapter);
2975err:
2976 spin_unlock_bh(&adapter->mcc_lock);
2977 return status;
2978}
2979
/* pmac_id_valid: true  => pmac_id is supplied by the caller and the MAC
 *			   address for that pmac_id is returned.
 * pmac_id_valid: false => the active pmac_id (if any) is returned; if no
 *			   active pmac_id exists, the first permanent MAC
 *			   address is returned instead.
 * If a pmac_id is returned, pmac_id_valid is set to true.
 */
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002984int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
Suresh Reddyb188f092014-01-15 13:23:39 +05302985 bool *pmac_id_valid, u32 *pmac_id, u32 if_handle,
2986 u8 domain)
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002987{
2988 struct be_mcc_wrb *wrb;
2989 struct be_cmd_req_get_mac_list *req;
2990 int status;
2991 int mac_count;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002992 struct be_dma_mem get_mac_list_cmd;
2993 int i;
2994
2995 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
2996 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
2997 get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302998 get_mac_list_cmd.size,
2999 &get_mac_list_cmd.dma);
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003000
3001 if (!get_mac_list_cmd.va) {
3002 dev_err(&adapter->pdev->dev,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303003 "Memory allocation failure during GET_MAC_LIST\n");
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003004 return -ENOMEM;
3005 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003006
3007 spin_lock_bh(&adapter->mcc_lock);
3008
3009 wrb = wrb_from_mccq(adapter);
3010 if (!wrb) {
3011 status = -EBUSY;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003012 goto out;
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003013 }
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003014
3015 req = get_mac_list_cmd.va;
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003016
3017 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlabf591f52013-05-08 02:05:48 +00003018 OPCODE_COMMON_GET_MAC_LIST,
3019 get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003020 req->hdr.domain = domain;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003021 req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
Sathya Perla5a712c12013-07-23 15:24:59 +05303022 if (*pmac_id_valid) {
3023 req->mac_id = cpu_to_le32(*pmac_id);
Suresh Reddyb188f092014-01-15 13:23:39 +05303024 req->iface_id = cpu_to_le16(if_handle);
Sathya Perla5a712c12013-07-23 15:24:59 +05303025 req->perm_override = 0;
3026 } else {
3027 req->perm_override = 1;
3028 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003029
3030 status = be_mcc_notify_wait(adapter);
3031 if (!status) {
3032 struct be_cmd_resp_get_mac_list *resp =
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003033 get_mac_list_cmd.va;
Sathya Perla5a712c12013-07-23 15:24:59 +05303034
3035 if (*pmac_id_valid) {
3036 memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
3037 ETH_ALEN);
3038 goto out;
3039 }
3040
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003041 mac_count = resp->true_mac_count + resp->pseudo_mac_count;
		/* The returned MAC list can contain one or more active
		 * mac_ids and/or one or more true/pseudo permanent MAC
		 * addresses. If an active mac_id is present, return the
		 * first one found.
		 */
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003047 for (i = 0; i < mac_count; i++) {
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003048 struct get_list_macaddr *mac_entry;
3049 u16 mac_addr_size;
3050 u32 mac_id;
3051
3052 mac_entry = &resp->macaddr_list[i];
3053 mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
3054 /* mac_id is a 32 bit value and mac_addr size
3055 * is 6 bytes
3056 */
3057 if (mac_addr_size == sizeof(u32)) {
Sathya Perla5a712c12013-07-23 15:24:59 +05303058 *pmac_id_valid = true;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003059 mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
3060 *pmac_id = le32_to_cpu(mac_id);
3061 goto out;
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003062 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003063 }
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003064 /* If no active mac_id found, return first mac addr */
Sathya Perla5a712c12013-07-23 15:24:59 +05303065 *pmac_id_valid = false;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003066 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303067 ETH_ALEN);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003068 }
3069
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003070out:
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003071 spin_unlock_bh(&adapter->mcc_lock);
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003072 pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303073 get_mac_list_cmd.va, get_mac_list_cmd.dma);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003074 return status;
3075}
3076
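/* Returns the MAC currently active on the interface. If 'active' is false
 * the active pmac_id is looked up first; BEx chips then query the address
 * by pmac_id while newer chips fetch it from the FW MAC list.
 */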
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303077int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
3078 u8 *mac, u32 if_handle, bool active, u32 domain)
Sathya Perla5a712c12013-07-23 15:24:59 +05303079{
Suresh Reddyb188f092014-01-15 13:23:39 +05303080 if (!active)
3081 be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id,
3082 if_handle, domain);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303083 if (BEx_chip(adapter))
Sathya Perla5a712c12013-07-23 15:24:59 +05303084 return be_cmd_mac_addr_query(adapter, mac, false,
Suresh Reddyb188f092014-01-15 13:23:39 +05303085 if_handle, curr_pmac_id);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303086 else
3087 /* Fetch the MAC address using pmac_id */
3088 return be_cmd_get_mac_from_list(adapter, mac, &active,
Suresh Reddyb188f092014-01-15 13:23:39 +05303089 &curr_pmac_id,
3090 if_handle, domain);
Sathya Perla5a712c12013-07-23 15:24:59 +05303091}
3092
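/* Fetches the permanent MAC address: BEx PFs query it directly, BEx VFs
 * query their interface, and newer chips read it from the FW MAC list.
 */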
Sathya Perla95046b92013-07-23 15:25:02 +05303093int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
3094{
3095 int status;
3096 bool pmac_valid = false;
3097
Joe Perchesc7bf7162015-03-02 19:54:47 -08003098 eth_zero_addr(mac);
Sathya Perla95046b92013-07-23 15:25:02 +05303099
Sathya Perla3175d8c2013-07-23 15:25:03 +05303100 if (BEx_chip(adapter)) {
3101 if (be_physfn(adapter))
3102 status = be_cmd_mac_addr_query(adapter, mac, true, 0,
3103 0);
3104 else
3105 status = be_cmd_mac_addr_query(adapter, mac, false,
3106 adapter->if_handle, 0);
3107 } else {
Sathya Perla95046b92013-07-23 15:25:02 +05303108 status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
Suresh Reddyb188f092014-01-15 13:23:39 +05303109 NULL, adapter->if_handle, 0);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303110 }
3111
Sathya Perla95046b92013-07-23 15:25:02 +05303112 return status;
3113}
3114
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003115/* Uses synchronous MCCQ */
3116int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
3117 u8 mac_count, u32 domain)
3118{
3119 struct be_mcc_wrb *wrb;
3120 struct be_cmd_req_set_mac_list *req;
3121 int status;
3122 struct be_dma_mem cmd;
3123
3124 memset(&cmd, 0, sizeof(struct be_dma_mem));
3125 cmd.size = sizeof(struct be_cmd_req_set_mac_list);
3126 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303127 &cmd.dma, GFP_KERNEL);
Joe Perchesd0320f72013-03-14 13:07:21 +00003128 if (!cmd.va)
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003129 return -ENOMEM;
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003130
3131 spin_lock_bh(&adapter->mcc_lock);
3132
3133 wrb = wrb_from_mccq(adapter);
3134 if (!wrb) {
3135 status = -EBUSY;
3136 goto err;
3137 }
3138
3139 req = cmd.va;
3140 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303141 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
3142 wrb, &cmd);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003143
3144 req->hdr.domain = domain;
3145 req->mac_count = mac_count;
3146 if (mac_count)
3147 memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
3148
3149 status = be_mcc_notify_wait(adapter);
3150
3151err:
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303152 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003153 spin_unlock_bh(&adapter->mcc_lock);
3154 return status;
3155}
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003156
Sathya Perla3175d8c2013-07-23 15:25:03 +05303157/* Wrapper to delete any active MACs and provision the new mac.
3158 * Changes to MAC_LIST are allowed iff none of the MAC addresses in the
3159 * current list are active.
3160 */
3161int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
3162{
3163 bool active_mac = false;
3164 u8 old_mac[ETH_ALEN];
3165 u32 pmac_id;
3166 int status;
3167
3168 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
Suresh Reddyb188f092014-01-15 13:23:39 +05303169 &pmac_id, if_id, dom);
3170
Sathya Perla3175d8c2013-07-23 15:25:03 +05303171 if (!status && active_mac)
3172 be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
3173
3174 return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
3175}
3176
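/* Programs the hyper-switch configuration for an interface/domain: PVID,
 * port forwarding mode (non-BEx only) and MAC/VLAN spoof checking.
 */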
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003177int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
Kalesh APe7bcbd72015-05-06 05:30:32 -04003178 u32 domain, u16 intf_id, u16 hsw_mode, u8 spoofchk)
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003179{
3180 struct be_mcc_wrb *wrb;
3181 struct be_cmd_req_set_hsw_config *req;
3182 void *ctxt;
3183 int status;
3184
3185 spin_lock_bh(&adapter->mcc_lock);
3186
3187 wrb = wrb_from_mccq(adapter);
3188 if (!wrb) {
3189 status = -EBUSY;
3190 goto err;
3191 }
3192
3193 req = embedded_payload(wrb);
3194 ctxt = &req->context;
3195
3196 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303197 OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
3198 NULL);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003199
3200 req->hdr.domain = domain;
3201 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
3202 if (pvid) {
3203 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
3204 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
3205 }
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003206 if (!BEx_chip(adapter) && hsw_mode) {
3207 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
3208 ctxt, adapter->hba_port_num);
3209 AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
3210 AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type,
3211 ctxt, hsw_mode);
3212 }
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003213
Kalesh APe7bcbd72015-05-06 05:30:32 -04003214 /* Enable/disable both mac and vlan spoof checking */
3215 if (!BEx_chip(adapter) && spoofchk) {
3216 AMAP_SET_BITS(struct amap_set_hsw_context, mac_spoofchk,
3217 ctxt, spoofchk);
3218 AMAP_SET_BITS(struct amap_set_hsw_context, vlan_spoofchk,
3219 ctxt, spoofchk);
3220 }
3221
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003222 be_dws_cpu_to_le(req->context, sizeof(req->context));
3223 status = be_mcc_notify_wait(adapter);
3224
3225err:
3226 spin_unlock_bh(&adapter->mcc_lock);
3227 return status;
3228}
3229
3230/* Get Hyper switch config */
3231int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
Kalesh APe7bcbd72015-05-06 05:30:32 -04003232 u32 domain, u16 intf_id, u8 *mode, bool *spoofchk)
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003233{
3234 struct be_mcc_wrb *wrb;
3235 struct be_cmd_req_get_hsw_config *req;
3236 void *ctxt;
3237 int status;
3238 u16 vid;
3239
3240 spin_lock_bh(&adapter->mcc_lock);
3241
3242 wrb = wrb_from_mccq(adapter);
3243 if (!wrb) {
3244 status = -EBUSY;
3245 goto err;
3246 }
3247
3248 req = embedded_payload(wrb);
3249 ctxt = &req->context;
3250
3251 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303252 OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
3253 NULL);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003254
3255 req->hdr.domain = domain;
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003256 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
3257 ctxt, intf_id);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003258 AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003259
Vasundhara Volam2c07c1d2014-01-15 13:23:32 +05303260 if (!BEx_chip(adapter) && mode) {
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003261 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
3262 ctxt, adapter->hba_port_num);
3263 AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
3264 }
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003265 be_dws_cpu_to_le(req->context, sizeof(req->context));
3266
3267 status = be_mcc_notify_wait(adapter);
3268 if (!status) {
3269 struct be_cmd_resp_get_hsw_config *resp =
3270 embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05303271
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303272 be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003273 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303274 pvid, &resp->context);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003275 if (pvid)
3276 *pvid = le16_to_cpu(vid);
3277 if (mode)
3278 *mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3279 port_fwd_type, &resp->context);
Kalesh APe7bcbd72015-05-06 05:30:32 -04003280 if (spoofchk)
3281 *spoofchk =
3282 AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3283 spoofchk, &resp->context);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003284 }
3285
3286err:
3287 spin_unlock_bh(&adapter->mcc_lock);
3288 return status;
3289}
3290
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003291static bool be_is_wol_excluded(struct be_adapter *adapter)
3292{
3293 struct pci_dev *pdev = adapter->pdev;
3294
Kalesh AP18c57c72015-05-06 05:30:38 -04003295 if (be_virtfn(adapter))
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003296 return true;
3297
3298 switch (pdev->subsystem_device) {
3299 case OC_SUBSYS_DEVICE_ID1:
3300 case OC_SUBSYS_DEVICE_ID2:
3301 case OC_SUBSYS_DEVICE_ID3:
3302 case OC_SUBSYS_DEVICE_ID4:
3303 return true;
3304 default:
3305 return false;
3306 }
3307}
3308
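/* Uses mbox; queries the adapter's WoL capability and caches it in
 * wol_cap/wol_en. Skipped for VFs and for subsystem IDs excluded by
 * be_is_wol_excluded().
 */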
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003309int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
3310{
3311 struct be_mcc_wrb *wrb;
3312 struct be_cmd_req_acpi_wol_magic_config_v1 *req;
Suresh Reddy76a9e082014-01-15 13:23:40 +05303313 int status = 0;
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003314 struct be_dma_mem cmd;
3315
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003316 if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
3317 CMD_SUBSYSTEM_ETH))
3318 return -EPERM;
3319
Suresh Reddy76a9e082014-01-15 13:23:40 +05303320 if (be_is_wol_excluded(adapter))
3321 return status;
3322
Suresh Reddyd98ef502013-04-25 00:56:55 +00003323 if (mutex_lock_interruptible(&adapter->mbox_lock))
3324 return -1;
3325
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003326 memset(&cmd, 0, sizeof(struct be_dma_mem));
3327 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303328 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003329 if (!cmd.va) {
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303330 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
Suresh Reddyd98ef502013-04-25 00:56:55 +00003331 status = -ENOMEM;
3332 goto err;
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003333 }
3334
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003335 wrb = wrb_from_mbox(adapter);
3336 if (!wrb) {
3337 status = -EBUSY;
3338 goto err;
3339 }
3340
3341 req = cmd.va;
3342
3343 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
3344 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
Suresh Reddy76a9e082014-01-15 13:23:40 +05303345 sizeof(*req), wrb, &cmd);
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003346
3347 req->hdr.version = 1;
3348 req->query_options = BE_GET_WOL_CAP;
3349
3350 status = be_mbox_notify_wait(adapter);
3351 if (!status) {
3352 struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
Kalesh AP03d28ff2014-09-19 15:46:56 +05303353
Kalesh AP504fbf12014-09-19 15:47:00 +05303354 resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va;
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003355
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003356 adapter->wol_cap = resp->wol_settings;
Suresh Reddy76a9e082014-01-15 13:23:40 +05303357 if (adapter->wol_cap & BE_WOL_CAP)
3358 adapter->wol_en = true;
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003359 }
3360err:
3361 mutex_unlock(&adapter->mbox_lock);
Suresh Reddyd98ef502013-04-25 00:56:55 +00003362 if (cmd.va)
3363 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003364 return status;
}
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05303367
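/* Sets the UART trace level in every FW module via the extended FAT
 * capabilities commands.
 */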
3368int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
3369{
3370 struct be_dma_mem extfat_cmd;
3371 struct be_fat_conf_params *cfgs;
3372 int status;
3373 int i, j;
3374
3375 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3376 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3377 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3378 &extfat_cmd.dma);
3379 if (!extfat_cmd.va)
3380 return -ENOMEM;
3381
3382 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3383 if (status)
3384 goto err;
3385
3386 cfgs = (struct be_fat_conf_params *)
3387 (extfat_cmd.va + sizeof(struct be_cmd_resp_hdr));
3388 for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
3389 u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
Kalesh AP03d28ff2014-09-19 15:46:56 +05303390
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05303391 for (j = 0; j < num_modes; j++) {
3392 if (cfgs->module[i].trace_lvl[j].mode == MODE_UART)
3393 cfgs->module[i].trace_lvl[j].dbg_lvl =
3394 cpu_to_le32(level);
3395 }
3396 }
3397
3398 status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
3399err:
3400 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3401 extfat_cmd.dma);
3402 return status;
3403}
3404
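/* Returns the current UART trace level of FW module 0, or 0 on failure. */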
3405int be_cmd_get_fw_log_level(struct be_adapter *adapter)
3406{
3407 struct be_dma_mem extfat_cmd;
3408 struct be_fat_conf_params *cfgs;
3409 int status, j;
3410 int level = 0;
3411
3412 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3413 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3414 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3415 &extfat_cmd.dma);
3416
3417 if (!extfat_cmd.va) {
3418 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3419 __func__);
3420 goto err;
3421 }
3422
3423 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3424 if (!status) {
3425 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3426 sizeof(struct be_cmd_resp_hdr));
Kalesh AP03d28ff2014-09-19 15:46:56 +05303427
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05303428 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3429 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3430 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3431 }
3432 }
3433 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3434 extfat_cmd.dma);
3435err:
3436 return level;
3437}
3438
Somnath Kotur941a77d2012-05-17 22:59:03 +00003439int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
3440 struct be_dma_mem *cmd)
3441{
3442 struct be_mcc_wrb *wrb;
3443 struct be_cmd_req_get_ext_fat_caps *req;
3444 int status;
3445
3446 if (mutex_lock_interruptible(&adapter->mbox_lock))
3447 return -1;
3448
3449 wrb = wrb_from_mbox(adapter);
3450 if (!wrb) {
3451 status = -EBUSY;
3452 goto err;
3453 }
3454
3455 req = cmd->va;
3456 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3457 OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
3458 cmd->size, wrb, cmd);
3459 req->parameter_type = cpu_to_le32(1);
3460
3461 status = be_mbox_notify_wait(adapter);
3462err:
3463 mutex_unlock(&adapter->mbox_lock);
3464 return status;
3465}
3466
3467int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
3468 struct be_dma_mem *cmd,
3469 struct be_fat_conf_params *configs)
3470{
3471 struct be_mcc_wrb *wrb;
3472 struct be_cmd_req_set_ext_fat_caps *req;
3473 int status;
3474
3475 spin_lock_bh(&adapter->mcc_lock);
3476
3477 wrb = wrb_from_mccq(adapter);
3478 if (!wrb) {
3479 status = -EBUSY;
3480 goto err;
3481 }
3482
3483 req = cmd->va;
3484 memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
3485 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3486 OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
3487 cmd->size, wrb, cmd);
3488
3489 status = be_mcc_notify_wait(adapter);
3490err:
3491 spin_unlock_bh(&adapter->mcc_lock);
3492 return status;
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003493}
Parav Pandit6a4ab662012-03-26 14:27:12 +00003494
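/* Uses mbox; reads the FW-assigned name of this function's HBA port and
 * falls back to the numeric port id ('0', '1', ...) on failure.
 */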
Vasundhara Volam21252372015-02-06 08:18:42 -05003495int be_cmd_query_port_name(struct be_adapter *adapter)
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00003496{
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00003497 struct be_cmd_req_get_port_name *req;
Vasundhara Volam21252372015-02-06 08:18:42 -05003498 struct be_mcc_wrb *wrb;
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00003499 int status;
3500
Vasundhara Volam21252372015-02-06 08:18:42 -05003501 if (mutex_lock_interruptible(&adapter->mbox_lock))
3502 return -1;
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00003503
Vasundhara Volam21252372015-02-06 08:18:42 -05003504 wrb = wrb_from_mbox(adapter);
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00003505 req = embedded_payload(wrb);
3506
3507 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3508 OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
3509 NULL);
Vasundhara Volam21252372015-02-06 08:18:42 -05003510 if (!BEx_chip(adapter))
3511 req->hdr.version = 1;
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00003512
Vasundhara Volam21252372015-02-06 08:18:42 -05003513 status = be_mbox_notify_wait(adapter);
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00003514 if (!status) {
3515 struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05303516
Vasundhara Volam21252372015-02-06 08:18:42 -05003517 adapter->port_name = resp->port_name[adapter->hba_port_num];
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00003518 } else {
Vasundhara Volam21252372015-02-06 08:18:42 -05003519 adapter->port_name = adapter->hba_port_num + '0';
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00003520 }
Vasundhara Volam21252372015-02-06 08:18:42 -05003521
3522 mutex_unlock(&adapter->mbox_lock);
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00003523 return status;
3524}
3525
Vasundhara Volam10cccf62014-06-30 13:01:31 +05303526/* Descriptor type */
3527enum {
3528 FUNC_DESC = 1,
3529 VFT_DESC = 2
3530};
3531
3532static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
3533 int desc_type)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003534{
Vasundhara Volam150d58c2013-08-27 16:57:31 +05303535 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
Vasundhara Volam10cccf62014-06-30 13:01:31 +05303536 struct be_nic_res_desc *nic;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003537 int i;
3538
3539 for (i = 0; i < desc_count; i++) {
Vasundhara Volam150d58c2013-08-27 16:57:31 +05303540 if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
Vasundhara Volam10cccf62014-06-30 13:01:31 +05303541 hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) {
3542 nic = (struct be_nic_res_desc *)hdr;
3543 if (desc_type == FUNC_DESC ||
3544 (desc_type == VFT_DESC &&
3545 nic->flags & (1 << VFT_SHIFT)))
3546 return nic;
3547 }
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003548
Vasundhara Volam150d58c2013-08-27 16:57:31 +05303549 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3550 hdr = (void *)hdr + hdr->desc_len;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003551 }
Vasundhara Volam150d58c2013-08-27 16:57:31 +05303552 return NULL;
3553}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003554
Vasundhara Volam10cccf62014-06-30 13:01:31 +05303555static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count)
3556{
3557 return be_get_nic_desc(buf, desc_count, VFT_DESC);
3558}
3559
3560static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count)
3561{
3562 return be_get_nic_desc(buf, desc_count, FUNC_DESC);
3563}
3564
Vasundhara Volam150d58c2013-08-27 16:57:31 +05303565static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
3566 u32 desc_count)
3567{
3568 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3569 struct be_pcie_res_desc *pcie;
3570 int i;
3571
3572 for (i = 0; i < desc_count; i++) {
3573 if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
3574 hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) {
3575 pcie = (struct be_pcie_res_desc *)hdr;
3576 if (pcie->pf_num == devfn)
3577 return pcie;
3578 }
3579
3580 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3581 hdr = (void *)hdr + hdr->desc_len;
3582 }
Wei Yang950e2952013-05-22 15:58:22 +00003583 return NULL;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003584}
3585
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303586static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count)
3587{
3588 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3589 int i;
3590
3591 for (i = 0; i < desc_count; i++) {
3592 if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1)
3593 return (struct be_port_res_desc *)hdr;
3594
3595 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3596 hdr = (void *)hdr + hdr->desc_len;
3597 }
3598 return NULL;
3599}
3600
Sathya Perla92bf14a2013-08-27 16:57:32 +05303601static void be_copy_nic_desc(struct be_resources *res,
3602 struct be_nic_res_desc *desc)
3603{
3604 res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count);
3605 res->max_vlans = le16_to_cpu(desc->vlan_count);
3606 res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
3607 res->max_tx_qs = le16_to_cpu(desc->txq_count);
3608 res->max_rss_qs = le16_to_cpu(desc->rssq_count);
3609 res->max_rx_qs = le16_to_cpu(desc->rq_count);
3610 res->max_evt_qs = le16_to_cpu(desc->eq_count);
Vasundhara Volamf2858732015-03-04 00:44:33 -05003611 res->max_cq_count = le16_to_cpu(desc->cq_count);
3612 res->max_iface_count = le16_to_cpu(desc->iface_count);
3613 res->max_mcc_count = le16_to_cpu(desc->mcc_count);
Sathya Perla92bf14a2013-08-27 16:57:32 +05303614 /* Clear flags that driver is not interested in */
3615 res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
3616 BE_IF_CAP_FLAGS_WANT;
Sathya Perla92bf14a2013-08-27 16:57:32 +05303617}
3618
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003619/* Uses Mbox */
Sathya Perla92bf14a2013-08-27 16:57:32 +05303620int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003621{
3622 struct be_mcc_wrb *wrb;
3623 struct be_cmd_req_get_func_config *req;
3624 int status;
3625 struct be_dma_mem cmd;
3626
Suresh Reddyd98ef502013-04-25 00:56:55 +00003627 if (mutex_lock_interruptible(&adapter->mbox_lock))
3628 return -1;
3629
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003630 memset(&cmd, 0, sizeof(struct be_dma_mem));
3631 cmd.size = sizeof(struct be_cmd_resp_get_func_config);
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303632 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003633 if (!cmd.va) {
3634 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
Suresh Reddyd98ef502013-04-25 00:56:55 +00003635 status = -ENOMEM;
3636 goto err;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003637 }
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003638
3639 wrb = wrb_from_mbox(adapter);
3640 if (!wrb) {
3641 status = -EBUSY;
3642 goto err;
3643 }
3644
3645 req = cmd.va;
3646
3647 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3648 OPCODE_COMMON_GET_FUNC_CONFIG,
3649 cmd.size, wrb, &cmd);
3650
Kalesh AP28710c52013-04-28 22:21:13 +00003651 if (skyhawk_chip(adapter))
3652 req->hdr.version = 1;
3653
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003654 status = be_mbox_notify_wait(adapter);
3655 if (!status) {
3656 struct be_cmd_resp_get_func_config *resp = cmd.va;
3657 u32 desc_count = le32_to_cpu(resp->desc_count);
Vasundhara Volam150d58c2013-08-27 16:57:31 +05303658 struct be_nic_res_desc *desc;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003659
Vasundhara Volam10cccf62014-06-30 13:01:31 +05303660 desc = be_get_func_nic_desc(resp->func_param, desc_count);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003661 if (!desc) {
3662 status = -EINVAL;
3663 goto err;
3664 }
3665
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00003666 adapter->pf_number = desc->pf_num;
Sathya Perla92bf14a2013-08-27 16:57:32 +05303667 be_copy_nic_desc(res, desc);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003668 }
3669err:
3670 mutex_unlock(&adapter->mbox_lock);
Suresh Reddyd98ef502013-04-25 00:56:55 +00003671 if (cmd.va)
3672 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003673 return status;
3674}
3675
Vasundhara Volamba48c0c2014-06-30 13:01:30 +05303676/* Will use MBOX only if MCCQ has not been created */
Sathya Perla92bf14a2013-08-27 16:57:32 +05303677int be_cmd_get_profile_config(struct be_adapter *adapter,
Vasundhara Volamf2858732015-03-04 00:44:33 -05003678 struct be_resources *res, u8 query, u8 domain)
Vasundhara Volama05f99d2013-04-21 23:28:17 +00003679{
Vasundhara Volam150d58c2013-08-27 16:57:31 +05303680 struct be_cmd_resp_get_profile_config *resp;
Vasundhara Volamba48c0c2014-06-30 13:01:30 +05303681 struct be_cmd_req_get_profile_config *req;
Vasundhara Volam10cccf62014-06-30 13:01:31 +05303682 struct be_nic_res_desc *vf_res;
Vasundhara Volam150d58c2013-08-27 16:57:31 +05303683 struct be_pcie_res_desc *pcie;
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303684 struct be_port_res_desc *port;
Vasundhara Volam150d58c2013-08-27 16:57:31 +05303685 struct be_nic_res_desc *nic;
Vasundhara Volamba48c0c2014-06-30 13:01:30 +05303686 struct be_mcc_wrb wrb = {0};
Vasundhara Volama05f99d2013-04-21 23:28:17 +00003687 struct be_dma_mem cmd;
Vasundhara Volamf2858732015-03-04 00:44:33 -05003688 u16 desc_count;
Vasundhara Volama05f99d2013-04-21 23:28:17 +00003689 int status;
3690
3691 memset(&cmd, 0, sizeof(struct be_dma_mem));
Vasundhara Volam150d58c2013-08-27 16:57:31 +05303692 cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
3693 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
3694 if (!cmd.va)
Vasundhara Volama05f99d2013-04-21 23:28:17 +00003695 return -ENOMEM;
Vasundhara Volama05f99d2013-04-21 23:28:17 +00003696
Vasundhara Volamba48c0c2014-06-30 13:01:30 +05303697 req = cmd.va;
3698 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3699 OPCODE_COMMON_GET_PROFILE_CONFIG,
3700 cmd.size, &wrb, &cmd);
3701
3702 req->hdr.domain = domain;
3703 if (!lancer_chip(adapter))
3704 req->hdr.version = 1;
3705 req->type = ACTIVE_PROFILE_TYPE;
3706
Vasundhara Volamf2858732015-03-04 00:44:33 -05003707 /* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the
3708 * descriptors with all bits set to "1" for the fields which can be
3709 * modified using SET_PROFILE_CONFIG cmd.
3710 */
3711 if (query == RESOURCE_MODIFIABLE)
3712 req->type |= QUERY_MODIFIABLE_FIELDS_TYPE;
3713
Vasundhara Volamba48c0c2014-06-30 13:01:30 +05303714 status = be_cmd_notify_wait(adapter, &wrb);
Vasundhara Volam150d58c2013-08-27 16:57:31 +05303715 if (status)
3716 goto err;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003717
Vasundhara Volam150d58c2013-08-27 16:57:31 +05303718 resp = cmd.va;
Vasundhara Volamf2858732015-03-04 00:44:33 -05003719 desc_count = le16_to_cpu(resp->desc_count);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003720
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303721 pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
				  desc_count);
	if (pcie)
		res->max_vfs = le16_to_cpu(pcie->num_vfs);

	port = be_get_port_desc(resp->func_param, desc_count);
	if (port)
		adapter->mc_type = port->mc_type;

	nic = be_get_func_nic_desc(resp->func_param, desc_count);
	if (nic)
		be_copy_nic_desc(res, nic);

	vf_res = be_get_vft_desc(resp->func_param, desc_count);
	if (vf_res)
		res->vf_if_cap_flags = vf_res->cap_flags;
err:
	if (cmd.va)
		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/* Will use MBOX only if MCCQ has not been created */
static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
				     int size, int count, u8 version, u8 domain)
{
	struct be_cmd_req_set_profile_config *req;
	struct be_mcc_wrb wrb = {0};
	struct be_dma_mem cmd;
	int status;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_req_set_profile_config);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
	if (!cmd.va)
		return -ENOMEM;

	req = cmd.va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_PROFILE_CONFIG, cmd.size,
			       &wrb, &cmd);
	req->hdr.version = version;
	req->hdr.domain = domain;
	req->desc_count = cpu_to_le32(count);
	memcpy(req->desc, desc, size);

	status = be_cmd_notify_wait(adapter, &wrb);

	if (cmd.va)
		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/* Mark all fields invalid */
static void be_reset_nic_desc(struct be_nic_res_desc *nic)
{
	memset(nic, 0, sizeof(*nic));
	nic->unicast_mac_count = 0xFFFF;
	nic->mcc_count = 0xFFFF;
	nic->vlan_count = 0xFFFF;
	nic->mcast_mac_count = 0xFFFF;
	nic->txq_count = 0xFFFF;
	nic->rq_count = 0xFFFF;
	nic->rssq_count = 0xFFFF;
	nic->lro_count = 0xFFFF;
	nic->cq_count = 0xFFFF;
	nic->toe_conn_count = 0xFFFF;
	nic->eq_count = 0xFFFF;
	nic->iface_count = 0xFFFF;
	nic->link_param = 0xFF;
	nic->channel_id_param = cpu_to_le16(0xF000);
	nic->acpi_params = 0xFF;
	nic->wol_param = 0x0F;
	nic->tunnel_iface_count = 0xFFFF;
	nic->direct_tenant_iface_count = 0xFFFF;
	nic->bw_min = 0xFFFFFFFF;
	nic->bw_max = 0xFFFFFFFF;
}

/* Mark all fields invalid */
static void be_reset_pcie_desc(struct be_pcie_res_desc *pcie)
{
	memset(pcie, 0, sizeof(*pcie));
	pcie->sriov_state = 0xFF;
	pcie->pf_state = 0xFF;
	pcie->pf_type = 0xFF;
	pcie->num_vfs = 0xFFFF;
}

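/* Sets a TX bandwidth limit for the function identified by 'domain'.
 * On BE3 this falls back to the legacy SET_QOS command; otherwise a NIC
 * resource descriptor carrying bw_max is sent via SET_PROFILE_CONFIG.
 * bw_max is encoded in units of 10 Mbps on Lancer and as a percentage of
 * link_speed on other chips, so max_rate and link_speed are assumed to be
 * in Mbps. Illustrative call (hypothetical caller, not part of this file):
 *	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
 */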
int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
		      u8 domain)
{
	struct be_nic_res_desc nic_desc;
	u32 bw_percent;
	u16 version = 0;

	if (BE3_chip(adapter))
		return be_cmd_set_qos(adapter, max_rate / 10, domain);

	be_reset_nic_desc(&nic_desc);
	nic_desc.pf_num = adapter->pf_number;
	nic_desc.vf_num = domain;
	nic_desc.bw_min = 0;
	if (lancer_chip(adapter)) {
		nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
		nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
		nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
				 (1 << NOSV_SHIFT);
		nic_desc.bw_max = cpu_to_le32(max_rate / 10);
	} else {
		version = 1;
		nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
		nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
		nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
		bw_percent = max_rate ? (max_rate * 100) / link_speed : 100;
		nic_desc.bw_max = cpu_to_le32(bw_percent);
	}

	return be_cmd_set_profile_config(adapter, &nic_desc,
					 nic_desc.hdr.desc_len,
					 1, version, domain);
}

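/* Fills the VF NIC resource-template descriptor used by
 * be_cmd_set_sriov_config(): queue counts are set to num_vf_qs, and the
 * modifiable pool resources (unicast MACs, VLANs, IFACEs, MCCQs, CQs) are
 * split evenly across the PF and its num_vfs VFs.
 */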
static void be_fill_vf_res_template(struct be_adapter *adapter,
				    struct be_resources pool_res,
				    u16 num_vfs, u16 num_vf_qs,
				    struct be_nic_res_desc *nic_vft)
{
	u32 vf_if_cap_flags = pool_res.vf_if_cap_flags;
	struct be_resources res_mod = {0};

	/* The RESOURCE_MODIFIABLE query returns a resource descriptor in
	 * which the fields that can be changed via the SET_PROFILE_CONFIG
	 * cmd are set to all '1's by the GET_PROFILE_CONFIG cmd.
	 */
	be_cmd_get_profile_config(adapter, &res_mod, RESOURCE_MODIFIABLE, 0);

	/* If RSS IFACE capability flags are modifiable for a VF, set the
	 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
	 * more than 1 RSSQ is available for a VF.
	 * Otherwise, provision only 1 queue pair for the VF.
	 */
	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
		nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
		if (num_vf_qs > 1) {
			vf_if_cap_flags |= BE_IF_FLAGS_RSS;
			if (pool_res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
				vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
		} else {
			vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
					     BE_IF_FLAGS_DEFQ_RSS);
		}

		nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
	} else {
		num_vf_qs = 1;
	}

	nic_vft->rq_count = cpu_to_le16(num_vf_qs);
	nic_vft->txq_count = cpu_to_le16(num_vf_qs);
	nic_vft->rssq_count = cpu_to_le16(num_vf_qs);
	nic_vft->cq_count = cpu_to_le16(pool_res.max_cq_count /
					(num_vfs + 1));

	/* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
	 * among the PF and its VFs, if the fields are changeable
	 */
	if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
		nic_vft->unicast_mac_count = cpu_to_le16(pool_res.max_uc_mac /
							 (num_vfs + 1));

	if (res_mod.max_vlans == FIELD_MODIFIABLE)
		nic_vft->vlan_count = cpu_to_le16(pool_res.max_vlans /
						  (num_vfs + 1));

	if (res_mod.max_iface_count == FIELD_MODIFIABLE)
		nic_vft->iface_count = cpu_to_le16(pool_res.max_iface_count /
						   (num_vfs + 1));

	if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
		nic_vft->mcc_count = cpu_to_le16(pool_res.max_mcc_count /
						 (num_vfs + 1));
}

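/* Provisions SR-IOV resources: a PF PCIe descriptor (carrying the SR-IOV
 * enable state and num_vfs) and a VF NIC template descriptor are sent
 * together in a single SET_PROFILE_CONFIG (version 1) request.
 */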
int be_cmd_set_sriov_config(struct be_adapter *adapter,
			    struct be_resources pool_res, u16 num_vfs,
			    u16 num_vf_qs)
{
	struct {
		struct be_pcie_res_desc pcie;
		struct be_nic_res_desc nic_vft;
	} __packed desc;

	/* PF PCIE descriptor */
	be_reset_pcie_desc(&desc.pcie);
	desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1;
	desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
	desc.pcie.flags = BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
	desc.pcie.pf_num = adapter->pdev->devfn;
	desc.pcie.sriov_state = num_vfs ? 1 : 0;
	desc.pcie.num_vfs = cpu_to_le16(num_vfs);

	/* VF NIC Template descriptor */
	be_reset_nic_desc(&desc.nic_vft);
	desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
	desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
	desc.nic_vft.flags = BIT(VFT_SHIFT) | BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
	desc.nic_vft.pf_num = adapter->pdev->devfn;
	desc.nic_vft.vf_num = 0;

	be_fill_vf_res_template(adapter, pool_res, num_vfs, num_vf_qs,
				&desc.nic_vft);

	return be_cmd_set_profile_config(adapter, &desc,
					 2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0);
}

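/* Uses sync mcc. Issues MANAGE_IFACE_FILTERS with the requested 'op' for
 * the given interface; an uninitialized iface id (0xFFFFFFFF) is rejected.
 */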
int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_manage_iface_filters *req;
	int status;

	if (iface == 0xFFFFFFFF)
		return -1;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req),
			       wrb, NULL);
	req->op = op;
	req->target_iface_id = cpu_to_le32(iface);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

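/* Programs (port != 0) or disables (port == 0) the VxLAN UDP destination
 * port for this HBA port by sending a port resource descriptor through
 * SET_PROFILE_CONFIG. 'port' arrives in network byte order and is
 * byte-swapped before being written into the descriptor.
 */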
int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port)
{
	struct be_port_res_desc port_desc;

	memset(&port_desc, 0, sizeof(port_desc));
	port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1;
	port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
	port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
	port_desc.link_num = adapter->hba_port_num;
	if (port) {
		port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) |
					(1 << RCVID_SHIFT);
		port_desc.nv_port = swab16(port);
	} else {
		port_desc.nv_flags = NV_TYPE_DISABLED;
		port_desc.nv_port = 0;
	}

	return be_cmd_set_profile_config(adapter, &port_desc,
					 RESOURCE_DESC_SIZE_V1, 1, 1, 0);
}

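/* Uses sync mcc. Retrieves the interface handle (if_id) that firmware
 * created for VF 'vf_num' (domain vf_num + 1) and stores it in vf_cfg.
 */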
int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
		     int vf_num)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_iface_list *req;
	struct be_cmd_resp_get_iface_list *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
			       wrb, NULL);
	req->hdr.domain = vf_num + 1;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		resp = (struct be_cmd_resp_get_iface_list *)req;
		vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

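/* Polls the Lancer PHYSDEV control register until the in-progress bit
 * clears, waiting up to SLIPORT_IDLE_TIMEOUT seconds; returns -1 on
 * timeout.
 */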
static int lancer_wait_idle(struct be_adapter *adapter)
{
#define SLIPORT_IDLE_TIMEOUT 30
	u32 reg_val;
	int status = 0, i;

	for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
		reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
		if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
			break;

		ssleep(1);
	}

	if (i == SLIPORT_IDLE_TIMEOUT)
		status = -1;

	return status;
}

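/* Writes 'mask' into the PHYSDEV control register once the port reports
 * idle; used below to trigger a firmware reset and/or diagnostic dump.
 */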
int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask)
{
	int status = 0;

	status = lancer_wait_idle(adapter);
	if (status)
		return status;

	iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET);

	return status;
}

/* Routine to check whether dump image is present or not */
bool dump_present(struct be_adapter *adapter)
{
	u32 sliport_status = 0;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
	return !!(sliport_status & SLIPORT_STATUS_DIP_MASK);
}

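/* Asks Lancer firmware for a diagnostic dump: refuses if a previous dump
 * is still present, then requests FW reset + dump and waits for the port
 * to go idle and the dump-present bit to be set.
 */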
int lancer_initiate_dump(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (dump_present(adapter)) {
		dev_info(dev, "Previous dump not cleared, not forcing dump\n");
		return -EEXIST;
	}

	/* give firmware reset and diagnostic dump */
	status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK |
				     PHYSDEV_CONTROL_DD_MASK);
	if (status < 0) {
		dev_err(dev, "FW reset failed\n");
		return status;
	}

	status = lancer_wait_idle(adapter);
	if (status)
		return status;

	if (!dump_present(adapter)) {
		dev_err(dev, "FW dump not generated\n");
		return -EIO;
	}

	return 0;
}

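/* Deletes the firmware dump file object on Lancer and converts the
 * completion status to a standard errno via be_cmd_status().
 */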
int lancer_delete_dump(struct be_adapter *adapter)
{
	int status;

	status = lancer_cmd_delete_object(adapter, LANCER_FW_DUMP_FILE);
	return be_cmd_status(status);
}

/* Uses sync mcc */
int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_enable_disable_vf *req;
	int status;

	if (BEx_chip(adapter))
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
			       wrb, NULL);

	req->hdr.domain = domain;
	req->enable = 1;
	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

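/* Uses MBOX. Enables or disables interrupt delivery for the function via
 * SET_INTERRUPT_ENABLE.
 */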
int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_intr_set *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
			       wrb, NULL);

	req->intr_enabled = intr_enable;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses MBOX */
int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id)
{
	struct be_cmd_req_get_active_profile *req;
	struct be_mcc_wrb *wrb;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req),
			       wrb, NULL);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_active_profile *resp =
			embedded_payload(wrb);

		*profile_id = le16_to_cpu(resp->active_profile_id);
	}

err:
	mutex_unlock(&adapter->mbox_lock);
	return status;
}

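/* Uses sync mcc. Sets the logical (administrative) link state for a VF
 * domain; not supported on BEx or Lancer. IFLA_VF_LINK_STATE_ENABLE forces
 * the logical link up, while IFLA_VF_LINK_STATE_AUTO makes it track the
 * physical link (PLINK_TRACK).
 */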
int be_cmd_set_logical_link_config(struct be_adapter *adapter,
				   int link_state, u8 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_ll_link *req;
	int status;

	if (BEx_chip(adapter) || lancer_chip(adapter))
		return -EOPNOTSUPP;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG,
			       sizeof(*req), wrb, NULL);

	req->hdr.version = 1;
	req->hdr.domain = domain;

	if (link_state == IFLA_VF_LINK_STATE_ENABLE)
		req->link_config |= 1;

	if (link_state == IFLA_VF_LINK_STATE_AUTO)
		req->link_config |= 1 << PLINK_TRACK_SHIFT;

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

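/* Exported pass-through for the RoCE driver: copies the caller-built
 * command into an MCC WRB, issues it synchronously, and copies the
 * response (header plus response_length bytes) back into wrb_payload.
 */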
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
		    int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
	struct be_adapter *adapter = netdev_priv(netdev_handle);
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *)wrb_payload;
	struct be_cmd_req_hdr *req;
	struct be_cmd_resp_hdr *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);
	resp = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
			       hdr->opcode, wrb_payload_size, wrb, NULL);
	memcpy(req, wrb_payload, wrb_payload_size);
	be_dws_cpu_to_le(req, wrb_payload_size);

	status = be_mcc_notify_wait(adapter);
	if (cmd_status)
		*cmd_status = (status & 0xffff);
	if (ext_status)
		*ext_status = 0;
	memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
	be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
EXPORT_SYMBOL(be_roce_mcc_cmd);