/*
 * Copyright (C) 2005 - 2015 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"

char *be_misconfig_evt_port_state[] = {
	"Physical Link is functional",
	"Optics faulted/incorrectly installed/not installed - Reseat optics. If issue not resolved, replace.",
	"Optics of two types installed – Remove one optic or install matching pair of optics.",
	"Incompatible optics – Replace with compatible optics for card to function.",
	"Unqualified optics – Replace with Avago optics for Warranty and Technical Support.",
	"Uncertified optics – Replace with Avago-certified optics to enable link operation."
};

static char *be_port_misconfig_evt_severity[] = {
	"KERN_WARN",
	"KERN_INFO",
	"KERN_ERR",
	"KERN_WARN"
};

static char *phy_state_oper_desc[] = {
	"Link is non-operational",
	"Link is operational",
	""
};

static struct be_cmd_priv_map cmd_priv_map[] = {
	{
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_SET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_ETH_GET_PPORT_STATS,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_PHY_DETAILS,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_LOWLEVEL_HOST_DDR_DMA,
		CMD_SUBSYSTEM_LOWLEVEL,
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_LOWLEVEL_LOOPBACK_TEST,
		CMD_SUBSYSTEM_LOWLEVEL,
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
		CMD_SUBSYSTEM_LOWLEVEL,
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
};

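/* Returns true only when the function's privilege level (cmd_privileges)
 * permits the given opcode/subsystem; commands not listed in cmd_priv_map
 * are always allowed.
 */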
static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
{
	int i;
	int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
	u32 cmd_privileges = adapter->cmd_privileges;

	for (i = 0; i < num_entries; i++)
		if (opcode == cmd_priv_map[i].opcode &&
		    subsystem == cmd_priv_map[i].subsystem)
			if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
				return false;

	return true;
}

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

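/* Ring the MCC-queue doorbell to tell the FW that one new WRB has been
 * posted; bails out early if the adapter is already in an error state.
 */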
static int be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_ANY))
		return -EIO;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);

	return 0;
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	u32 flags;

	if (compl->flags != 0) {
		flags = le32_to_cpu(compl->flags);
		if (flags & CQE_FLAGS_VALID_MASK) {
			compl->flags = flags;
			return true;
		}
	}
	return false;
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

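/* The two WRB tag words carry the host address of the request header
 * (stored by fill_wrb_tags()); stitch them back into a pointer.
 */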
static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
{
	unsigned long addr;

	addr = tag1;
	addr = ((addr << 16) << 16) | tag0;
	return (void *)addr;
}

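/* Completion statuses that are expected in normal operation and should not
 * be logged as command failures.
 */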
static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
{
	if (base_status == MCC_STATUS_NOT_SUPPORTED ||
	    base_status == MCC_STATUS_ILLEGAL_REQUEST ||
	    addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
	    addl_status == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
	    (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
	    (base_status == MCC_STATUS_ILLEGAL_FIELD ||
	     addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
		return true;
	else
		return false;
}

/* Place holder for all the async MCC cmds wherein the caller is not in a busy
 * loop (has not issued be_mcc_notify_wait())
 */
static void be_async_cmd_process(struct be_adapter *adapter,
				 struct be_mcc_compl *compl,
				 struct be_cmd_resp_hdr *resp_hdr)
{
	enum mcc_base_status base_status = base_status(compl->status);
	u8 opcode = 0, subsystem = 0;

	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
		complete(&adapter->et_cmd_compl);
		return;
	}

	if (opcode == OPCODE_LOWLEVEL_SET_LOOPBACK_MODE &&
	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
		complete(&adapter->et_cmd_compl);
		return;
	}

	if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
	     opcode == OPCODE_COMMON_WRITE_OBJECT) &&
	    subsystem == CMD_SUBSYSTEM_COMMON) {
		adapter->flash_status = compl->status;
		complete(&adapter->et_cmd_compl);
		return;
	}

	if ((opcode == OPCODE_ETH_GET_STATISTICS ||
	     opcode == OPCODE_ETH_GET_PPORT_STATS) &&
	    subsystem == CMD_SUBSYSTEM_ETH &&
	    base_status == MCC_STATUS_SUCCESS) {
		be_parse_stats(adapter);
		adapter->stats_cmd_sent = false;
		return;
	}

	if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
	    subsystem == CMD_SUBSYSTEM_COMMON) {
		if (base_status == MCC_STATUS_SUCCESS) {
			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
							(void *)resp_hdr;
			adapter->hwmon_info.be_on_die_temp =
						resp->on_die_temperature;
		} else {
			adapter->be_get_temp_freq = 0;
			adapter->hwmon_info.be_on_die_temp =
						BE_INVALID_DIE_TEMP;
		}
		return;
	}
}

static int be_mcc_compl_process(struct be_adapter *adapter,
				struct be_mcc_compl *compl)
{
	enum mcc_base_status base_status;
	enum mcc_addl_status addl_status;
	struct be_cmd_resp_hdr *resp_hdr;
	u8 opcode = 0, subsystem = 0;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	base_status = base_status(compl->status);
	addl_status = addl_status(compl->status);

	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	be_async_cmd_process(adapter, compl, resp_hdr);

	if (base_status != MCC_STATUS_SUCCESS &&
	    !be_skip_err_log(opcode, base_status, addl_status)) {
		if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST ||
		    addl_status == MCC_ADDL_STATUS_INSUFFICIENT_PRIVILEGES) {
			dev_warn(&adapter->pdev->dev,
				 "VF is not privileged to issue opcode %d-%d\n",
				 opcode, subsystem);
		} else {
			dev_err(&adapter->pdev->dev,
				"opcode %d-%d failed:status %d-%d\n",
				opcode, subsystem, base_status, addl_status);
		}
	}
	return compl->status;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
					struct be_mcc_compl *compl)
{
	struct be_async_event_link_state *evt =
			(struct be_async_event_link_state *)compl;

	/* When link status changes, link speed must be re-queried from FW */
	adapter->phy.link_speed = -1;

	/* On BEx the FW does not send a separate link status
	 * notification for physical and logical link.
	 * On other chips just process the logical link
	 * status notification
	 */
	if (!BEx_chip(adapter) &&
	    !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
		return;

	/* For the initial link status do not rely on the ASYNC event as
	 * it may not be received in some cases.
	 */
	if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
		be_link_status_update(adapter,
				      evt->port_link_status & LINK_STATUS_MASK);
}

static void be_async_port_misconfig_event_process(struct be_adapter *adapter,
						  struct be_mcc_compl *compl)
{
	struct be_async_event_misconfig_port *evt =
			(struct be_async_event_misconfig_port *)compl;
	u32 sfp_misconfig_evt_word1 = le32_to_cpu(evt->event_data_word1);
	u32 sfp_misconfig_evt_word2 = le32_to_cpu(evt->event_data_word2);
	u8 phy_oper_state = PHY_STATE_OPER_MSG_NONE;
	struct device *dev = &adapter->pdev->dev;
	u8 msg_severity = DEFAULT_MSG_SEVERITY;
	u8 phy_state_info;
	u8 new_phy_state;

	new_phy_state =
		(sfp_misconfig_evt_word1 >> (adapter->hba_port_num * 8)) & 0xff;

	if (new_phy_state == adapter->phy_state)
		return;

	adapter->phy_state = new_phy_state;

	/* for older fw that doesn't populate link effect data */
	if (!sfp_misconfig_evt_word2)
		goto log_message;

	phy_state_info =
		(sfp_misconfig_evt_word2 >> (adapter->hba_port_num * 8)) & 0xff;

	if (phy_state_info & PHY_STATE_INFO_VALID) {
		msg_severity = (phy_state_info & PHY_STATE_MSG_SEVERITY) >> 1;

		if (be_phy_unqualified(new_phy_state))
			phy_oper_state = (phy_state_info & PHY_STATE_OPER);
	}

log_message:
	/* Log an error message that would allow a user to determine
	 * whether the SFPs have an issue
	 */
	if (be_phy_state_unknown(new_phy_state))
		dev_printk(be_port_misconfig_evt_severity[msg_severity], dev,
			   "Port %c: Unrecognized Optics state: 0x%x. %s",
			   adapter->port_name,
			   new_phy_state,
			   phy_state_oper_desc[phy_oper_state]);
	else
		dev_printk(be_port_misconfig_evt_severity[msg_severity], dev,
			   "Port %c: %s %s",
			   adapter->port_name,
			   be_misconfig_evt_port_state[new_phy_state],
			   phy_state_oper_desc[phy_oper_state]);

	/* Log Vendor name and part no. if a misconfigured SFP is detected */
	if (be_phy_misconfigured(new_phy_state))
		adapter->flags |= BE_FLAGS_PHY_MISCONFIGURED;
}

/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
					       struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_cos_priority *evt =
			(struct be_async_event_grp5_cos_priority *)compl;

	if (evt->valid) {
		adapter->vlan_prio_bmap = evt->available_priority_bmap;
		adapter->recommended_prio_bits =
			evt->reco_default_priority << VLAN_PRIO_SHIFT;
	}
}

/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
					    struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_qos_link_speed *evt =
			(struct be_async_event_grp5_qos_link_speed *)compl;

	if (adapter->phy.link_speed >= 0 &&
	    evt->physical_port == adapter->port_num)
		adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
}

/*Grp5 PVID evt*/
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
					     struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_pvid_state *evt =
			(struct be_async_event_grp5_pvid_state *)compl;

	if (evt->enabled) {
		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
		dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
	} else {
		adapter->pvid = 0;
	}
}

#define MGMT_ENABLE_MASK	0x4
static void be_async_grp5_fw_control_process(struct be_adapter *adapter,
					     struct be_mcc_compl *compl)
{
	struct be_async_fw_control *evt = (struct be_async_fw_control *)compl;
	u32 evt_dw1 = le32_to_cpu(evt->event_data_word1);

	if (evt_dw1 & MGMT_ENABLE_MASK) {
		adapter->flags |= BE_FLAGS_OS2BMC;
		adapter->bmc_filt_mask = le32_to_cpu(evt->event_data_word2);
	} else {
		adapter->flags &= ~BE_FLAGS_OS2BMC;
	}
}

static void be_async_grp5_evt_process(struct be_adapter *adapter,
				      struct be_mcc_compl *compl)
{
	u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) &
				ASYNC_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_EVENT_COS_PRIORITY:
		be_async_grp5_cos_priority_process(adapter, compl);
		break;
	case ASYNC_EVENT_QOS_SPEED:
		be_async_grp5_qos_speed_process(adapter, compl);
		break;
	case ASYNC_EVENT_PVID_STATE:
		be_async_grp5_pvid_state_process(adapter, compl);
		break;
	/* Async event to disable/enable os2bmc and/or mac-learning */
	case ASYNC_EVENT_FW_CONTROL:
		be_async_grp5_fw_control_process(adapter, compl);
		break;
	default:
		break;
	}
}

static void be_async_dbg_evt_process(struct be_adapter *adapter,
				     struct be_mcc_compl *cmp)
{
	u8 event_type = 0;
	struct be_async_event_qnq *evt = (struct be_async_event_qnq *)cmp;

	event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
			ASYNC_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_DEBUG_EVENT_TYPE_QNQ:
		if (evt->valid)
			adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
		adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
		break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
			 event_type);
		break;
	}
}

static void be_async_sliport_evt_process(struct be_adapter *adapter,
					 struct be_mcc_compl *cmp)
{
	u8 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
			ASYNC_EVENT_TYPE_MASK;

	if (event_type == ASYNC_EVENT_PORT_MISCONFIG)
		be_async_port_misconfig_event_process(adapter, cmp);
}

static inline bool is_link_state_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_LINK_STATE;
}

static inline bool is_grp5_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_GRP_5;
}

static inline bool is_dbg_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_QNQ;
}

static inline bool is_sliport_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
		ASYNC_EVENT_CODE_SLIPORT;
}

static void be_mcc_event_process(struct be_adapter *adapter,
				 struct be_mcc_compl *compl)
{
	if (is_link_state_evt(compl->flags))
		be_async_link_state_process(adapter, compl);
	else if (is_grp5_evt(compl->flags))
		be_async_grp5_evt_process(adapter, compl);
	else if (is_dbg_evt(compl->flags))
		be_async_dbg_evt_process(adapter, compl);
	else if (is_sliport_evt(compl->flags))
		be_async_sliport_evt_process(adapter, compl);
}

static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	adapter->mcc_obj.rearm_cq = false;
	be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

int be_process_mcc(struct be_adapter *adapter)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock(&adapter->mcc_cq_lock);

	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			be_mcc_event_process(adapter, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	if (num)
		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);

	spin_unlock(&adapter->mcc_cq_lock);
	return status;
}

/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout	120000 /* 12s timeout */
	int i, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	for (i = 0; i < mcc_timeout; i++) {
		if (be_check_error(adapter, BE_ERROR_ANY))
			return -EIO;

		local_bh_disable();
		status = be_process_mcc(adapter);
		local_bh_enable();

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "FW not responding\n");
		be_set_error(adapter, BE_ERROR_FW);
		return -EIO;
	}
	return status;
}

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	int status;
	struct be_mcc_wrb *wrb;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
	u32 index = mcc_obj->q.head;
	struct be_cmd_resp_hdr *resp;

	index_dec(&index, mcc_obj->q.len);
	wrb = queue_index_node(&mcc_obj->q, index);

	resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);

	status = be_mcc_notify(adapter);
	if (status)
		goto out;

	status = be_mcc_wait_compl(adapter);
	if (status == -EIO)
		goto out;

	status = (resp->base_status |
		  ((resp->addl_status & CQE_ADDL_STATUS_MASK) <<
		   CQE_ADDL_STATUS_SHIFT));
out:
	return status;
}

static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	do {
		if (be_check_error(adapter, BE_ERROR_ANY))
			return -EIO;

		ready = ioread32(db);
		if (ready == 0xffffffff)
			return -1;

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "FW not responding\n");
			be_set_error(adapter, BE_ERROR_FW);
			be_detect_error(adapter);
			return -1;
		}

		msleep(1);
		msecs++;
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}

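/* Read the current POST stage from the SLIPORT semaphore: a CSR register on
 * BEx chips, PCI config space otherwise.
 */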
static u16 be_POST_stage_get(struct be_adapter *adapter)
{
	u32 sem;

	if (BEx_chip(adapter))
		sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
	else
		pci_read_config_dword(adapter->pdev,
				      SLIPORT_SEMAPHORE_OFFSET_SH, &sem);

	return sem & POST_STAGE_MASK;
}

static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			return 0;

		if (sliport_status & SLIPORT_STATUS_ERR_MASK &&
		    !(sliport_status & SLIPORT_STATUS_RN_MASK))
			return -EIO;

		msleep(1000);
	}

	return sliport_status ? : -1;
}

int be_fw_wait_ready(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;
	struct device *dev = &adapter->pdev->dev;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (status) {
			stage = status;
			goto err;
		}
		return 0;
	}

	do {
		/* There's no means to poll POST state on BE2/3 VFs */
		if (BEx_chip(adapter) && be_virtfn(adapter))
			return 0;

		stage = be_POST_stage_get(adapter);
		if (stage == POST_STAGE_ARMFW_RDY)
			return 0;

		dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
		if (msleep_interruptible(2000)) {
			dev_err(dev, "Waiting for POST aborted\n");
			return -EINTR;
		}
		timeout += 2;
	} while (timeout < 60);

err:
	dev_err(dev, "POST timeout; stage=%#x\n", stage);
	return -ETIMEDOUT;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
{
	wrb->tag0 = addr & 0xFFFFFFFF;
	wrb->tag1 = upper_32_bits(addr);
}

/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				   u8 subsystem, u8 opcode, int cmd_len,
				   struct be_mcc_wrb *wrb,
				   struct be_dma_mem *mem)
{
	struct be_sge *sge;

	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;
	fill_wrb_tags(wrb, (ulong) req_hdr);
	wrb->payload_length = cmd_len;
	if (mem) {
		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
			MCC_WRB_SGE_CNT_SHIFT;
		sge = nonembedded_sgl(wrb);
		sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
		sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
		sge->len = cpu_to_le32(mem->size);
	} else
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	be_dws_cpu_to_le(wrb, 8);
}

static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
				      struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

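/* Returns the WRB embedded in the mailbox DMA memory; used only while
 * holding mbox_lock.
 */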
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

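/* Allocate the next WRB slot from the MCC queue; returns NULL if the queue
 * has not been created yet or is currently full.
 */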
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (!mccq->created)
		return NULL;

	if (atomic_read(&mccq->used) >= mccq->len)
		return NULL;

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

static bool use_mcc(struct be_adapter *adapter)
{
	return adapter->mcc_obj.q.created;
}

/* Must be used only in process context */
static int be_cmd_lock(struct be_adapter *adapter)
{
	if (use_mcc(adapter)) {
		spin_lock_bh(&adapter->mcc_lock);
		return 0;
	} else {
		return mutex_lock_interruptible(&adapter->mbox_lock);
	}
}

/* Must be used only in process context */
static void be_cmd_unlock(struct be_adapter *adapter)
{
	if (use_mcc(adapter))
		spin_unlock_bh(&adapter->mcc_lock);
	else
		return mutex_unlock(&adapter->mbox_lock);
}

static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
				      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;

	if (use_mcc(adapter)) {
		dest_wrb = wrb_from_mccq(adapter);
		if (!dest_wrb)
			return NULL;
	} else {
		dest_wrb = wrb_from_mbox(adapter);
	}

	memcpy(dest_wrb, wrb, sizeof(*wrb));
	if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
		fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));

	return dest_wrb;
}

/* Must be used only in process context */
static int be_cmd_notify_wait(struct be_adapter *adapter,
			      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;
	int status;

	status = be_cmd_lock(adapter);
	if (status)
		return status;

	dest_wrb = be_cmd_copy(adapter, wrb);
	if (!dest_wrb) {
		status = -EBUSY;
		goto unlock;
	}

	if (use_mcc(adapter))
		status = be_mcc_notify_wait(adapter);
	else
		status = be_mbox_notify_wait(adapter);

	if (!status)
		memcpy(wrb, dest_wrb, sizeof(*wrb));

unlock:
	be_cmd_unlock(adapter);
	return status;
}

/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eqo->q.dma_mem;
	int status, ver = 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
			       NULL);

	/* Support for EQ_CREATEv2 available only SH-R onwards */
	if (!(BEx_chip(adapter) || lancer_chip(adapter)))
		ver = 2;

	req->hdr.version = ver;
	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe*/
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
		      __ilog2_u32(eqo->q.len / 256));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);

		eqo->q.id = le16_to_cpu(resp->eq_id);
		eqo->msix_idx =
			(ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
		eqo->q.created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Use MCC */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			  bool permanent, u32 if_handle, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
			       NULL);
	req->type = MAC_ADDRESS_TYPE_NETWORK;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16)if_handle);
		req->pmac_id = cpu_to_le32(pmac_id);
		req->permanent = 0;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);

		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		    u32 if_id, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
			       NULL);

	req->hdr.domain = domain;
	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);

		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);

	if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
		status = -EPERM;

	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	if (pmac_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req),
			       wrb, NULL);

	req->hdr.domain = dom;
	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
		     struct be_queue_info *eq, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	if (BEx_chip(adapter)) {
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
			      coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
			      ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
	} else {
		req->hdr.version = 2;
		req->page_size = 1; /* 1 for 4K */

		/* coalesce-wm field in this cmd is not relevant to Lancer.
		 * Lancer uses COMMON_MODIFY_CQ to set this field
		 */
		if (!lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
				      ctxt, coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
			      no_delay);
		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);

		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */

	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}

Jingoo Han4188e7d2013-08-05 18:02:02 +09001228static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301229 struct be_queue_info *mccq,
1230 struct be_queue_info *cq)
Sathya Perla5fb379e2009-06-18 00:02:59 +00001231{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001232 struct be_mcc_wrb *wrb;
Somnath Kotur34b1ef02011-06-01 00:33:22 +00001233 struct be_cmd_req_mcc_ext_create *req;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001234 struct be_dma_mem *q_mem = &mccq->dma_mem;
Sathya Perlab31c50a2009-09-17 10:30:13 -07001235 void *ctxt;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001236 int status;
1237
Ivan Vecera29849612010-12-14 05:43:19 +00001238 if (mutex_lock_interruptible(&adapter->mbox_lock))
1239 return -1;
Sathya Perlab31c50a2009-09-17 10:30:13 -07001240
1241 wrb = wrb_from_mbox(adapter);
1242 req = embedded_payload(wrb);
1243 ctxt = &req->context;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001244
Somnath Kotur106df1e2011-10-27 07:12:13 +00001245 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301246 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
1247 NULL);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001248
Ajit Khaparded4a2ac32010-03-11 01:35:59 +00001249 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
Vasundhara Volam666d39c2014-01-15 13:23:31 +05301250 if (BEx_chip(adapter)) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001251 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1252 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301253 be_encoded_q_len(mccq->len));
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001254 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
Vasundhara Volam666d39c2014-01-15 13:23:31 +05301255 } else {
1256 req->hdr.version = 1;
1257 req->cq_id = cpu_to_le16(cq->id);
1258
1259 AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt,
1260 be_encoded_q_len(mccq->len));
1261 AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1);
1262 AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id,
1263 ctxt, cq->id);
1264 AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid,
1265 ctxt, 1);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001266 }
1267
Vasundhara Volam21252372015-02-06 08:18:42 -05001268	/* Subscribe to Link State, Sliport, QnQ and Group 5 async events
1269	 * (the corresponding event-code bits are set below)
1270 */
1271 req->async_event_bitmap[0] =
1272 cpu_to_le32(BIT(ASYNC_EVENT_CODE_LINK_STATE) |
1273 BIT(ASYNC_EVENT_CODE_GRP_5) |
1274 BIT(ASYNC_EVENT_CODE_QNQ) |
1275 BIT(ASYNC_EVENT_CODE_SLIPORT));
1276
Sathya Perla5fb379e2009-06-18 00:02:59 +00001277 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1278
1279 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1280
Sathya Perlab31c50a2009-09-17 10:30:13 -07001281 status = be_mbox_notify_wait(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001282 if (!status) {
1283 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05301284
Sathya Perla5fb379e2009-06-18 00:02:59 +00001285 mccq->id = le16_to_cpu(resp->id);
1286 mccq->created = true;
1287 }
Ivan Vecera29849612010-12-14 05:43:19 +00001288 mutex_unlock(&adapter->mbox_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001289
1290 return status;
1291}
1292
Jingoo Han4188e7d2013-08-05 18:02:02 +09001293static int be_cmd_mccq_org_create(struct be_adapter *adapter,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301294 struct be_queue_info *mccq,
1295 struct be_queue_info *cq)
Somnath Kotur34b1ef02011-06-01 00:33:22 +00001296{
1297 struct be_mcc_wrb *wrb;
1298 struct be_cmd_req_mcc_create *req;
1299 struct be_dma_mem *q_mem = &mccq->dma_mem;
1300 void *ctxt;
1301 int status;
1302
1303 if (mutex_lock_interruptible(&adapter->mbox_lock))
1304 return -1;
1305
1306 wrb = wrb_from_mbox(adapter);
1307 req = embedded_payload(wrb);
1308 ctxt = &req->context;
1309
Somnath Kotur106df1e2011-10-27 07:12:13 +00001310 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301311 OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
1312 NULL);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00001313
1314 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1315
1316 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1317 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301318 be_encoded_q_len(mccq->len));
Somnath Kotur34b1ef02011-06-01 00:33:22 +00001319 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1320
1321 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1322
1323 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1324
1325 status = be_mbox_notify_wait(adapter);
1326 if (!status) {
1327 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05301328
Somnath Kotur34b1ef02011-06-01 00:33:22 +00001329 mccq->id = le16_to_cpu(resp->id);
1330 mccq->created = true;
1331 }
1332
1333 mutex_unlock(&adapter->mbox_lock);
1334 return status;
1335}
1336
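/* Creates the MCC queue: the extended create (which also subscribes to the
 * async events above) is tried first and, if it fails on a BEx chip, the
 * driver warns and falls back to the original MCC_CREATE variant.
 */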
1337int be_cmd_mccq_create(struct be_adapter *adapter,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301338 struct be_queue_info *mccq, struct be_queue_info *cq)
Somnath Kotur34b1ef02011-06-01 00:33:22 +00001339{
1340 int status;
1341
1342 status = be_cmd_mccq_ext_create(adapter, mccq, cq);
Vasundhara Volam666d39c2014-01-15 13:23:31 +05301343 if (status && BEx_chip(adapter)) {
Somnath Kotur34b1ef02011-06-01 00:33:22 +00001344 dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
1345 "or newer to avoid conflicting priorities between NIC "
1346 "and FCoE traffic");
1347 status = be_cmd_mccq_org_create(adapter, mccq, cq);
1348 }
1349 return status;
1350}
1351
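/* TX queue create. A summary of the version selection below: v1 on Lancer,
 * v2 on Skyhawk and on BEx functions with the SUPER_NIC capability, v0
 * otherwise. Only a v2 response carries a per-queue doorbell offset; for
 * older versions the driver falls back to DB_TXULP1_OFFSET.
 */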
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00001352int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001353{
Sathya Perla77071332013-08-27 16:57:34 +05301354 struct be_mcc_wrb wrb = {0};
Sathya Perlab31c50a2009-09-17 10:30:13 -07001355 struct be_cmd_req_eth_tx_create *req;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00001356 struct be_queue_info *txq = &txo->q;
1357 struct be_queue_info *cq = &txo->cq;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001358 struct be_dma_mem *q_mem = &txq->dma_mem;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00001359 int status, ver = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001360
Sathya Perla77071332013-08-27 16:57:34 +05301361 req = embedded_payload(&wrb);
Somnath Kotur106df1e2011-10-27 07:12:13 +00001362 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301363 OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001364
Padmanabh Ratnakar8b7756c2011-03-07 03:08:52 +00001365 if (lancer_chip(adapter)) {
1366 req->hdr.version = 1;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00001367 } else if (BEx_chip(adapter)) {
1368 if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
1369 req->hdr.version = 2;
1370 } else { /* For SH */
1371 req->hdr.version = 2;
Padmanabh Ratnakar8b7756c2011-03-07 03:08:52 +00001372 }
1373
Vasundhara Volam81b02652013-10-01 15:59:57 +05301374 if (req->hdr.version > 0)
1375 req->if_id = cpu_to_le16(adapter->if_handle);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001376 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1377 req->ulp_num = BE_ULP1_NUM;
1378 req->type = BE_ETH_TX_RING_TYPE_STANDARD;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00001379 req->cq_id = cpu_to_le16(cq->id);
1380 req->queue_size = be_encoded_q_len(txq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001381 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00001382 ver = req->hdr.version;
1383
Sathya Perla77071332013-08-27 16:57:34 +05301384 status = be_cmd_notify_wait(adapter, &wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001385 if (!status) {
Sathya Perla77071332013-08-27 16:57:34 +05301386 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05301387
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001388 txq->id = le16_to_cpu(resp->cid);
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00001389 if (ver == 2)
1390 txo->db_offset = le32_to_cpu(resp->db_offset);
1391 else
1392 txo->db_offset = DB_TXULP1_OFFSET;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001393 txq->created = true;
1394 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001395
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001396 return status;
1397}
1398
Sathya Perla482c9e72011-06-29 23:33:17 +00001399/* Uses MCC */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001400int be_cmd_rxq_create(struct be_adapter *adapter,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301401 struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
1402 u32 if_id, u32 rss, u8 *rss_id)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001403{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001404 struct be_mcc_wrb *wrb;
1405 struct be_cmd_req_eth_rx_create *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001406 struct be_dma_mem *q_mem = &rxq->dma_mem;
1407 int status;
1408
Sathya Perla482c9e72011-06-29 23:33:17 +00001409 spin_lock_bh(&adapter->mcc_lock);
Sathya Perlab31c50a2009-09-17 10:30:13 -07001410
Sathya Perla482c9e72011-06-29 23:33:17 +00001411 wrb = wrb_from_mccq(adapter);
1412 if (!wrb) {
1413 status = -EBUSY;
1414 goto err;
1415 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001416 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001417
Somnath Kotur106df1e2011-10-27 07:12:13 +00001418 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301419 OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001420
1421 req->cq_id = cpu_to_le16(cq_id);
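	/* frag_size is programmed as a log2 value: e.g. a 2048-byte RX
	 * fragment is encoded as fls(2048) - 1 = 11 (illustrative example,
	 * assuming a power-of-two fragment size as the callers use).
	 */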
1422 req->frag_size = fls(frag_size) - 1;
1423 req->num_pages = 2;
1424 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1425 req->interface_id = cpu_to_le32(if_id);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001426 req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001427 req->rss_queue = cpu_to_le32(rss);
1428
Sathya Perla482c9e72011-06-29 23:33:17 +00001429 status = be_mcc_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001430 if (!status) {
1431 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05301432
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001433 rxq->id = le16_to_cpu(resp->id);
1434 rxq->created = true;
Sathya Perla3abcded2010-10-03 22:12:27 -07001435 *rss_id = resp->rss_id;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001436 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001437
Sathya Perla482c9e72011-06-29 23:33:17 +00001438err:
1439 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001440 return status;
1441}
1442
Sathya Perlab31c50a2009-09-17 10:30:13 -07001443/* Generic destroyer function for all types of queues
1444 * Uses Mbox
1445 */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001446int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301447 int queue_type)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001448{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001449 struct be_mcc_wrb *wrb;
1450 struct be_cmd_req_q_destroy *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001451 u8 subsys = 0, opcode = 0;
1452 int status;
1453
Ivan Vecera29849612010-12-14 05:43:19 +00001454 if (mutex_lock_interruptible(&adapter->mbox_lock))
1455 return -1;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001456
Sathya Perlab31c50a2009-09-17 10:30:13 -07001457 wrb = wrb_from_mbox(adapter);
1458 req = embedded_payload(wrb);
1459
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001460 switch (queue_type) {
1461 case QTYPE_EQ:
1462 subsys = CMD_SUBSYSTEM_COMMON;
1463 opcode = OPCODE_COMMON_EQ_DESTROY;
1464 break;
1465 case QTYPE_CQ:
1466 subsys = CMD_SUBSYSTEM_COMMON;
1467 opcode = OPCODE_COMMON_CQ_DESTROY;
1468 break;
1469 case QTYPE_TXQ:
1470 subsys = CMD_SUBSYSTEM_ETH;
1471 opcode = OPCODE_ETH_TX_DESTROY;
1472 break;
1473 case QTYPE_RXQ:
1474 subsys = CMD_SUBSYSTEM_ETH;
1475 opcode = OPCODE_ETH_RX_DESTROY;
1476 break;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001477 case QTYPE_MCCQ:
1478 subsys = CMD_SUBSYSTEM_COMMON;
1479 opcode = OPCODE_COMMON_MCC_DESTROY;
1480 break;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001481 default:
Sathya Perla5f0b8492009-07-27 22:52:56 +00001482 BUG();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001483 }
Ajit Khaparded744b442009-12-03 06:12:06 +00001484
Somnath Kotur106df1e2011-10-27 07:12:13 +00001485 be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301486 NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001487 req->id = cpu_to_le16(q->id);
1488
Sathya Perlab31c50a2009-09-17 10:30:13 -07001489 status = be_mbox_notify_wait(adapter);
Padmanabh Ratnakaraa790db2012-10-20 06:03:25 +00001490 q->created = false;
Sathya Perla5f0b8492009-07-27 22:52:56 +00001491
Ivan Vecera29849612010-12-14 05:43:19 +00001492 mutex_unlock(&adapter->mbox_lock);
Sathya Perla482c9e72011-06-29 23:33:17 +00001493 return status;
1494}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001495
Sathya Perla482c9e72011-06-29 23:33:17 +00001496/* Uses MCC */
1497int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1498{
1499 struct be_mcc_wrb *wrb;
1500 struct be_cmd_req_q_destroy *req;
1501 int status;
1502
1503 spin_lock_bh(&adapter->mcc_lock);
1504
1505 wrb = wrb_from_mccq(adapter);
1506 if (!wrb) {
1507 status = -EBUSY;
1508 goto err;
1509 }
1510 req = embedded_payload(wrb);
1511
Somnath Kotur106df1e2011-10-27 07:12:13 +00001512 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301513 OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
Sathya Perla482c9e72011-06-29 23:33:17 +00001514 req->id = cpu_to_le16(q->id);
1515
1516 status = be_mcc_notify_wait(adapter);
Padmanabh Ratnakaraa790db2012-10-20 06:03:25 +00001517 q->created = false;
Sathya Perla482c9e72011-06-29 23:33:17 +00001518
1519err:
1520 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001521 return status;
1522}
1523
Sathya Perlab31c50a2009-09-17 10:30:13 -07001524/* Create an rx filtering policy configuration on an i/f
Sathya Perlabea50982013-08-27 16:57:33 +05301525 * Will use MBOX only if MCCQ has not been created.
Sathya Perlab31c50a2009-09-17 10:30:13 -07001526 */
Sathya Perla73d540f2009-10-14 20:20:42 +00001527int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00001528 u32 *if_handle, u32 domain)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001529{
Sathya Perlabea50982013-08-27 16:57:33 +05301530 struct be_mcc_wrb wrb = {0};
Sathya Perlab31c50a2009-09-17 10:30:13 -07001531 struct be_cmd_req_if_create *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001532 int status;
1533
Sathya Perlabea50982013-08-27 16:57:33 +05301534 req = embedded_payload(&wrb);
Somnath Kotur106df1e2011-10-27 07:12:13 +00001535 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301536 OPCODE_COMMON_NTWK_INTERFACE_CREATE,
1537 sizeof(*req), &wrb, NULL);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001538 req->hdr.domain = domain;
Sathya Perla73d540f2009-10-14 20:20:42 +00001539 req->capability_flags = cpu_to_le32(cap_flags);
1540 req->enable_flags = cpu_to_le32(en_flags);
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00001541 req->pmac_invalid = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001542
Sathya Perlabea50982013-08-27 16:57:33 +05301543 status = be_cmd_notify_wait(adapter, &wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001544 if (!status) {
Sathya Perlabea50982013-08-27 16:57:33 +05301545 struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05301546
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001547 *if_handle = le32_to_cpu(resp->interface_id);
Sathya Perlab5bb9772013-07-23 15:25:01 +05301548
1549 /* Hack to retrieve VF's pmac-id on BE3 */
Kalesh AP18c57c72015-05-06 05:30:38 -04001550 if (BE3_chip(adapter) && be_virtfn(adapter))
Sathya Perlab5bb9772013-07-23 15:25:01 +05301551 adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001552 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001553 return status;
1554}
1555
Ajit Khaparde62219062016-02-10 22:45:53 +05301556/* Uses MCCQ if available else MBOX */
Sathya Perla30128032011-11-10 19:17:57 +00001557int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001558{
Ajit Khaparde62219062016-02-10 22:45:53 +05301559 struct be_mcc_wrb wrb = {0};
Sathya Perlab31c50a2009-09-17 10:30:13 -07001560 struct be_cmd_req_if_destroy *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001561 int status;
1562
Sathya Perla30128032011-11-10 19:17:57 +00001563 if (interface_id == -1)
Sathya Perlaf9449ab2011-10-24 02:45:01 +00001564 return 0;
Sathya Perlab31c50a2009-09-17 10:30:13 -07001565
Ajit Khaparde62219062016-02-10 22:45:53 +05301566 req = embedded_payload(&wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001567
Somnath Kotur106df1e2011-10-27 07:12:13 +00001568 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301569 OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
Ajit Khaparde62219062016-02-10 22:45:53 +05301570 sizeof(*req), &wrb, NULL);
Ajit Khaparde658681f2011-02-11 13:34:46 +00001571 req->hdr.domain = domain;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001572 req->interface_id = cpu_to_le32(interface_id);
Sathya Perlab31c50a2009-09-17 10:30:13 -07001573
Ajit Khaparde62219062016-02-10 22:45:53 +05301574 status = be_cmd_notify_wait(adapter, &wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001575 return status;
1576}
1577
1578/* Get stats is a non-embedded command: the request is not embedded inside
1579 * the WRB but is a separate DMA memory block
Sathya Perlab31c50a2009-09-17 10:30:13 -07001580 * Uses asynchronous MCC
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001581 */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001582int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001583{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001584 struct be_mcc_wrb *wrb;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00001585 struct be_cmd_req_hdr *hdr;
Sathya Perla713d03942009-11-22 22:02:45 +00001586 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001587
Sathya Perlab31c50a2009-09-17 10:30:13 -07001588 spin_lock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001589
Sathya Perlab31c50a2009-09-17 10:30:13 -07001590 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001591 if (!wrb) {
1592 status = -EBUSY;
1593 goto err;
1594 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00001595 hdr = nonemb_cmd->va;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001596
Somnath Kotur106df1e2011-10-27 07:12:13 +00001597 be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301598 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
1599 nonemb_cmd);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00001600
Sathya Perlaca34fe32012-11-06 17:48:56 +00001601	/* BE2 supports only v0 of this cmd; BE3 and Lancer use v1, newer chips v2 */
Ajit Khaparde61000862013-10-03 16:16:33 -05001602 if (BE2_chip(adapter))
1603 hdr->version = 0;
1604 if (BE3_chip(adapter) || lancer_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00001605 hdr->version = 1;
Ajit Khaparde61000862013-10-03 16:16:33 -05001606 else
1607 hdr->version = 2;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00001608
Suresh Reddyefaa4082015-07-10 05:32:48 -04001609 status = be_mcc_notify(adapter);
1610 if (status)
1611 goto err;
1612
Ajit Khapardeb2aebe62011-02-20 11:41:39 +00001613 adapter->stats_cmd_sent = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001614
Sathya Perla713d03942009-11-22 22:02:45 +00001615err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07001616 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla713d03942009-11-22 22:02:45 +00001617 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001618}
1619
Selvin Xavier005d5692011-05-16 07:36:35 +00001620/* Lancer Stats */
1621int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301622 struct be_dma_mem *nonemb_cmd)
Selvin Xavier005d5692011-05-16 07:36:35 +00001623{
Selvin Xavier005d5692011-05-16 07:36:35 +00001624 struct be_mcc_wrb *wrb;
1625 struct lancer_cmd_req_pport_stats *req;
Selvin Xavier005d5692011-05-16 07:36:35 +00001626 int status = 0;
1627
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00001628 if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
1629 CMD_SUBSYSTEM_ETH))
1630 return -EPERM;
1631
Selvin Xavier005d5692011-05-16 07:36:35 +00001632 spin_lock_bh(&adapter->mcc_lock);
1633
1634 wrb = wrb_from_mccq(adapter);
1635 if (!wrb) {
1636 status = -EBUSY;
1637 goto err;
1638 }
1639 req = nonemb_cmd->va;
Selvin Xavier005d5692011-05-16 07:36:35 +00001640
Somnath Kotur106df1e2011-10-27 07:12:13 +00001641 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301642 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
1643 wrb, nonemb_cmd);
Selvin Xavier005d5692011-05-16 07:36:35 +00001644
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +00001645 req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
Selvin Xavier005d5692011-05-16 07:36:35 +00001646 req->cmd_params.params.reset_stats = 0;
1647
Suresh Reddyefaa4082015-07-10 05:32:48 -04001648 status = be_mcc_notify(adapter);
1649 if (status)
1650 goto err;
1651
Selvin Xavier005d5692011-05-16 07:36:35 +00001652 adapter->stats_cmd_sent = true;
1653
1654err:
1655 spin_unlock_bh(&adapter->mcc_lock);
1656 return status;
1657}
1658
Sathya Perla323ff712012-09-28 04:39:43 +00001659static int be_mac_to_link_speed(int mac_speed)
1660{
1661 switch (mac_speed) {
1662 case PHY_LINK_SPEED_ZERO:
1663 return 0;
1664 case PHY_LINK_SPEED_10MBPS:
1665 return 10;
1666 case PHY_LINK_SPEED_100MBPS:
1667 return 100;
1668 case PHY_LINK_SPEED_1GBPS:
1669 return 1000;
1670 case PHY_LINK_SPEED_10GBPS:
1671 return 10000;
Vasundhara Volamb971f842013-08-06 09:27:15 +05301672 case PHY_LINK_SPEED_20GBPS:
1673 return 20000;
1674 case PHY_LINK_SPEED_25GBPS:
1675 return 25000;
1676 case PHY_LINK_SPEED_40GBPS:
1677 return 40000;
Sathya Perla323ff712012-09-28 04:39:43 +00001678 }
1679 return 0;
1680}
1681
1682/* Uses synchronous mcc
1683 * Returns link_speed in Mbps
1684 */
1685int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
1686 u8 *link_status, u32 dom)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001687{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001688 struct be_mcc_wrb *wrb;
1689 struct be_cmd_req_link_status *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001690 int status;
1691
Sathya Perlab31c50a2009-09-17 10:30:13 -07001692 spin_lock_bh(&adapter->mcc_lock);
1693
Ajit Khapardeb236916a2011-12-30 12:15:40 +00001694 if (link_status)
1695 *link_status = LINK_DOWN;
1696
Sathya Perlab31c50a2009-09-17 10:30:13 -07001697 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001698 if (!wrb) {
1699 status = -EBUSY;
1700 goto err;
1701 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001702 req = embedded_payload(wrb);
Sathya Perlaa8f447bd2009-06-18 00:10:27 +00001703
Padmanabh Ratnakar57cd80d2012-02-03 09:49:46 +00001704 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301705 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
1706 sizeof(*req), wrb, NULL);
Padmanabh Ratnakar57cd80d2012-02-03 09:49:46 +00001707
Sathya Perlaca34fe32012-11-06 17:48:56 +00001708	/* All chips except BE2 support v1 of this cmd */
1709 if (!BE2_chip(adapter))
Padmanabh Ratnakardaad6162011-11-16 02:03:45 +00001710 req->hdr.version = 1;
1711
Padmanabh Ratnakar57cd80d2012-02-03 09:49:46 +00001712 req->hdr.domain = dom;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001713
Sathya Perlab31c50a2009-09-17 10:30:13 -07001714 status = be_mcc_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001715 if (!status) {
1716 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05301717
Sathya Perla323ff712012-09-28 04:39:43 +00001718 if (link_speed) {
1719 *link_speed = resp->link_speed ?
1720 le16_to_cpu(resp->link_speed) * 10 :
1721 be_mac_to_link_speed(resp->mac_speed);
1722
1723 if (!resp->logical_link_status)
1724 *link_speed = 0;
Sarveshwar Bandi0388f252009-10-28 04:15:20 -07001725 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +00001726 if (link_status)
1727 *link_status = resp->logical_link_status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001728 }
1729
Sathya Perla713d03942009-11-22 22:02:45 +00001730err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07001731 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001732 return status;
1733}
1734
Ajit Khaparde609ff3b2011-02-20 11:42:07 +00001735/* Uses asynchronous mcc */
1736int be_cmd_get_die_temperature(struct be_adapter *adapter)
1737{
1738 struct be_mcc_wrb *wrb;
1739 struct be_cmd_req_get_cntl_addnl_attribs *req;
Vasundhara Volam117affe2013-08-06 09:27:20 +05301740 int status = 0;
Ajit Khaparde609ff3b2011-02-20 11:42:07 +00001741
1742 spin_lock_bh(&adapter->mcc_lock);
1743
1744 wrb = wrb_from_mccq(adapter);
1745 if (!wrb) {
1746 status = -EBUSY;
1747 goto err;
1748 }
1749 req = embedded_payload(wrb);
1750
Somnath Kotur106df1e2011-10-27 07:12:13 +00001751 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301752 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
1753 sizeof(*req), wrb, NULL);
Ajit Khaparde609ff3b2011-02-20 11:42:07 +00001754
Suresh Reddyefaa4082015-07-10 05:32:48 -04001755 status = be_mcc_notify(adapter);
Ajit Khaparde609ff3b2011-02-20 11:42:07 +00001756err:
1757 spin_unlock_bh(&adapter->mcc_lock);
1758 return status;
1759}
1760
Somnath Kotur311fddc2011-03-16 21:22:43 +00001761/* Uses synchronous mcc */
Venkat Duvvurufd7ff6f2015-12-30 01:29:04 -05001762int be_cmd_get_fat_dump_len(struct be_adapter *adapter, u32 *dump_size)
Somnath Kotur311fddc2011-03-16 21:22:43 +00001763{
Venkat Duvvurufd7ff6f2015-12-30 01:29:04 -05001764 struct be_mcc_wrb wrb = {0};
Somnath Kotur311fddc2011-03-16 21:22:43 +00001765 struct be_cmd_req_get_fat *req;
1766 int status;
1767
Venkat Duvvurufd7ff6f2015-12-30 01:29:04 -05001768 req = embedded_payload(&wrb);
Somnath Kotur311fddc2011-03-16 21:22:43 +00001769
Somnath Kotur106df1e2011-10-27 07:12:13 +00001770 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Venkat Duvvurufd7ff6f2015-12-30 01:29:04 -05001771 OPCODE_COMMON_MANAGE_FAT, sizeof(*req),
1772 &wrb, NULL);
Somnath Kotur311fddc2011-03-16 21:22:43 +00001773 req->fat_operation = cpu_to_le32(QUERY_FAT);
Venkat Duvvurufd7ff6f2015-12-30 01:29:04 -05001774 status = be_cmd_notify_wait(adapter, &wrb);
Somnath Kotur311fddc2011-03-16 21:22:43 +00001775 if (!status) {
Venkat Duvvurufd7ff6f2015-12-30 01:29:04 -05001776 struct be_cmd_resp_get_fat *resp = embedded_payload(&wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05301777
Venkat Duvvurufd7ff6f2015-12-30 01:29:04 -05001778 if (dump_size && resp->log_size)
1779 *dump_size = le32_to_cpu(resp->log_size) -
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001780 sizeof(u32);
Somnath Kotur311fddc2011-03-16 21:22:43 +00001781 }
Somnath Kotur311fddc2011-03-16 21:22:43 +00001782 return status;
1783}
1784
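/* Retrieves the FAT log with repeated RETRIEVE_FAT commands, reading at most
 * 60KB per command and starting past the leading u32 of the log (the same
 * word that be_cmd_get_fat_dump_len() subtracts above). This is a summary of
 * the loop below, not firmware documentation.
 */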
Venkat Duvvurufd7ff6f2015-12-30 01:29:04 -05001785int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
Somnath Kotur311fddc2011-03-16 21:22:43 +00001786{
1787 struct be_dma_mem get_fat_cmd;
1788 struct be_mcc_wrb *wrb;
1789 struct be_cmd_req_get_fat *req;
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001790 u32 offset = 0, total_size, buf_size,
1791 log_offset = sizeof(u32), payload_len;
Venkat Duvvurufd7ff6f2015-12-30 01:29:04 -05001792 int status;
Somnath Kotur311fddc2011-03-16 21:22:43 +00001793
1794 if (buf_len == 0)
Venkat Duvvurufd7ff6f2015-12-30 01:29:04 -05001795 return 0;
Somnath Kotur311fddc2011-03-16 21:22:43 +00001796
1797 total_size = buf_len;
1798
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001799 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05301800 get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
1801 get_fat_cmd.size,
1802 &get_fat_cmd.dma, GFP_ATOMIC);
Venkat Duvvurufd7ff6f2015-12-30 01:29:04 -05001803 if (!get_fat_cmd.va)
Vasundhara Volamc5f156d2014-09-02 09:56:54 +05301804 return -ENOMEM;
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001805
Somnath Kotur311fddc2011-03-16 21:22:43 +00001806 spin_lock_bh(&adapter->mcc_lock);
1807
Somnath Kotur311fddc2011-03-16 21:22:43 +00001808 while (total_size) {
1809 buf_size = min(total_size, (u32)60*1024);
1810 total_size -= buf_size;
1811
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001812 wrb = wrb_from_mccq(adapter);
1813 if (!wrb) {
1814 status = -EBUSY;
Somnath Kotur311fddc2011-03-16 21:22:43 +00001815 goto err;
1816 }
1817 req = get_fat_cmd.va;
Somnath Kotur311fddc2011-03-16 21:22:43 +00001818
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001819 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
Somnath Kotur106df1e2011-10-27 07:12:13 +00001820 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301821 OPCODE_COMMON_MANAGE_FAT, payload_len,
1822 wrb, &get_fat_cmd);
Somnath Kotur311fddc2011-03-16 21:22:43 +00001823
1824 req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1825 req->read_log_offset = cpu_to_le32(log_offset);
1826 req->read_log_length = cpu_to_le32(buf_size);
1827 req->data_buffer_size = cpu_to_le32(buf_size);
1828
1829 status = be_mcc_notify_wait(adapter);
1830 if (!status) {
1831 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
Kalesh AP03d28ff2014-09-19 15:46:56 +05301832
Somnath Kotur311fddc2011-03-16 21:22:43 +00001833 memcpy(buf + offset,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301834 resp->data_buffer,
1835 le32_to_cpu(resp->read_log_length));
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001836 } else {
Somnath Kotur311fddc2011-03-16 21:22:43 +00001837 dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001838 goto err;
1839 }
Somnath Kotur311fddc2011-03-16 21:22:43 +00001840 offset += buf_size;
1841 log_offset += buf_size;
1842 }
1843err:
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05301844 dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
1845 get_fat_cmd.va, get_fat_cmd.dma);
Somnath Kotur311fddc2011-03-16 21:22:43 +00001846 spin_unlock_bh(&adapter->mcc_lock);
Vasundhara Volamc5f156d2014-09-02 09:56:54 +05301847 return status;
Somnath Kotur311fddc2011-03-16 21:22:43 +00001848}
1849
Sathya Perla04b71172011-09-27 13:30:27 -04001850/* Uses synchronous mcc */
Kalesh APe97e3cd2014-07-17 16:20:26 +05301851int be_cmd_get_fw_ver(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001852{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001853 struct be_mcc_wrb *wrb;
1854 struct be_cmd_req_get_fw_version *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001855 int status;
1856
Sathya Perla04b71172011-09-27 13:30:27 -04001857 spin_lock_bh(&adapter->mcc_lock);
Sathya Perlab31c50a2009-09-17 10:30:13 -07001858
Sathya Perla04b71172011-09-27 13:30:27 -04001859 wrb = wrb_from_mccq(adapter);
1860 if (!wrb) {
1861 status = -EBUSY;
1862 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001863 }
1864
Sathya Perla04b71172011-09-27 13:30:27 -04001865 req = embedded_payload(wrb);
Sathya Perla04b71172011-09-27 13:30:27 -04001866
Somnath Kotur106df1e2011-10-27 07:12:13 +00001867 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301868 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
1869 NULL);
Sathya Perla04b71172011-09-27 13:30:27 -04001870 status = be_mcc_notify_wait(adapter);
1871 if (!status) {
1872 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
Sathya Perlaacbafeb2014-09-02 09:56:46 +05301873
Vasundhara Volam242eb472014-09-12 17:39:15 +05301874 strlcpy(adapter->fw_ver, resp->firmware_version_string,
1875 sizeof(adapter->fw_ver));
1876 strlcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
1877 sizeof(adapter->fw_on_flash));
Sathya Perla04b71172011-09-27 13:30:27 -04001878 }
1879err:
1880 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001881 return status;
1882}
1883
Sathya Perlab31c50a2009-09-17 10:30:13 -07001884/* Set the EQ delay interval of an EQ to the specified value
1885 * Uses async mcc
1886 */
Kalesh APb502ae82014-09-19 15:46:51 +05301887static int __be_cmd_modify_eqd(struct be_adapter *adapter,
1888 struct be_set_eqd *set_eqd, int num)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001889{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001890 struct be_mcc_wrb *wrb;
1891 struct be_cmd_req_modify_eq_delay *req;
Sathya Perla2632baf2013-10-01 16:00:00 +05301892 int status = 0, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001893
Sathya Perlab31c50a2009-09-17 10:30:13 -07001894 spin_lock_bh(&adapter->mcc_lock);
1895
1896 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001897 if (!wrb) {
1898 status = -EBUSY;
1899 goto err;
1900 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001901 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001902
Somnath Kotur106df1e2011-10-27 07:12:13 +00001903 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301904 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
1905 NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001906
Sathya Perla2632baf2013-10-01 16:00:00 +05301907 req->num_eq = cpu_to_le32(num);
1908 for (i = 0; i < num; i++) {
1909 req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
1910 req->set_eqd[i].phase = 0;
1911 req->set_eqd[i].delay_multiplier =
1912 cpu_to_le32(set_eqd[i].delay_multiplier);
1913 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001914
Suresh Reddyefaa4082015-07-10 05:32:48 -04001915 status = be_mcc_notify(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001916err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07001917 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla713d03942009-11-22 22:02:45 +00001918 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001919}
1920
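/* The driver issues at most 8 EQ-delay entries per MODIFY_EQ_DELAY command,
 * so e.g. 18 EQs are programmed as batches of 8, 8 and 2 (illustrative
 * example of the chunking below).
 */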
Kalesh AP93676702014-09-12 17:39:20 +05301921int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
1922 int num)
1923{
1924 int num_eqs, i = 0;
1925
Suresh Reddyc8ba4ad02015-03-20 06:28:24 -04001926 while (num) {
1927 num_eqs = min(num, 8);
1928 __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
1929 i += num_eqs;
1930 num -= num_eqs;
Kalesh AP93676702014-09-12 17:39:20 +05301931 }
1932
1933 return 0;
1934}
1935
Sathya Perlab31c50a2009-09-17 10:30:13 -07001936	/* Uses synchronous mcc */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001937int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
Vasundhara Volam435452a2015-03-20 06:28:23 -04001938 u32 num, u32 domain)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001939{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001940 struct be_mcc_wrb *wrb;
1941 struct be_cmd_req_vlan_config *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001942 int status;
1943
Sathya Perlab31c50a2009-09-17 10:30:13 -07001944 spin_lock_bh(&adapter->mcc_lock);
1945
1946 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001947 if (!wrb) {
1948 status = -EBUSY;
1949 goto err;
1950 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001951 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001952
Somnath Kotur106df1e2011-10-27 07:12:13 +00001953 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301954 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
1955 wrb, NULL);
Vasundhara Volam435452a2015-03-20 06:28:23 -04001956 req->hdr.domain = domain;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001957
1958 req->interface_id = if_id;
Ajit Khaparde012bd382013-11-18 10:44:24 -06001959 req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001960 req->num_vlan = num;
Kalesh AP4d567d92014-05-09 13:29:17 +05301961 memcpy(req->normal_vlan, vtag_array,
1962 req->num_vlan * sizeof(vtag_array[0]));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001963
Sathya Perlab31c50a2009-09-17 10:30:13 -07001964 status = be_mcc_notify_wait(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001965err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07001966 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001967 return status;
1968}
1969
Sathya Perlaac34b742015-02-06 08:18:40 -05001970static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001971{
Sathya Perla6ac7b682009-06-18 00:05:54 +00001972 struct be_mcc_wrb *wrb;
Sathya Perla5b8821b2011-08-02 19:57:44 +00001973 struct be_dma_mem *mem = &adapter->rx_filter;
1974 struct be_cmd_req_rx_filter *req = mem->va;
Sathya Perlae7b909a2009-11-22 22:01:10 +00001975 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001976
Sathya Perla8788fdc2009-07-27 22:52:03 +00001977 spin_lock_bh(&adapter->mcc_lock);
Sathya Perla6ac7b682009-06-18 00:05:54 +00001978
Sathya Perlab31c50a2009-09-17 10:30:13 -07001979 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001980 if (!wrb) {
1981 status = -EBUSY;
1982 goto err;
1983 }
Sathya Perla5b8821b2011-08-02 19:57:44 +00001984 memset(req, 0, sizeof(*req));
Somnath Kotur106df1e2011-10-27 07:12:13 +00001985 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301986 OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
1987 wrb, mem);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001988
Sathya Perla5b8821b2011-08-02 19:57:44 +00001989 req->if_id = cpu_to_le32(adapter->if_handle);
Sathya Perlaac34b742015-02-06 08:18:40 -05001990 req->if_flags_mask = cpu_to_le32(flags);
1991 req->if_flags = (value == ON) ? req->if_flags_mask : 0;
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001992
Sathya Perlaac34b742015-02-06 08:18:40 -05001993 if (flags & BE_IF_FLAGS_MULTICAST) {
Sathya Perla5b8821b2011-08-02 19:57:44 +00001994 struct netdev_hw_addr *ha;
1995 int i = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001996
Padmanabh Ratnakar1610c792011-11-03 01:49:27 +00001997 /* Reset mcast promisc mode if already set by setting mask
1998 * and not setting flags field
1999 */
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002000 req->if_flags_mask |=
2001 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
Sathya Perla92bf14a2013-08-27 16:57:32 +05302002 be_if_cap_flags(adapter));
Padmanabh Ratnakar016f97b2011-11-03 01:49:13 +00002003 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
Sathya Perla5b8821b2011-08-02 19:57:44 +00002004 netdev_for_each_mc_addr(ha, adapter->netdev)
2005 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
2006 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002007
Sathya Perlab6588872015-09-03 07:41:53 -04002008 status = be_mcc_notify_wait(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00002009err:
Sathya Perla8788fdc2009-07-27 22:52:03 +00002010 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perlae7b909a2009-11-22 22:01:10 +00002011 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002012}
2013
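/* RX-filter wrapper: flags the interface is not capable of are warned about
 * and masked off before the command is sent, e.g. requesting
 * BE_IF_FLAGS_MCAST_PROMISCUOUS on an interface without that capability
 * leaves only the supported bits; if nothing supported remains, the call
 * fails with -ENOTSUPP (behaviour of the masking below).
 */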
Sathya Perlaac34b742015-02-06 08:18:40 -05002014int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
2015{
2016 struct device *dev = &adapter->pdev->dev;
2017
2018 if ((flags & be_if_cap_flags(adapter)) != flags) {
2019 dev_warn(dev, "Cannot set rx filter flags 0x%x\n", flags);
2020 dev_warn(dev, "Interface is capable of 0x%x flags only\n",
2021 be_if_cap_flags(adapter));
2022 }
2023 flags &= be_if_cap_flags(adapter);
Kalesh AP196e3732015-10-12 03:47:21 -04002024 if (!flags)
2025 return -ENOTSUPP;
Sathya Perlaac34b742015-02-06 08:18:40 -05002026
2027 return __be_cmd_rx_filter(adapter, flags, value);
2028}
2029
Sathya Perlab31c50a2009-09-17 10:30:13 -07002030/* Uses synchronous mcc */
Sathya Perla8788fdc2009-07-27 22:52:03 +00002031int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002032{
Sathya Perlab31c50a2009-09-17 10:30:13 -07002033 struct be_mcc_wrb *wrb;
2034 struct be_cmd_req_set_flow_control *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002035 int status;
2036
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002037 if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
2038 CMD_SUBSYSTEM_COMMON))
2039 return -EPERM;
2040
Sathya Perlab31c50a2009-09-17 10:30:13 -07002041 spin_lock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002042
Sathya Perlab31c50a2009-09-17 10:30:13 -07002043 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00002044 if (!wrb) {
2045 status = -EBUSY;
2046 goto err;
2047 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07002048 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002049
Somnath Kotur106df1e2011-10-27 07:12:13 +00002050 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302051 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
2052 wrb, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002053
Suresh Reddyb29812c2014-09-12 17:39:17 +05302054 req->hdr.version = 1;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002055 req->tx_flow_control = cpu_to_le16((u16)tx_fc);
2056 req->rx_flow_control = cpu_to_le16((u16)rx_fc);
2057
Sathya Perlab31c50a2009-09-17 10:30:13 -07002058 status = be_mcc_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002059
Sathya Perla713d03942009-11-22 22:02:45 +00002060err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07002061 spin_unlock_bh(&adapter->mcc_lock);
Suresh Reddyb29812c2014-09-12 17:39:17 +05302062
2063 if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
2064 return -EOPNOTSUPP;
2065
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002066 return status;
2067}
2068
Sathya Perlab31c50a2009-09-17 10:30:13 -07002069/* Uses sync mcc */
Sathya Perla8788fdc2009-07-27 22:52:03 +00002070int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002071{
Sathya Perlab31c50a2009-09-17 10:30:13 -07002072 struct be_mcc_wrb *wrb;
2073 struct be_cmd_req_get_flow_control *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002074 int status;
2075
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002076 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
2077 CMD_SUBSYSTEM_COMMON))
2078 return -EPERM;
2079
Sathya Perlab31c50a2009-09-17 10:30:13 -07002080 spin_lock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002081
Sathya Perlab31c50a2009-09-17 10:30:13 -07002082 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00002083 if (!wrb) {
2084 status = -EBUSY;
2085 goto err;
2086 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07002087 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002088
Somnath Kotur106df1e2011-10-27 07:12:13 +00002089 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302090 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
2091 wrb, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002092
Sathya Perlab31c50a2009-09-17 10:30:13 -07002093 status = be_mcc_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002094 if (!status) {
2095 struct be_cmd_resp_get_flow_control *resp =
2096 embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05302097
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002098 *tx_fc = le16_to_cpu(resp->tx_flow_control);
2099 *rx_fc = le16_to_cpu(resp->rx_flow_control);
2100 }
2101
Sathya Perla713d03942009-11-22 22:02:45 +00002102err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07002103 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002104 return status;
2105}
2106
Sathya Perlab31c50a2009-09-17 10:30:13 -07002107/* Uses mbox */
Kalesh APe97e3cd2014-07-17 16:20:26 +05302108int be_cmd_query_fw_cfg(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002109{
Sathya Perlab31c50a2009-09-17 10:30:13 -07002110 struct be_mcc_wrb *wrb;
2111 struct be_cmd_req_query_fw_cfg *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002112 int status;
2113
Ivan Vecera29849612010-12-14 05:43:19 +00002114 if (mutex_lock_interruptible(&adapter->mbox_lock))
2115 return -1;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002116
Sathya Perlab31c50a2009-09-17 10:30:13 -07002117 wrb = wrb_from_mbox(adapter);
2118 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002119
Somnath Kotur106df1e2011-10-27 07:12:13 +00002120 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302121 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
2122 sizeof(*req), wrb, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002123
Sathya Perlab31c50a2009-09-17 10:30:13 -07002124 status = be_mbox_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002125 if (!status) {
2126 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05302127
Kalesh APe97e3cd2014-07-17 16:20:26 +05302128 adapter->port_num = le32_to_cpu(resp->phys_port);
2129 adapter->function_mode = le32_to_cpu(resp->function_mode);
2130 adapter->function_caps = le32_to_cpu(resp->function_caps);
2131 adapter->asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
Sathya Perlaacbafeb2014-09-02 09:56:46 +05302132 dev_info(&adapter->pdev->dev,
2133 "FW config: function_mode=0x%x, function_caps=0x%x\n",
2134 adapter->function_mode, adapter->function_caps);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002135 }
2136
Ivan Vecera29849612010-12-14 05:43:19 +00002137 mutex_unlock(&adapter->mbox_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002138 return status;
2139}
sarveshwarb14074ea2009-08-05 13:05:24 -07002140
Sathya Perlab31c50a2009-09-17 10:30:13 -07002141/* Uses mbox */
sarveshwarb14074ea2009-08-05 13:05:24 -07002142int be_cmd_reset_function(struct be_adapter *adapter)
2143{
Sathya Perlab31c50a2009-09-17 10:30:13 -07002144 struct be_mcc_wrb *wrb;
2145 struct be_cmd_req_hdr *req;
sarveshwarb14074ea2009-08-05 13:05:24 -07002146 int status;
2147
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00002148 if (lancer_chip(adapter)) {
Sathya Perla9fa465c2015-02-23 04:20:13 -05002149 iowrite32(SLI_PORT_CONTROL_IP_MASK,
2150 adapter->db + SLIPORT_CONTROL_OFFSET);
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00002151 status = lancer_wait_ready(adapter);
Sathya Perla9fa465c2015-02-23 04:20:13 -05002152 if (status)
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00002153 dev_err(&adapter->pdev->dev,
2154 "Adapter in non recoverable error\n");
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00002155 return status;
2156 }
2157
Ivan Vecera29849612010-12-14 05:43:19 +00002158 if (mutex_lock_interruptible(&adapter->mbox_lock))
2159 return -1;
sarveshwarb14074ea2009-08-05 13:05:24 -07002160
Sathya Perlab31c50a2009-09-17 10:30:13 -07002161 wrb = wrb_from_mbox(adapter);
2162 req = embedded_payload(wrb);
sarveshwarb14074ea2009-08-05 13:05:24 -07002163
Somnath Kotur106df1e2011-10-27 07:12:13 +00002164 be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302165 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
2166 NULL);
sarveshwarb14074ea2009-08-05 13:05:24 -07002167
Sathya Perlab31c50a2009-09-17 10:30:13 -07002168 status = be_mbox_notify_wait(adapter);
sarveshwarb14074ea2009-08-05 13:05:24 -07002169
Ivan Vecera29849612010-12-14 05:43:19 +00002170 mutex_unlock(&adapter->mbox_lock);
sarveshwarb14074ea2009-08-05 13:05:24 -07002171 return status;
2172}
Ajit Khaparde84517482009-09-04 03:12:16 +00002173
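/* Programs the RSS indirection table and hash key. The table size is sent
 * as a log2 value, e.g. a 128-entry table is encoded as fls(128) - 1 = 7
 * (worked example; assumes a power-of-two table size as the callers use).
 */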
Suresh Reddy594ad542013-04-25 23:03:20 +00002174int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
Ben Hutchings33cb0fa2014-05-15 02:01:23 +01002175 u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey)
Sathya Perla3abcded2010-10-03 22:12:27 -07002176{
2177 struct be_mcc_wrb *wrb;
2178 struct be_cmd_req_rss_config *req;
Sathya Perla3abcded2010-10-03 22:12:27 -07002179 int status;
2180
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302181 if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
2182 return 0;
2183
Kalesh APb51aa362014-05-09 13:29:19 +05302184 spin_lock_bh(&adapter->mcc_lock);
Sathya Perla3abcded2010-10-03 22:12:27 -07002185
Kalesh APb51aa362014-05-09 13:29:19 +05302186 wrb = wrb_from_mccq(adapter);
2187 if (!wrb) {
2188 status = -EBUSY;
2189 goto err;
2190 }
Sathya Perla3abcded2010-10-03 22:12:27 -07002191 req = embedded_payload(wrb);
2192
Somnath Kotur106df1e2011-10-27 07:12:13 +00002193 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302194 OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
Sathya Perla3abcded2010-10-03 22:12:27 -07002195
2196 req->if_id = cpu_to_le32(adapter->if_handle);
Suresh Reddy594ad542013-04-25 23:03:20 +00002197 req->enable_rss = cpu_to_le16(rss_hash_opts);
Sathya Perla3abcded2010-10-03 22:12:27 -07002198 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
Suresh Reddy594ad542013-04-25 23:03:20 +00002199
Kalesh APb51aa362014-05-09 13:29:19 +05302200 if (!BEx_chip(adapter))
Suresh Reddy594ad542013-04-25 23:03:20 +00002201 req->hdr.version = 1;
2202
Sathya Perla3abcded2010-10-03 22:12:27 -07002203 memcpy(req->cpu_table, rsstable, table_size);
Venkata Duvvurue2557872014-04-21 15:38:00 +05302204 memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
Sathya Perla3abcded2010-10-03 22:12:27 -07002205 be_dws_cpu_to_le(req->hash, sizeof(req->hash));
2206
Kalesh APb51aa362014-05-09 13:29:19 +05302207 status = be_mcc_notify_wait(adapter);
2208err:
2209 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla3abcded2010-10-03 22:12:27 -07002210 return status;
2211}
2212
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002213/* Uses sync mcc */
2214int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302215 u8 bcn, u8 sts, u8 state)
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002216{
2217 struct be_mcc_wrb *wrb;
2218 struct be_cmd_req_enable_disable_beacon *req;
2219 int status;
2220
2221 spin_lock_bh(&adapter->mcc_lock);
2222
2223 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00002224 if (!wrb) {
2225 status = -EBUSY;
2226 goto err;
2227 }
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002228 req = embedded_payload(wrb);
2229
Somnath Kotur106df1e2011-10-27 07:12:13 +00002230 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302231 OPCODE_COMMON_ENABLE_DISABLE_BEACON,
2232 sizeof(*req), wrb, NULL);
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002233
2234 req->port_num = port_num;
2235 req->beacon_state = state;
2236 req->beacon_duration = bcn;
2237 req->status_duration = sts;
2238
2239 status = be_mcc_notify_wait(adapter);
2240
Sathya Perla713d03942009-11-22 22:02:45 +00002241err:
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002242 spin_unlock_bh(&adapter->mcc_lock);
2243 return status;
2244}
2245
2246/* Uses sync mcc */
2247int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
2248{
2249 struct be_mcc_wrb *wrb;
2250 struct be_cmd_req_get_beacon_state *req;
2251 int status;
2252
2253 spin_lock_bh(&adapter->mcc_lock);
2254
2255 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00002256 if (!wrb) {
2257 status = -EBUSY;
2258 goto err;
2259 }
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002260 req = embedded_payload(wrb);
2261
Somnath Kotur106df1e2011-10-27 07:12:13 +00002262 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302263 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
2264 wrb, NULL);
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002265
2266 req->port_num = port_num;
2267
2268 status = be_mcc_notify_wait(adapter);
2269 if (!status) {
2270 struct be_cmd_resp_get_beacon_state *resp =
2271 embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05302272
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002273 *state = resp->beacon_state;
2274 }
2275
Sathya Perla713d03942009-11-22 22:02:45 +00002276err:
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002277 spin_unlock_bh(&adapter->mcc_lock);
2278 return status;
2279}
2280
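/* Reads one page of the transceiver (SFP+/QSFP) module EEPROM over MCC.
 * TR_PAGE_A0/TR_PAGE_A2 are assumed to map to the standard SFF-8472 A0h/A2h
 * pages, and the caller's buffer must hold PAGE_DATA_LEN bytes (see the
 * memcpy below).
 */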
Mark Leonarde36edd92014-09-12 17:39:18 +05302281/* Uses sync mcc */
2282int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
2283 u8 page_num, u8 *data)
2284{
2285 struct be_dma_mem cmd;
2286 struct be_mcc_wrb *wrb;
2287 struct be_cmd_req_port_type *req;
2288 int status;
2289
2290 if (page_num > TR_PAGE_A2)
2291 return -EINVAL;
2292
2293 cmd.size = sizeof(struct be_cmd_resp_port_type);
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05302294 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2295 GFP_ATOMIC);
Mark Leonarde36edd92014-09-12 17:39:18 +05302296 if (!cmd.va) {
2297 dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
2298 return -ENOMEM;
2299 }
Mark Leonarde36edd92014-09-12 17:39:18 +05302300
2301 spin_lock_bh(&adapter->mcc_lock);
2302
2303 wrb = wrb_from_mccq(adapter);
2304 if (!wrb) {
2305 status = -EBUSY;
2306 goto err;
2307 }
2308 req = cmd.va;
2309
2310 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2311 OPCODE_COMMON_READ_TRANSRECV_DATA,
2312 cmd.size, wrb, &cmd);
2313
2314 req->port = cpu_to_le32(adapter->hba_port_num);
2315 req->page_num = cpu_to_le32(page_num);
2316 status = be_mcc_notify_wait(adapter);
2317 if (!status) {
2318 struct be_cmd_resp_port_type *resp = cmd.va;
2319
2320 memcpy(data, resp->page_data, PAGE_DATA_LEN);
2321 }
2322err:
2323 spin_unlock_bh(&adapter->mcc_lock);
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05302324 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Mark Leonarde36edd92014-09-12 17:39:18 +05302325 return status;
2326}
2327
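/* Lancer object write: the object data is laid out immediately after the
 * request header in the same DMA buffer, which is why the address handed to
 * firmware is cmd->dma offset by sizeof(struct lancer_cmd_req_write_object)
 * (a reading of the address setup below). The completion arrives
 * asynchronously and is waited for with a 60-second timeout.
 */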
Suresh Reddya23113b2015-12-30 01:28:59 -05002328static int lancer_cmd_write_object(struct be_adapter *adapter,
2329 struct be_dma_mem *cmd, u32 data_size,
2330 u32 data_offset, const char *obj_name,
2331 u32 *data_written, u8 *change_status,
2332 u8 *addn_status)
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002333{
2334 struct be_mcc_wrb *wrb;
2335 struct lancer_cmd_req_write_object *req;
2336 struct lancer_cmd_resp_write_object *resp;
2337 void *ctxt = NULL;
2338 int status;
2339
2340 spin_lock_bh(&adapter->mcc_lock);
2341 adapter->flash_status = 0;
2342
2343 wrb = wrb_from_mccq(adapter);
2344 if (!wrb) {
2345 status = -EBUSY;
2346 goto err_unlock;
2347 }
2348
2349 req = embedded_payload(wrb);
2350
Somnath Kotur106df1e2011-10-27 07:12:13 +00002351 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302352 OPCODE_COMMON_WRITE_OBJECT,
2353 sizeof(struct lancer_cmd_req_write_object), wrb,
2354 NULL);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002355
2356 ctxt = &req->context;
2357 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302358 write_length, ctxt, data_size);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002359
2360 if (data_size == 0)
2361 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302362 eof, ctxt, 1);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002363 else
2364 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302365 eof, ctxt, 0);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002366
2367 be_dws_cpu_to_le(ctxt, sizeof(req->context));
2368 req->write_offset = cpu_to_le32(data_offset);
Vasundhara Volam242eb472014-09-12 17:39:15 +05302369 strlcpy(req->object_name, obj_name, sizeof(req->object_name));
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002370 req->descriptor_count = cpu_to_le32(1);
2371 req->buf_len = cpu_to_le32(data_size);
2372 req->addr_low = cpu_to_le32((cmd->dma +
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302373 sizeof(struct lancer_cmd_req_write_object))
2374 & 0xFFFFFFFF);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002375 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
2376 sizeof(struct lancer_cmd_req_write_object)));
2377
Suresh Reddyefaa4082015-07-10 05:32:48 -04002378 status = be_mcc_notify(adapter);
2379 if (status)
2380 goto err_unlock;
2381
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002382 spin_unlock_bh(&adapter->mcc_lock);
2383
Suresh Reddy5eeff632014-01-06 13:02:24 +05302384 if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
Somnath Kotur701962d2013-05-02 03:36:34 +00002385 msecs_to_jiffies(60000)))
Kalesh APfd451602014-07-17 16:20:21 +05302386 status = -ETIMEDOUT;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002387 else
2388 status = adapter->flash_status;
2389
2390 resp = embedded_payload(wrb);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002391 if (!status) {
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002392 *data_written = le32_to_cpu(resp->actual_write_len);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002393 *change_status = resp->change_status;
2394 } else {
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002395 *addn_status = resp->additional_status;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002396 }
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002397
2398 return status;
2399
2400err_unlock:
2401 spin_unlock_bh(&adapter->mcc_lock);
2402 return status;
2403}
2404
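/* Reads page A0 of the transceiver EEPROM and caches the cable type for
 * QSFP and SFP+ interfaces.
 */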
Ravikumar Nelavelli6809cee2014-09-12 17:39:19 +05302405int be_cmd_query_cable_type(struct be_adapter *adapter)
2406{
2407 u8 page_data[PAGE_DATA_LEN];
2408 int status;
2409
2410 status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
2411 page_data);
2412 if (!status) {
2413 switch (adapter->phy.interface_type) {
2414 case PHY_TYPE_QSFP:
2415 adapter->phy.cable_type =
2416 page_data[QSFP_PLUS_CABLE_TYPE_OFFSET];
2417 break;
2418 case PHY_TYPE_SFP_PLUS_10GB:
2419 adapter->phy.cable_type =
2420 page_data[SFP_PLUS_CABLE_TYPE_OFFSET];
2421 break;
2422 default:
2423 adapter->phy.cable_type = 0;
2424 break;
2425 }
2426 }
2427 return status;
2428}
2429
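/* Reads page A0 of the transceiver EEPROM and caches the SFP vendor name
 * and part number.
 */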
Vasundhara Volam21252372015-02-06 08:18:42 -05002430int be_cmd_query_sfp_info(struct be_adapter *adapter)
2431{
2432 u8 page_data[PAGE_DATA_LEN];
2433 int status;
2434
2435 status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
2436 page_data);
2437 if (!status) {
2438 strlcpy(adapter->phy.vendor_name, page_data +
2439 SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);
2440 strlcpy(adapter->phy.vendor_pn,
2441 page_data + SFP_VENDOR_PN_OFFSET,
2442 SFP_VENDOR_NAME_LEN - 1);
2443 }
2444
2445 return status;
2446}
2447
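/* Deletes the named flash object on Lancer via the DELETE_OBJECT command */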
Suresh Reddya23113b2015-12-30 01:28:59 -05002448static int lancer_cmd_delete_object(struct be_adapter *adapter,
2449 const char *obj_name)
Kalesh APf0613382014-08-01 17:47:32 +05302450{
2451 struct lancer_cmd_req_delete_object *req;
2452 struct be_mcc_wrb *wrb;
2453 int status;
2454
2455 spin_lock_bh(&adapter->mcc_lock);
2456
2457 wrb = wrb_from_mccq(adapter);
2458 if (!wrb) {
2459 status = -EBUSY;
2460 goto err;
2461 }
2462
2463 req = embedded_payload(wrb);
2464
2465 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2466 OPCODE_COMMON_DELETE_OBJECT,
2467 sizeof(*req), wrb, NULL);
2468
Vasundhara Volam242eb472014-09-12 17:39:15 +05302469 strlcpy(req->object_name, obj_name, sizeof(req->object_name));
Kalesh APf0613382014-08-01 17:47:32 +05302470
2471 status = be_mcc_notify_wait(adapter);
2472err:
2473 spin_unlock_bh(&adapter->mcc_lock);
2474 return status;
2475}
2476
Padmanabh Ratnakarde49bd52011-11-16 02:02:43 +00002477int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302478 u32 data_size, u32 data_offset, const char *obj_name,
2479 u32 *data_read, u32 *eof, u8 *addn_status)
Padmanabh Ratnakarde49bd52011-11-16 02:02:43 +00002480{
2481 struct be_mcc_wrb *wrb;
2482 struct lancer_cmd_req_read_object *req;
2483 struct lancer_cmd_resp_read_object *resp;
2484 int status;
2485
2486 spin_lock_bh(&adapter->mcc_lock);
2487
2488 wrb = wrb_from_mccq(adapter);
2489 if (!wrb) {
2490 status = -EBUSY;
2491 goto err_unlock;
2492 }
2493
2494 req = embedded_payload(wrb);
2495
2496 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302497 OPCODE_COMMON_READ_OBJECT,
2498 sizeof(struct lancer_cmd_req_read_object), wrb,
2499 NULL);
Padmanabh Ratnakarde49bd52011-11-16 02:02:43 +00002500
2501 req->desired_read_len = cpu_to_le32(data_size);
2502 req->read_offset = cpu_to_le32(data_offset);
2503	strlcpy(req->object_name, obj_name, sizeof(req->object_name));
2504 req->descriptor_count = cpu_to_le32(1);
2505 req->buf_len = cpu_to_le32(data_size);
2506 req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
2507 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
2508
2509 status = be_mcc_notify_wait(adapter);
2510
2511 resp = embedded_payload(wrb);
2512 if (!status) {
2513 *data_read = le32_to_cpu(resp->actual_read_len);
2514 *eof = le32_to_cpu(resp->eof);
2515 } else {
2516 *addn_status = resp->additional_status;
2517 }
2518
2519err_unlock:
2520 spin_unlock_bh(&adapter->mcc_lock);
2521 return status;
2522}
2523
Suresh Reddya23113b2015-12-30 01:28:59 -05002524static int be_cmd_write_flashrom(struct be_adapter *adapter,
2525 struct be_dma_mem *cmd, u32 flash_type,
2526 u32 flash_opcode, u32 img_offset, u32 buf_size)
Ajit Khaparde84517482009-09-04 03:12:16 +00002527{
Sathya Perlab31c50a2009-09-17 10:30:13 -07002528 struct be_mcc_wrb *wrb;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002529 struct be_cmd_write_flashrom *req;
Ajit Khaparde84517482009-09-04 03:12:16 +00002530 int status;
2531
Sathya Perlab31c50a2009-09-17 10:30:13 -07002532 spin_lock_bh(&adapter->mcc_lock);
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07002533 adapter->flash_status = 0;
Sathya Perlab31c50a2009-09-17 10:30:13 -07002534
2535 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00002536 if (!wrb) {
2537 status = -EBUSY;
Dan Carpenter2892d9c2010-05-26 04:46:35 +00002538 goto err_unlock;
Sathya Perla713d03942009-11-22 22:02:45 +00002539 }
2540 req = cmd->va;
Sathya Perlab31c50a2009-09-17 10:30:13 -07002541
Somnath Kotur106df1e2011-10-27 07:12:13 +00002542 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302543 OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb,
2544 cmd);
Ajit Khaparde84517482009-09-04 03:12:16 +00002545
2546 req->params.op_type = cpu_to_le32(flash_type);
Vasundhara Volam70a7b522015-02-06 08:18:39 -05002547 if (flash_type == OPTYPE_OFFSET_SPECIFIED)
2548 req->params.offset = cpu_to_le32(img_offset);
2549
Ajit Khaparde84517482009-09-04 03:12:16 +00002550 req->params.op_code = cpu_to_le32(flash_opcode);
2551 req->params.data_buf_size = cpu_to_le32(buf_size);
2552
Suresh Reddyefaa4082015-07-10 05:32:48 -04002553 status = be_mcc_notify(adapter);
2554 if (status)
2555 goto err_unlock;
2556
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07002557 spin_unlock_bh(&adapter->mcc_lock);
2558
Suresh Reddy5eeff632014-01-06 13:02:24 +05302559 if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2560 msecs_to_jiffies(40000)))
Kalesh APfd451602014-07-17 16:20:21 +05302561 status = -ETIMEDOUT;
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07002562 else
2563 status = adapter->flash_status;
Ajit Khaparde84517482009-09-04 03:12:16 +00002564
Dan Carpenter2892d9c2010-05-26 04:46:35 +00002565 return status;
2566
2567err_unlock:
2568 spin_unlock_bh(&adapter->mcc_lock);
Ajit Khaparde84517482009-09-04 03:12:16 +00002569 return status;
2570}
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002571
Suresh Reddya23113b2015-12-30 01:28:59 -05002572static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2573 u16 img_optype, u32 img_offset, u32 crc_offset)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002574{
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00002575 struct be_cmd_read_flash_crc *req;
Vasundhara Volam70a7b522015-02-06 08:18:39 -05002576 struct be_mcc_wrb *wrb;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002577 int status;
2578
2579 spin_lock_bh(&adapter->mcc_lock);
2580
2581 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00002582 if (!wrb) {
2583 status = -EBUSY;
2584 goto err;
2585 }
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002586 req = embedded_payload(wrb);
2587
Somnath Kotur106df1e2011-10-27 07:12:13 +00002588 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00002589 OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
2590 wrb, NULL);
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002591
Vasundhara Volam70a7b522015-02-06 08:18:39 -05002592 req->params.op_type = cpu_to_le32(img_optype);
2593 if (img_optype == OPTYPE_OFFSET_SPECIFIED)
2594 req->params.offset = cpu_to_le32(img_offset + crc_offset);
2595 else
2596 req->params.offset = cpu_to_le32(crc_offset);
2597
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002598 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002599 req->params.data_buf_size = cpu_to_le32(0x4);
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002600
2601 status = be_mcc_notify_wait(adapter);
2602 if (!status)
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00002603 memcpy(flashed_crc, req->crc, 4);
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002604
Sathya Perla713d03942009-11-22 22:02:45 +00002605err:
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002606 spin_unlock_bh(&adapter->mcc_lock);
2607 return status;
2608}
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002609
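/* Flash-directory cookie ("*** SE FLASH DIRECTORY *** ") stored as two
 * 16-byte halves; get_fsec_info() matches all 32 bytes with memcmp().
 */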
Suresh Reddya23113b2015-12-30 01:28:59 -05002610static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2611
2612static bool phy_flashing_required(struct be_adapter *adapter)
2613{
2614 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
2615 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
2616}
2617
2618static bool is_comp_in_ufi(struct be_adapter *adapter,
2619 struct flash_section_info *fsec, int type)
2620{
2621 int i = 0, img_type = 0;
2622 struct flash_section_info_g2 *fsec_g2 = NULL;
2623
2624 if (BE2_chip(adapter))
2625 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2626
2627 for (i = 0; i < MAX_FLASH_COMP; i++) {
2628 if (fsec_g2)
2629 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2630 else
2631 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2632
2633 if (img_type == type)
2634 return true;
2635 }
2636 return false;
2637}
2638
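/* Scan the UFI image past the file/image headers for the flash-directory
 * cookie and return the flash section info that follows it.
 */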
2639static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2640 int header_size,
2641 const struct firmware *fw)
2642{
2643 struct flash_section_info *fsec = NULL;
2644 const u8 *p = fw->data;
2645
2646 p += header_size;
2647 while (p < (fw->data + fw->size)) {
2648 fsec = (struct flash_section_info *)p;
2649 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2650 return fsec;
2651 p += 32;
2652 }
2653 return NULL;
2654}
2655
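/* Compare the CRC of the region already in flash with the CRC stored in the
 * last 4 bytes of the image component; *crc_match tells the caller whether
 * flashing of this component can be skipped.
 */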
2656static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
2657 u32 img_offset, u32 img_size, int hdr_size,
2658 u16 img_optype, bool *crc_match)
2659{
2660 u32 crc_offset;
2661 int status;
2662 u8 crc[4];
2663
2664 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
2665 img_size - 4);
2666 if (status)
2667 return status;
2668
2669 crc_offset = hdr_size + img_offset + img_size - 4;
2670
2671	/* Skip flashing if the CRC of the already-flashed region matches */
2672 if (!memcmp(crc, p + crc_offset, 4))
2673 *crc_match = true;
2674 else
2675 *crc_match = false;
2676
2677 return status;
2678}
2679
2680static int be_flash(struct be_adapter *adapter, const u8 *img,
2681 struct be_dma_mem *flash_cmd, int optype, int img_size,
2682 u32 img_offset)
2683{
2684 u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
2685 struct be_cmd_write_flashrom *req = flash_cmd->va;
2686 int status;
2687
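	/* DMA the component to the card in chunks of at most 32KB; intermediate
	 * chunks use a SAVE op_code and the final chunk uses a FLASH (commit)
	 * op_code (PHY_SAVE/PHY_FLASH for PHY firmware).
	 */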
2688 while (total_bytes) {
2689 num_bytes = min_t(u32, 32 * 1024, total_bytes);
2690
2691 total_bytes -= num_bytes;
2692
2693 if (!total_bytes) {
2694 if (optype == OPTYPE_PHY_FW)
2695 flash_op = FLASHROM_OPER_PHY_FLASH;
2696 else
2697 flash_op = FLASHROM_OPER_FLASH;
2698 } else {
2699 if (optype == OPTYPE_PHY_FW)
2700 flash_op = FLASHROM_OPER_PHY_SAVE;
2701 else
2702 flash_op = FLASHROM_OPER_SAVE;
2703 }
2704
2705 memcpy(req->data_buf, img, num_bytes);
2706 img += num_bytes;
2707 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
2708 flash_op, img_offset +
2709 bytes_sent, num_bytes);
2710 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
2711 optype == OPTYPE_PHY_FW)
2712 break;
2713 else if (status)
2714 return status;
2715
2716 bytes_sent += num_bytes;
2717 }
2718 return 0;
2719}
2720
2721/* For BE2, BE3 and BE3-R */
2722static int be_flash_BEx(struct be_adapter *adapter,
2723 const struct firmware *fw,
2724 struct be_dma_mem *flash_cmd, int num_of_images)
2725{
2726 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
2727 struct device *dev = &adapter->pdev->dev;
2728 struct flash_section_info *fsec = NULL;
2729 int status, i, filehdr_size, num_comp;
2730 const struct flash_comp *pflashcomp;
2731 bool crc_match;
2732 const u8 *p;
2733
2734 struct flash_comp gen3_flash_types[] = {
2735 { BE3_ISCSI_PRIMARY_IMAGE_START, OPTYPE_ISCSI_ACTIVE,
2736 BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_ISCSI},
2737 { BE3_REDBOOT_START, OPTYPE_REDBOOT,
2738 BE3_REDBOOT_COMP_MAX_SIZE, IMAGE_BOOT_CODE},
2739 { BE3_ISCSI_BIOS_START, OPTYPE_BIOS,
2740 BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_ISCSI},
2741 { BE3_PXE_BIOS_START, OPTYPE_PXE_BIOS,
2742 BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_PXE},
2743 { BE3_FCOE_BIOS_START, OPTYPE_FCOE_BIOS,
2744 BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_FCOE},
2745 { BE3_ISCSI_BACKUP_IMAGE_START, OPTYPE_ISCSI_BACKUP,
2746 BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_ISCSI},
2747 { BE3_FCOE_PRIMARY_IMAGE_START, OPTYPE_FCOE_FW_ACTIVE,
2748 BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_FCOE},
2749 { BE3_FCOE_BACKUP_IMAGE_START, OPTYPE_FCOE_FW_BACKUP,
2750 BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_FCOE},
2751 { BE3_NCSI_START, OPTYPE_NCSI_FW,
2752 BE3_NCSI_COMP_MAX_SIZE, IMAGE_NCSI},
2753 { BE3_PHY_FW_START, OPTYPE_PHY_FW,
2754 BE3_PHY_FW_COMP_MAX_SIZE, IMAGE_FIRMWARE_PHY}
2755 };
2756
2757 struct flash_comp gen2_flash_types[] = {
2758 { BE2_ISCSI_PRIMARY_IMAGE_START, OPTYPE_ISCSI_ACTIVE,
2759 BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_ISCSI},
2760 { BE2_REDBOOT_START, OPTYPE_REDBOOT,
2761 BE2_REDBOOT_COMP_MAX_SIZE, IMAGE_BOOT_CODE},
2762 { BE2_ISCSI_BIOS_START, OPTYPE_BIOS,
2763 BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_ISCSI},
2764 { BE2_PXE_BIOS_START, OPTYPE_PXE_BIOS,
2765 BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_PXE},
2766 { BE2_FCOE_BIOS_START, OPTYPE_FCOE_BIOS,
2767 BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_FCOE},
2768 { BE2_ISCSI_BACKUP_IMAGE_START, OPTYPE_ISCSI_BACKUP,
2769 BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_ISCSI},
2770 { BE2_FCOE_PRIMARY_IMAGE_START, OPTYPE_FCOE_FW_ACTIVE,
2771 BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_FCOE},
2772 { BE2_FCOE_BACKUP_IMAGE_START, OPTYPE_FCOE_FW_BACKUP,
2773 BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_FCOE}
2774 };
2775
2776 if (BE3_chip(adapter)) {
2777 pflashcomp = gen3_flash_types;
2778 filehdr_size = sizeof(struct flash_file_hdr_g3);
2779 num_comp = ARRAY_SIZE(gen3_flash_types);
2780 } else {
2781 pflashcomp = gen2_flash_types;
2782 filehdr_size = sizeof(struct flash_file_hdr_g2);
2783 num_comp = ARRAY_SIZE(gen2_flash_types);
2784 img_hdrs_size = 0;
2785 }
2786
2787	/* Get flash section info */
2788 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
2789 if (!fsec) {
2790 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
2791 return -1;
2792 }
2793 for (i = 0; i < num_comp; i++) {
2794 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
2795 continue;
2796
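		/* The NCSI component can be flashed only if the FW on the card
		 * is version 3.102.148.0 or newer
		 */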
2797 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
2798 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2799 continue;
2800
2801 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
2802 !phy_flashing_required(adapter))
2803 continue;
2804
2805 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
2806 status = be_check_flash_crc(adapter, fw->data,
2807 pflashcomp[i].offset,
2808 pflashcomp[i].size,
2809 filehdr_size +
2810 img_hdrs_size,
2811 OPTYPE_REDBOOT, &crc_match);
2812 if (status) {
2813 dev_err(dev,
2814 "Could not get CRC for 0x%x region\n",
2815 pflashcomp[i].optype);
2816 continue;
2817 }
2818
2819 if (crc_match)
2820 continue;
2821 }
2822
2823 p = fw->data + filehdr_size + pflashcomp[i].offset +
2824 img_hdrs_size;
2825 if (p + pflashcomp[i].size > fw->data + fw->size)
2826 return -1;
2827
2828 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
2829 pflashcomp[i].size, 0);
2830 if (status) {
2831 dev_err(dev, "Flashing section type 0x%x failed\n",
2832 pflashcomp[i].img_type);
2833 return status;
2834 }
2835 }
2836 return 0;
2837}
2838
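/* Newer UFI images carry the flash op_type directly in the section entry;
 * for older images (optype == 0xFFFF) derive it from the image type.
 */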
2839static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
2840{
2841 u32 img_type = le32_to_cpu(fsec_entry.type);
2842 u16 img_optype = le16_to_cpu(fsec_entry.optype);
2843
2844 if (img_optype != 0xFFFF)
2845 return img_optype;
2846
2847 switch (img_type) {
2848 case IMAGE_FIRMWARE_ISCSI:
2849 img_optype = OPTYPE_ISCSI_ACTIVE;
2850 break;
2851 case IMAGE_BOOT_CODE:
2852 img_optype = OPTYPE_REDBOOT;
2853 break;
2854 case IMAGE_OPTION_ROM_ISCSI:
2855 img_optype = OPTYPE_BIOS;
2856 break;
2857 case IMAGE_OPTION_ROM_PXE:
2858 img_optype = OPTYPE_PXE_BIOS;
2859 break;
2860 case IMAGE_OPTION_ROM_FCOE:
2861 img_optype = OPTYPE_FCOE_BIOS;
2862 break;
2863 case IMAGE_FIRMWARE_BACKUP_ISCSI:
2864 img_optype = OPTYPE_ISCSI_BACKUP;
2865 break;
2866 case IMAGE_NCSI:
2867 img_optype = OPTYPE_NCSI_FW;
2868 break;
2869 case IMAGE_FLASHISM_JUMPVECTOR:
2870 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
2871 break;
2872 case IMAGE_FIRMWARE_PHY:
2873 img_optype = OPTYPE_SH_PHY_FW;
2874 break;
2875 case IMAGE_REDBOOT_DIR:
2876 img_optype = OPTYPE_REDBOOT_DIR;
2877 break;
2878 case IMAGE_REDBOOT_CONFIG:
2879 img_optype = OPTYPE_REDBOOT_CONFIG;
2880 break;
2881 case IMAGE_UFI_DIR:
2882 img_optype = OPTYPE_UFI_DIR;
2883 break;
2884 default:
2885 break;
2886 }
2887
2888 return img_optype;
2889}
2890
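/* Walks the UFI flash-section entries, skips components whose flashed CRC
 * already matches the image, and flashes the rest. If the FW on the card does
 * not support OFFSET-based flashing, the loop is retried with the older
 * OPTYPE-based mechanism.
 */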
2891static int be_flash_skyhawk(struct be_adapter *adapter,
2892 const struct firmware *fw,
2893 struct be_dma_mem *flash_cmd, int num_of_images)
2894{
2895 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
2896 bool crc_match, old_fw_img, flash_offset_support = true;
2897 struct device *dev = &adapter->pdev->dev;
2898 struct flash_section_info *fsec = NULL;
2899 u32 img_offset, img_size, img_type;
2900 u16 img_optype, flash_optype;
2901 int status, i, filehdr_size;
2902 const u8 *p;
2903
2904 filehdr_size = sizeof(struct flash_file_hdr_g3);
2905 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
2906 if (!fsec) {
2907 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
2908 return -EINVAL;
2909 }
2910
2911retry_flash:
2912 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
2913 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
2914 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
2915 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2916 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
2917 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
2918
2919 if (img_optype == 0xFFFF)
2920 continue;
2921
2922 if (flash_offset_support)
2923 flash_optype = OPTYPE_OFFSET_SPECIFIED;
2924 else
2925 flash_optype = img_optype;
2926
2927 /* Don't bother verifying CRC if an old FW image is being
2928 * flashed
2929 */
2930 if (old_fw_img)
2931 goto flash;
2932
2933 status = be_check_flash_crc(adapter, fw->data, img_offset,
2934 img_size, filehdr_size +
2935 img_hdrs_size, flash_optype,
2936 &crc_match);
2937 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
2938 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
2939 /* The current FW image on the card does not support
2940 * OFFSET based flashing. Retry using older mechanism
2941 * of OPTYPE based flashing
2942 */
2943 if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
2944 flash_offset_support = false;
2945 goto retry_flash;
2946 }
2947
2948 /* The current FW image on the card does not recognize
2949 * the new FLASH op_type. The FW download is partially
2950 * complete. Reboot the server now to enable FW image
2951 * to recognize the new FLASH op_type. To complete the
2952 * remaining process, download the same FW again after
2953 * the reboot.
2954 */
2955 dev_err(dev, "Flash incomplete. Reset the server\n");
2956 dev_err(dev, "Download FW image again after reset\n");
2957 return -EAGAIN;
2958 } else if (status) {
2959 dev_err(dev, "Could not get CRC for 0x%x region\n",
2960 img_optype);
2961 return -EFAULT;
2962 }
2963
2964 if (crc_match)
2965 continue;
2966
2967flash:
2968 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
2969 if (p + img_size > fw->data + fw->size)
2970 return -1;
2971
2972 status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
2973 img_offset);
2974
2975 /* The current FW image on the card does not support OFFSET
2976 * based flashing. Retry using older mechanism of OPTYPE based
2977 * flashing
2978 */
2979 if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
2980 flash_optype == OPTYPE_OFFSET_SPECIFIED) {
2981 flash_offset_support = false;
2982 goto retry_flash;
2983 }
2984
2985 /* For old FW images ignore ILLEGAL_FIELD error or errors on
2986 * UFI_DIR region
2987 */
2988 if (old_fw_img &&
2989 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
2990 (img_optype == OPTYPE_UFI_DIR &&
2991 base_status(status) == MCC_STATUS_FAILED))) {
2992 continue;
2993 } else if (status) {
2994 dev_err(dev, "Flashing section type 0x%x failed\n",
2995 img_type);
Suresh Reddy6b525782015-12-30 01:29:00 -05002996
2997 switch (addl_status(status)) {
2998 case MCC_ADDL_STATUS_MISSING_SIGNATURE:
2999 dev_err(dev,
3000 "Digital signature missing in FW\n");
3001 return -EINVAL;
3002 case MCC_ADDL_STATUS_INVALID_SIGNATURE:
3003 dev_err(dev,
3004 "Invalid digital signature in FW\n");
3005 return -EINVAL;
3006 default:
3007 return -EFAULT;
3008 }
Suresh Reddya23113b2015-12-30 01:28:59 -05003009 }
3010 }
3011 return 0;
3012}
3013
3014int lancer_fw_download(struct be_adapter *adapter,
3015 const struct firmware *fw)
3016{
3017 struct device *dev = &adapter->pdev->dev;
3018 struct be_dma_mem flash_cmd;
3019 const u8 *data_ptr = NULL;
3020 u8 *dest_image_ptr = NULL;
3021 size_t image_size = 0;
3022 u32 chunk_size = 0;
3023 u32 data_written = 0;
3024 u32 offset = 0;
3025 int status = 0;
3026 u8 add_status = 0;
3027 u8 change_status;
3028
3029 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3030		dev_err(dev, "FW image size should be a multiple of 4\n");
3031 return -EINVAL;
3032 }
3033
3034 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3035 + LANCER_FW_DOWNLOAD_CHUNK;
3036 flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
3037 &flash_cmd.dma, GFP_KERNEL);
3038 if (!flash_cmd.va)
3039 return -ENOMEM;
3040
3041 dest_image_ptr = flash_cmd.va +
3042 sizeof(struct lancer_cmd_req_write_object);
3043 image_size = fw->size;
3044 data_ptr = fw->data;
3045
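	/* Download the image in LANCER_FW_DOWNLOAD_CHUNK sized pieces; the
	 * write-object response reports how many bytes were actually consumed.
	 */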
3046 while (image_size) {
3047 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3048
3049 /* Copy the image chunk content. */
3050 memcpy(dest_image_ptr, data_ptr, chunk_size);
3051
3052 status = lancer_cmd_write_object(adapter, &flash_cmd,
3053 chunk_size, offset,
3054 LANCER_FW_DOWNLOAD_LOCATION,
3055 &data_written, &change_status,
3056 &add_status);
3057 if (status)
3058 break;
3059
3060 offset += data_written;
3061 data_ptr += data_written;
3062 image_size -= data_written;
3063 }
3064
3065 if (!status) {
3066 /* Commit the FW written */
3067 status = lancer_cmd_write_object(adapter, &flash_cmd,
3068 0, offset,
3069 LANCER_FW_DOWNLOAD_LOCATION,
3070 &data_written, &change_status,
3071 &add_status);
3072 }
3073
3074 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
3075 if (status) {
3076 dev_err(dev, "Firmware load error\n");
3077 return be_cmd_status(status);
3078 }
3079
3080 dev_info(dev, "Firmware flashed successfully\n");
3081
3082 if (change_status == LANCER_FW_RESET_NEEDED) {
3083 dev_info(dev, "Resetting adapter to activate new FW\n");
3084 status = lancer_physdev_ctrl(adapter,
3085 PHYSDEV_CONTROL_FW_RESET_MASK);
3086 if (status) {
3087 dev_err(dev, "Adapter busy, could not reset FW\n");
3088 dev_err(dev, "Reboot server to activate new FW\n");
3089 }
3090 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3091 dev_info(dev, "Reboot server to activate new FW\n");
3092 }
3093
3094 return 0;
3095}
3096
3097/* Check if the flash image file is compatible with the adapter that
3098 * is being flashed.
3099 */
3100static bool be_check_ufi_compatibility(struct be_adapter *adapter,
3101 struct flash_file_hdr_g3 *fhdr)
3102{
3103 if (!fhdr) {
3104 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
3105 return false;
3106 }
3107
3108 /* First letter of the build version is used to identify
3109 * which chip this image file is meant for.
3110 */
3111 switch (fhdr->build[0]) {
3112 case BLD_STR_UFI_TYPE_SH:
3113 if (!skyhawk_chip(adapter))
3114 return false;
3115 break;
3116 case BLD_STR_UFI_TYPE_BE3:
3117 if (!BE3_chip(adapter))
3118 return false;
3119 break;
3120 case BLD_STR_UFI_TYPE_BE2:
3121 if (!BE2_chip(adapter))
3122 return false;
3123 break;
3124 default:
3125 return false;
3126 }
3127
3128 /* In BE3 FW images the "asic_type_rev" field doesn't track the
3129 * asic_rev of the chips it is compatible with.
3130 * When asic_type_rev is 0 the image is compatible only with
3131 * pre-BE3-R chips (asic_rev < 0x10)
3132 */
3133 if (BEx_chip(adapter) && fhdr->asic_type_rev == 0)
3134 return adapter->asic_rev < 0x10;
3135 else
3136 return (fhdr->asic_type_rev >= adapter->asic_rev);
3137}
3138
3139int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3140{
3141 struct device *dev = &adapter->pdev->dev;
3142 struct flash_file_hdr_g3 *fhdr3;
3143 struct image_hdr *img_hdr_ptr;
3144 int status = 0, i, num_imgs;
3145 struct be_dma_mem flash_cmd;
3146
3147 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
3148 if (!be_check_ufi_compatibility(adapter, fhdr3)) {
3149 dev_err(dev, "Flash image is not compatible with adapter\n");
3150 return -EINVAL;
3151 }
3152
3153 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3154 flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
3155 GFP_KERNEL);
3156 if (!flash_cmd.va)
3157 return -ENOMEM;
3158
3159 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3160 for (i = 0; i < num_imgs; i++) {
3161 img_hdr_ptr = (struct image_hdr *)(fw->data +
3162 (sizeof(struct flash_file_hdr_g3) +
3163 i * sizeof(struct image_hdr)));
3164 if (!BE2_chip(adapter) &&
3165 le32_to_cpu(img_hdr_ptr->imageid) != 1)
3166 continue;
3167
3168 if (skyhawk_chip(adapter))
3169 status = be_flash_skyhawk(adapter, fw, &flash_cmd,
3170 num_imgs);
3171 else
3172 status = be_flash_BEx(adapter, fw, &flash_cmd,
3173 num_imgs);
3174 }
3175
3176 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
3177 if (!status)
3178 dev_info(dev, "Firmware flashed successfully\n");
3179
3180 return status;
3181}
3182
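/* Tells the FW which MAC address to match for magic-packet (ACPI WoL) wake-up */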
Dan Carpenterc196b022010-05-26 04:47:39 +00003183int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303184 struct be_dma_mem *nonemb_cmd)
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003185{
3186 struct be_mcc_wrb *wrb;
3187 struct be_cmd_req_acpi_wol_magic_config *req;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003188 int status;
3189
3190 spin_lock_bh(&adapter->mcc_lock);
3191
3192 wrb = wrb_from_mccq(adapter);
3193 if (!wrb) {
3194 status = -EBUSY;
3195 goto err;
3196 }
3197 req = nonemb_cmd->va;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003198
Somnath Kotur106df1e2011-10-27 07:12:13 +00003199 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303200 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
3201 wrb, nonemb_cmd);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003202 memcpy(req->magic_mac, mac, ETH_ALEN);
3203
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003204 status = be_mcc_notify_wait(adapter);
3205
3206err:
3207 spin_unlock_bh(&adapter->mcc_lock);
3208 return status;
3209}
Suresh Rff33a6e2009-12-03 16:15:52 -08003210
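/* Enables or disables the given loopback mode on the port; the FW completion
 * is delivered asynchronously and waited for with a timeout.
 */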
Sarveshwar Bandifced9992009-12-23 04:41:44 +00003211int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
3212 u8 loopback_type, u8 enable)
3213{
3214 struct be_mcc_wrb *wrb;
3215 struct be_cmd_req_set_lmode *req;
3216 int status;
3217
Somnath Kotur2e365b12016-02-03 09:49:20 +05303218 if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
3219 CMD_SUBSYSTEM_LOWLEVEL))
3220 return -EPERM;
3221
Sarveshwar Bandifced9992009-12-23 04:41:44 +00003222 spin_lock_bh(&adapter->mcc_lock);
3223
3224 wrb = wrb_from_mccq(adapter);
3225 if (!wrb) {
3226 status = -EBUSY;
Suresh Reddy9c855972015-07-10 05:32:50 -04003227 goto err_unlock;
Sarveshwar Bandifced9992009-12-23 04:41:44 +00003228 }
3229
3230 req = embedded_payload(wrb);
3231
Somnath Kotur106df1e2011-10-27 07:12:13 +00003232 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303233 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
3234 wrb, NULL);
Sarveshwar Bandifced9992009-12-23 04:41:44 +00003235
3236 req->src_port = port_num;
3237 req->dest_port = port_num;
3238 req->loopback_type = loopback_type;
3239 req->loopback_state = enable;
3240
Suresh Reddy9c855972015-07-10 05:32:50 -04003241 status = be_mcc_notify(adapter);
3242 if (status)
3243 goto err_unlock;
3244
3245 spin_unlock_bh(&adapter->mcc_lock);
3246
3247 if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
3248 msecs_to_jiffies(SET_LB_MODE_TIMEOUT)))
3249 status = -ETIMEDOUT;
3250
3251 return status;
3252
3253err_unlock:
Sarveshwar Bandifced9992009-12-23 04:41:44 +00003254 spin_unlock_bh(&adapter->mcc_lock);
3255 return status;
3256}
3257
Suresh Rff33a6e2009-12-03 16:15:52 -08003258int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303259 u32 loopback_type, u32 pkt_size, u32 num_pkts,
3260 u64 pattern)
Suresh Rff33a6e2009-12-03 16:15:52 -08003261{
3262 struct be_mcc_wrb *wrb;
3263 struct be_cmd_req_loopback_test *req;
Suresh Reddy5eeff632014-01-06 13:02:24 +05303264 struct be_cmd_resp_loopback_test *resp;
Suresh Rff33a6e2009-12-03 16:15:52 -08003265 int status;
3266
Somnath Kotur2e365b12016-02-03 09:49:20 +05303267 if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_LOOPBACK_TEST,
3268 CMD_SUBSYSTEM_LOWLEVEL))
3269 return -EPERM;
3270
Suresh Rff33a6e2009-12-03 16:15:52 -08003271 spin_lock_bh(&adapter->mcc_lock);
3272
3273 wrb = wrb_from_mccq(adapter);
3274 if (!wrb) {
3275 status = -EBUSY;
3276 goto err;
3277 }
3278
3279 req = embedded_payload(wrb);
3280
Somnath Kotur106df1e2011-10-27 07:12:13 +00003281 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303282 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb,
3283 NULL);
Suresh Rff33a6e2009-12-03 16:15:52 -08003284
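	/* The loopback test can take several seconds in FW; allow it 15s */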
Suresh Reddy5eeff632014-01-06 13:02:24 +05303285 req->hdr.timeout = cpu_to_le32(15);
Suresh Rff33a6e2009-12-03 16:15:52 -08003286 req->pattern = cpu_to_le64(pattern);
3287 req->src_port = cpu_to_le32(port_num);
3288 req->dest_port = cpu_to_le32(port_num);
3289 req->pkt_size = cpu_to_le32(pkt_size);
3290 req->num_pkts = cpu_to_le32(num_pkts);
3291 req->loopback_type = cpu_to_le32(loopback_type);
3292
Suresh Reddyefaa4082015-07-10 05:32:48 -04003293 status = be_mcc_notify(adapter);
3294 if (status)
3295 goto err;
Suresh Rff33a6e2009-12-03 16:15:52 -08003296
Suresh Reddy5eeff632014-01-06 13:02:24 +05303297 spin_unlock_bh(&adapter->mcc_lock);
3298
3299 wait_for_completion(&adapter->et_cmd_compl);
3300 resp = embedded_payload(wrb);
3301 status = le32_to_cpu(resp->status);
3302
3303 return status;
Suresh Rff33a6e2009-12-03 16:15:52 -08003304err:
3305 spin_unlock_bh(&adapter->mcc_lock);
3306 return status;
3307}
3308
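/* DMAs the given pattern to the card's DDR and back, and verifies that the
 * received buffer matches what was sent.
 */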
3309int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303310 u32 byte_cnt, struct be_dma_mem *cmd)
Suresh Rff33a6e2009-12-03 16:15:52 -08003311{
3312 struct be_mcc_wrb *wrb;
3313 struct be_cmd_req_ddrdma_test *req;
Suresh Rff33a6e2009-12-03 16:15:52 -08003314 int status;
3315 int i, j = 0;
3316
Somnath Kotur2e365b12016-02-03 09:49:20 +05303317 if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_HOST_DDR_DMA,
3318 CMD_SUBSYSTEM_LOWLEVEL))
3319 return -EPERM;
3320
Suresh Rff33a6e2009-12-03 16:15:52 -08003321 spin_lock_bh(&adapter->mcc_lock);
3322
3323 wrb = wrb_from_mccq(adapter);
3324 if (!wrb) {
3325 status = -EBUSY;
3326 goto err;
3327 }
3328 req = cmd->va;
Somnath Kotur106df1e2011-10-27 07:12:13 +00003329 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303330 OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb,
3331 cmd);
Suresh Rff33a6e2009-12-03 16:15:52 -08003332
3333 req->pattern = cpu_to_le64(pattern);
3334 req->byte_count = cpu_to_le32(byte_cnt);
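	/* Fill the send buffer by repeating the 8-byte pattern */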
3335 for (i = 0; i < byte_cnt; i++) {
3336 req->snd_buff[i] = (u8)(pattern >> (j*8));
3337 j++;
3338 if (j > 7)
3339 j = 0;
3340 }
3341
3342 status = be_mcc_notify_wait(adapter);
3343
3344 if (!status) {
3345 struct be_cmd_resp_ddrdma_test *resp;
Kalesh AP03d28ff2014-09-19 15:46:56 +05303346
Suresh Rff33a6e2009-12-03 16:15:52 -08003347 resp = cmd->va;
3348 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
Kalesh APcd3307aa2014-09-19 15:47:02 +05303349 resp->snd_err) {
Suresh Rff33a6e2009-12-03 16:15:52 -08003350 status = -1;
3351 }
3352 }
3353
3354err:
3355 spin_unlock_bh(&adapter->mcc_lock);
3356 return status;
3357}
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08003358
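/* Reads the adapter's SEEPROM contents into the caller-supplied DMA buffer */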
Dan Carpenterc196b022010-05-26 04:47:39 +00003359int be_cmd_get_seeprom_data(struct be_adapter *adapter,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303360 struct be_dma_mem *nonemb_cmd)
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08003361{
3362 struct be_mcc_wrb *wrb;
3363 struct be_cmd_req_seeprom_read *req;
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08003364 int status;
3365
3366 spin_lock_bh(&adapter->mcc_lock);
3367
3368 wrb = wrb_from_mccq(adapter);
Ajit Khapardee45ff012011-02-04 17:18:28 +00003369 if (!wrb) {
3370 status = -EBUSY;
3371 goto err;
3372 }
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08003373 req = nonemb_cmd->va;
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08003374
Somnath Kotur106df1e2011-10-27 07:12:13 +00003375 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303376 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
3377 nonemb_cmd);
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08003378
3379 status = be_mcc_notify_wait(adapter);
3380
Ajit Khapardee45ff012011-02-04 17:18:28 +00003381err:
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08003382 spin_unlock_bh(&adapter->mcc_lock);
3383 return status;
3384}
Ajit Khapardeee3cb622010-07-01 03:51:00 +00003385
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003386int be_cmd_get_phy_info(struct be_adapter *adapter)
Ajit Khapardeee3cb622010-07-01 03:51:00 +00003387{
3388 struct be_mcc_wrb *wrb;
3389 struct be_cmd_req_get_phy_info *req;
Sathya Perla306f1342011-08-02 19:57:45 +00003390 struct be_dma_mem cmd;
Ajit Khapardeee3cb622010-07-01 03:51:00 +00003391 int status;
3392
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003393 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
3394 CMD_SUBSYSTEM_COMMON))
3395 return -EPERM;
3396
Ajit Khapardeee3cb622010-07-01 03:51:00 +00003397 spin_lock_bh(&adapter->mcc_lock);
3398
3399 wrb = wrb_from_mccq(adapter);
3400 if (!wrb) {
3401 status = -EBUSY;
3402 goto err;
3403 }
Sathya Perla306f1342011-08-02 19:57:45 +00003404 cmd.size = sizeof(struct be_cmd_req_get_phy_info);
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05303405 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3406 GFP_ATOMIC);
Sathya Perla306f1342011-08-02 19:57:45 +00003407 if (!cmd.va) {
3408 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3409 status = -ENOMEM;
3410 goto err;
3411 }
Ajit Khapardeee3cb622010-07-01 03:51:00 +00003412
Sathya Perla306f1342011-08-02 19:57:45 +00003413 req = cmd.va;
Ajit Khapardeee3cb622010-07-01 03:51:00 +00003414
Somnath Kotur106df1e2011-10-27 07:12:13 +00003415 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303416 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
3417 wrb, &cmd);
Ajit Khapardeee3cb622010-07-01 03:51:00 +00003418
3419 status = be_mcc_notify_wait(adapter);
Sathya Perla306f1342011-08-02 19:57:45 +00003420 if (!status) {
3421 struct be_phy_info *resp_phy_info =
3422 cmd.va + sizeof(struct be_cmd_req_hdr);
Kalesh AP03d28ff2014-09-19 15:46:56 +05303423
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003424 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
3425 adapter->phy.interface_type =
Sathya Perla306f1342011-08-02 19:57:45 +00003426 le16_to_cpu(resp_phy_info->interface_type);
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003427 adapter->phy.auto_speeds_supported =
3428 le16_to_cpu(resp_phy_info->auto_speeds_supported);
3429 adapter->phy.fixed_speeds_supported =
3430 le16_to_cpu(resp_phy_info->fixed_speeds_supported);
3431 adapter->phy.misc_params =
3432 le32_to_cpu(resp_phy_info->misc_params);
Vasundhara Volam68cb7e42013-08-06 09:27:18 +05303433
3434 if (BE2_chip(adapter)) {
3435 adapter->phy.fixed_speeds_supported =
3436 BE_SUPPORTED_SPEED_10GBPS |
3437 BE_SUPPORTED_SPEED_1GBPS;
3438 }
Sathya Perla306f1342011-08-02 19:57:45 +00003439 }
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05303440 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khapardeee3cb622010-07-01 03:51:00 +00003441err:
3442 spin_unlock_bh(&adapter->mcc_lock);
3443 return status;
3444}
Ajit Khapardee1d18732010-07-23 01:52:13 +00003445
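/* Programs the NIC rate limit (max_bps_nic) for the function in the given
 * domain.
 */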
Lad, Prabhakarbc0ee162015-02-05 15:24:43 +00003446static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
Ajit Khapardee1d18732010-07-23 01:52:13 +00003447{
3448 struct be_mcc_wrb *wrb;
3449 struct be_cmd_req_set_qos *req;
3450 int status;
3451
3452 spin_lock_bh(&adapter->mcc_lock);
3453
3454 wrb = wrb_from_mccq(adapter);
3455 if (!wrb) {
3456 status = -EBUSY;
3457 goto err;
3458 }
3459
3460 req = embedded_payload(wrb);
3461
Somnath Kotur106df1e2011-10-27 07:12:13 +00003462 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303463 OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
Ajit Khapardee1d18732010-07-23 01:52:13 +00003464
3465 req->hdr.domain = domain;
Ajit Khaparde6bff57a2011-02-11 13:33:02 +00003466 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
3467 req->max_bps_nic = cpu_to_le32(bps);
Ajit Khapardee1d18732010-07-23 01:52:13 +00003468
3469 status = be_mcc_notify_wait(adapter);
3470
3471err:
3472 spin_unlock_bh(&adapter->mcc_lock);
3473 return status;
3474}
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003475
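/* Uses the bootstrap mailbox to read controller attributes; caches the HBA
 * port number and the controller serial-number words.
 */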
3476int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
3477{
3478 struct be_mcc_wrb *wrb;
3479 struct be_cmd_req_cntl_attribs *req;
3480 struct be_cmd_resp_cntl_attribs *resp;
Sriharsha Basavapatnaa155a5d2015-07-22 11:15:12 +05303481 int status, i;
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003482 int payload_len = max(sizeof(*req), sizeof(*resp));
3483 struct mgmt_controller_attrib *attribs;
3484 struct be_dma_mem attribs_cmd;
Sriharsha Basavapatnaa155a5d2015-07-22 11:15:12 +05303485 u32 *serial_num;
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003486
Suresh Reddyd98ef502013-04-25 00:56:55 +00003487 if (mutex_lock_interruptible(&adapter->mbox_lock))
3488 return -1;
3489
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003490 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
3491 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05303492 attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
3493 attribs_cmd.size,
3494 &attribs_cmd.dma, GFP_ATOMIC);
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003495 if (!attribs_cmd.va) {
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303496 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
Suresh Reddyd98ef502013-04-25 00:56:55 +00003497 status = -ENOMEM;
3498 goto err;
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003499 }
3500
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003501 wrb = wrb_from_mbox(adapter);
3502 if (!wrb) {
3503 status = -EBUSY;
3504 goto err;
3505 }
3506 req = attribs_cmd.va;
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003507
Somnath Kotur106df1e2011-10-27 07:12:13 +00003508 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303509 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len,
3510 wrb, &attribs_cmd);
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003511
3512 status = be_mbox_notify_wait(adapter);
3513 if (!status) {
Joe Perches43d620c2011-06-16 19:08:06 +00003514 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003515 adapter->hba_port_num = attribs->hba_attribs.phy_port;
Sriharsha Basavapatnaa155a5d2015-07-22 11:15:12 +05303516 serial_num = attribs->hba_attribs.controller_serial_number;
3517 for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++)
3518 adapter->serial_num[i] = le32_to_cpu(serial_num[i]) &
3519 (BIT_MASK(16) - 1);
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003520 }
3521
3522err:
3523 mutex_unlock(&adapter->mbox_lock);
Suresh Reddyd98ef502013-04-25 00:56:55 +00003524 if (attribs_cmd.va)
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05303525 dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size,
3526 attribs_cmd.va, attribs_cmd.dma);
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003527 return status;
3528}
Sathya Perla2e588f82011-03-11 02:49:26 +00003529
3530/* Uses mbox */
Sathya Perla2dc1deb2011-07-19 19:52:33 +00003531int be_cmd_req_native_mode(struct be_adapter *adapter)
Sathya Perla2e588f82011-03-11 02:49:26 +00003532{
3533 struct be_mcc_wrb *wrb;
3534 struct be_cmd_req_set_func_cap *req;
3535 int status;
3536
3537 if (mutex_lock_interruptible(&adapter->mbox_lock))
3538 return -1;
3539
3540 wrb = wrb_from_mbox(adapter);
3541 if (!wrb) {
3542 status = -EBUSY;
3543 goto err;
3544 }
3545
3546 req = embedded_payload(wrb);
3547
Somnath Kotur106df1e2011-10-27 07:12:13 +00003548 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303549 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP,
3550 sizeof(*req), wrb, NULL);
Sathya Perla2e588f82011-03-11 02:49:26 +00003551
3552 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
3553 CAPABILITY_BE3_NATIVE_ERX_API);
3554 req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
3555
3556 status = be_mbox_notify_wait(adapter);
3557 if (!status) {
3558 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05303559
Sathya Perla2e588f82011-03-11 02:49:26 +00003560 adapter->be3_native = le32_to_cpu(resp->cap_flags) &
3561 CAPABILITY_BE3_NATIVE_ERX_API;
Sathya Perlad3791422012-09-28 04:39:44 +00003562 if (!adapter->be3_native)
3563 dev_warn(&adapter->pdev->dev,
3564 "adapter not in advanced mode\n");
Sathya Perla2e588f82011-03-11 02:49:26 +00003565 }
3566err:
3567 mutex_unlock(&adapter->mbox_lock);
3568 return status;
3569}
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003570
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003571/* Get privilege(s) for a function */
3572int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
3573 u32 domain)
3574{
3575 struct be_mcc_wrb *wrb;
3576 struct be_cmd_req_get_fn_privileges *req;
3577 int status;
3578
3579 spin_lock_bh(&adapter->mcc_lock);
3580
3581 wrb = wrb_from_mccq(adapter);
3582 if (!wrb) {
3583 status = -EBUSY;
3584 goto err;
3585 }
3586
3587 req = embedded_payload(wrb);
3588
3589 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3590 OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
3591 wrb, NULL);
3592
3593 req->hdr.domain = domain;
3594
3595 status = be_mcc_notify_wait(adapter);
3596 if (!status) {
3597 struct be_cmd_resp_get_fn_privileges *resp =
3598 embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05303599
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003600 *privilege = le32_to_cpu(resp->privilege_mask);
Suresh Reddy02308d72014-01-15 13:23:36 +05303601
3602		/* In UMC mode the FW does not return the right privileges.
3603		 * Override with privileges equivalent to those of a PF.
3604		 */
3605 if (BEx_chip(adapter) && be_is_mc(adapter) &&
3606 be_physfn(adapter))
3607 *privilege = MAX_PRIVILEGES;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003608 }
3609
3610err:
3611 spin_unlock_bh(&adapter->mcc_lock);
3612 return status;
3613}
3614
Sathya Perla04a06022013-07-23 15:25:00 +05303615/* Set privilege(s) for a function */
3616int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
3617 u32 domain)
3618{
3619 struct be_mcc_wrb *wrb;
3620 struct be_cmd_req_set_fn_privileges *req;
3621 int status;
3622
3623 spin_lock_bh(&adapter->mcc_lock);
3624
3625 wrb = wrb_from_mccq(adapter);
3626 if (!wrb) {
3627 status = -EBUSY;
3628 goto err;
3629 }
3630
3631 req = embedded_payload(wrb);
3632 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3633 OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
3634 wrb, NULL);
3635 req->hdr.domain = domain;
3636 if (lancer_chip(adapter))
3637 req->privileges_lancer = cpu_to_le32(privileges);
3638 else
3639 req->privileges = cpu_to_le32(privileges);
3640
3641 status = be_mcc_notify_wait(adapter);
3642err:
3643 spin_unlock_bh(&adapter->mcc_lock);
3644 return status;
3645}
3646
Sathya Perla5a712c12013-07-23 15:24:59 +05303647/* pmac_id_valid: true => pmac_id is supplied and its MAC address is requested.
3648 * pmac_id_valid: false => an active pmac_id or the permanent MAC is requested.
3649 * If an active pmac_id is found, it is returned and pmac_id_valid is set true.
3650 */
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003651int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
Suresh Reddyb188f092014-01-15 13:23:39 +05303652 bool *pmac_id_valid, u32 *pmac_id, u32 if_handle,
3653 u8 domain)
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003654{
3655 struct be_mcc_wrb *wrb;
3656 struct be_cmd_req_get_mac_list *req;
3657 int status;
3658 int mac_count;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003659 struct be_dma_mem get_mac_list_cmd;
3660 int i;
3661
3662 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
3663 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05303664 get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
3665 get_mac_list_cmd.size,
3666 &get_mac_list_cmd.dma,
3667 GFP_ATOMIC);
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003668
3669 if (!get_mac_list_cmd.va) {
3670 dev_err(&adapter->pdev->dev,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303671 "Memory allocation failure during GET_MAC_LIST\n");
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003672 return -ENOMEM;
3673 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003674
3675 spin_lock_bh(&adapter->mcc_lock);
3676
3677 wrb = wrb_from_mccq(adapter);
3678 if (!wrb) {
3679 status = -EBUSY;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003680 goto out;
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003681 }
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003682
3683 req = get_mac_list_cmd.va;
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003684
3685 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlabf591f52013-05-08 02:05:48 +00003686 OPCODE_COMMON_GET_MAC_LIST,
3687 get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003688 req->hdr.domain = domain;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003689 req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
Sathya Perla5a712c12013-07-23 15:24:59 +05303690 if (*pmac_id_valid) {
3691 req->mac_id = cpu_to_le32(*pmac_id);
Suresh Reddyb188f092014-01-15 13:23:39 +05303692 req->iface_id = cpu_to_le16(if_handle);
Sathya Perla5a712c12013-07-23 15:24:59 +05303693 req->perm_override = 0;
3694 } else {
3695 req->perm_override = 1;
3696 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003697
3698 status = be_mcc_notify_wait(adapter);
3699 if (!status) {
3700 struct be_cmd_resp_get_mac_list *resp =
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003701 get_mac_list_cmd.va;
Sathya Perla5a712c12013-07-23 15:24:59 +05303702
3703 if (*pmac_id_valid) {
3704 memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
3705 ETH_ALEN);
3706 goto out;
3707 }
3708
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003709 mac_count = resp->true_mac_count + resp->pseudo_mac_count;
3710		/* The returned MAC list can contain one or more active mac_ids
Joe Perchesdbedd442015-03-06 20:49:12 -08003711		 * and/or one or more true or pseudo permanent MAC addresses.
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003712		 * If an active mac_id is present, return the first active mac_id
3713		 * found.
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003714 */
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003715 for (i = 0; i < mac_count; i++) {
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003716 struct get_list_macaddr *mac_entry;
3717 u16 mac_addr_size;
3718 u32 mac_id;
3719
3720 mac_entry = &resp->macaddr_list[i];
3721 mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
3722 /* mac_id is a 32 bit value and mac_addr size
3723 * is 6 bytes
3724 */
3725 if (mac_addr_size == sizeof(u32)) {
Sathya Perla5a712c12013-07-23 15:24:59 +05303726 *pmac_id_valid = true;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003727 mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
3728 *pmac_id = le32_to_cpu(mac_id);
3729 goto out;
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003730 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003731 }
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003732 /* If no active mac_id found, return first mac addr */
Sathya Perla5a712c12013-07-23 15:24:59 +05303733 *pmac_id_valid = false;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003734 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303735 ETH_ALEN);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003736 }
3737
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003738out:
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003739 spin_unlock_bh(&adapter->mcc_lock);
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05303740 dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
3741 get_mac_list_cmd.va, get_mac_list_cmd.dma);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003742 return status;
3743}
3744
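/* Fetches the MAC address currently in use: looks up the active pmac_id first
 * if needed, then queries the MAC by pmac_id on BEx chips or from the FW MAC
 * list otherwise.
 */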
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303745int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
3746 u8 *mac, u32 if_handle, bool active, u32 domain)
Sathya Perla5a712c12013-07-23 15:24:59 +05303747{
Suresh Reddyb188f092014-01-15 13:23:39 +05303748 if (!active)
3749 be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id,
3750 if_handle, domain);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303751 if (BEx_chip(adapter))
Sathya Perla5a712c12013-07-23 15:24:59 +05303752 return be_cmd_mac_addr_query(adapter, mac, false,
Suresh Reddyb188f092014-01-15 13:23:39 +05303753 if_handle, curr_pmac_id);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303754 else
3755 /* Fetch the MAC address using pmac_id */
3756 return be_cmd_get_mac_from_list(adapter, mac, &active,
Suresh Reddyb188f092014-01-15 13:23:39 +05303757 &curr_pmac_id,
3758 if_handle, domain);
Sathya Perla5a712c12013-07-23 15:24:59 +05303759}
3760
Sathya Perla95046b92013-07-23 15:25:02 +05303761int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
3762{
3763 int status;
3764 bool pmac_valid = false;
3765
Joe Perchesc7bf7162015-03-02 19:54:47 -08003766 eth_zero_addr(mac);
Sathya Perla95046b92013-07-23 15:25:02 +05303767
Sathya Perla3175d8c2013-07-23 15:25:03 +05303768 if (BEx_chip(adapter)) {
3769 if (be_physfn(adapter))
3770 status = be_cmd_mac_addr_query(adapter, mac, true, 0,
3771 0);
3772 else
3773 status = be_cmd_mac_addr_query(adapter, mac, false,
3774 adapter->if_handle, 0);
3775 } else {
Sathya Perla95046b92013-07-23 15:25:02 +05303776 status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
Suresh Reddyb188f092014-01-15 13:23:39 +05303777 NULL, adapter->if_handle, 0);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303778 }
3779
Sathya Perla95046b92013-07-23 15:25:02 +05303780 return status;
3781}
3782
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003783/* Uses synchronous MCCQ */
3784int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
3785 u8 mac_count, u32 domain)
3786{
3787 struct be_mcc_wrb *wrb;
3788 struct be_cmd_req_set_mac_list *req;
3789 int status;
3790 struct be_dma_mem cmd;
3791
3792 memset(&cmd, 0, sizeof(struct be_dma_mem));
3793 cmd.size = sizeof(struct be_cmd_req_set_mac_list);
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05303794 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3795 GFP_KERNEL);
Joe Perchesd0320f72013-03-14 13:07:21 +00003796 if (!cmd.va)
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003797 return -ENOMEM;
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003798
3799 spin_lock_bh(&adapter->mcc_lock);
3800
3801 wrb = wrb_from_mccq(adapter);
3802 if (!wrb) {
3803 status = -EBUSY;
3804 goto err;
3805 }
3806
3807 req = cmd.va;
3808 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303809 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
3810 wrb, &cmd);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003811
3812 req->hdr.domain = domain;
3813 req->mac_count = mac_count;
3814 if (mac_count)
3815 memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
3816
3817 status = be_mcc_notify_wait(adapter);
3818
3819err:
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303820 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003821 spin_unlock_bh(&adapter->mcc_lock);
3822 return status;
3823}
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003824
Sathya Perla3175d8c2013-07-23 15:25:03 +05303825/* Wrapper to delete any active MACs and provision the new mac.
3826 * Changes to MAC_LIST are allowed iff none of the MAC addresses in the
3827 * current list are active.
3828 */
3829int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
3830{
3831 bool active_mac = false;
3832 u8 old_mac[ETH_ALEN];
3833 u32 pmac_id;
3834 int status;
3835
3836 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
Suresh Reddyb188f092014-01-15 13:23:39 +05303837 &pmac_id, if_id, dom);
3838
Sathya Perla3175d8c2013-07-23 15:25:03 +05303839 if (!status && active_mac)
3840 be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
3841
3842 return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
3843}
3844
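/* Programs hyper-switch settings for the interface: PVID, port forwarding
 * mode (non-BEx chips) and MAC/VLAN spoof checking.
 */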
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003845int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
Kalesh APe7bcbd72015-05-06 05:30:32 -04003846 u32 domain, u16 intf_id, u16 hsw_mode, u8 spoofchk)
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003847{
3848 struct be_mcc_wrb *wrb;
3849 struct be_cmd_req_set_hsw_config *req;
3850 void *ctxt;
3851 int status;
3852
3853 spin_lock_bh(&adapter->mcc_lock);
3854
3855 wrb = wrb_from_mccq(adapter);
3856 if (!wrb) {
3857 status = -EBUSY;
3858 goto err;
3859 }
3860
3861 req = embedded_payload(wrb);
3862 ctxt = &req->context;
3863
3864 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303865 OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
3866 NULL);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003867
3868 req->hdr.domain = domain;
3869 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
3870 if (pvid) {
3871 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
3872 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
3873 }
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003874 if (!BEx_chip(adapter) && hsw_mode) {
3875 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
3876 ctxt, adapter->hba_port_num);
3877 AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
3878 AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type,
3879 ctxt, hsw_mode);
3880 }
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003881
Kalesh APe7bcbd72015-05-06 05:30:32 -04003882 /* Enable/disable both mac and vlan spoof checking */
3883 if (!BEx_chip(adapter) && spoofchk) {
3884 AMAP_SET_BITS(struct amap_set_hsw_context, mac_spoofchk,
3885 ctxt, spoofchk);
3886 AMAP_SET_BITS(struct amap_set_hsw_context, vlan_spoofchk,
3887 ctxt, spoofchk);
3888 }
3889
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003890 be_dws_cpu_to_le(req->context, sizeof(req->context));
3891 status = be_mcc_notify_wait(adapter);
3892
3893err:
3894 spin_unlock_bh(&adapter->mcc_lock);
3895 return status;
3896}
3897
3898/* Get Hyper switch config */
3899int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
Kalesh APe7bcbd72015-05-06 05:30:32 -04003900 u32 domain, u16 intf_id, u8 *mode, bool *spoofchk)
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003901{
3902 struct be_mcc_wrb *wrb;
3903 struct be_cmd_req_get_hsw_config *req;
3904 void *ctxt;
3905 int status;
3906 u16 vid;
3907
3908 spin_lock_bh(&adapter->mcc_lock);
3909
3910 wrb = wrb_from_mccq(adapter);
3911 if (!wrb) {
3912 status = -EBUSY;
3913 goto err;
3914 }
3915
3916 req = embedded_payload(wrb);
3917 ctxt = &req->context;
3918
3919 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303920 OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
3921 NULL);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003922
3923 req->hdr.domain = domain;
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003924 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
3925 ctxt, intf_id);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003926 AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003927
Vasundhara Volam2c07c1d2014-01-15 13:23:32 +05303928 if (!BEx_chip(adapter) && mode) {
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003929 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
3930 ctxt, adapter->hba_port_num);
3931 AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
3932 }
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003933 be_dws_cpu_to_le(req->context, sizeof(req->context));
3934
3935 status = be_mcc_notify_wait(adapter);
3936 if (!status) {
3937 struct be_cmd_resp_get_hsw_config *resp =
3938 embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05303939
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303940 be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003941 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303942 pvid, &resp->context);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003943 if (pvid)
3944 *pvid = le16_to_cpu(vid);
3945 if (mode)
3946 *mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3947 port_fwd_type, &resp->context);
Kalesh APe7bcbd72015-05-06 05:30:32 -04003948 if (spoofchk)
3949 *spoofchk =
3950 AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3951 spoofchk, &resp->context);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003952 }
3953
3954err:
3955 spin_unlock_bh(&adapter->mcc_lock);
3956 return status;
3957}
3958
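/* WoL is not supported on VFs and on a few specific subsystem-device-id
 * variants.
 */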
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003959static bool be_is_wol_excluded(struct be_adapter *adapter)
3960{
3961 struct pci_dev *pdev = adapter->pdev;
3962
Kalesh AP18c57c72015-05-06 05:30:38 -04003963 if (be_virtfn(adapter))
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003964 return true;
3965
3966 switch (pdev->subsystem_device) {
3967 case OC_SUBSYS_DEVICE_ID1:
3968 case OC_SUBSYS_DEVICE_ID2:
3969 case OC_SUBSYS_DEVICE_ID3:
3970 case OC_SUBSYS_DEVICE_ID4:
3971 return true;
3972 default:
3973 return false;
3974 }
3975}
3976
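/* Uses MBOX. Queries the WoL capability and caches it in adapter->wol_cap */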
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003977int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
3978{
3979 struct be_mcc_wrb *wrb;
3980 struct be_cmd_req_acpi_wol_magic_config_v1 *req;
Suresh Reddy76a9e082014-01-15 13:23:40 +05303981 int status = 0;
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003982 struct be_dma_mem cmd;
3983
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003984 if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
3985 CMD_SUBSYSTEM_ETH))
3986 return -EPERM;
3987
Suresh Reddy76a9e082014-01-15 13:23:40 +05303988 if (be_is_wol_excluded(adapter))
3989 return status;
3990
Suresh Reddyd98ef502013-04-25 00:56:55 +00003991 if (mutex_lock_interruptible(&adapter->mbox_lock))
3992 return -1;
3993
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003994 memset(&cmd, 0, sizeof(struct be_dma_mem));
3995 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05303996 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3997 GFP_ATOMIC);
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003998 if (!cmd.va) {
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303999 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
Suresh Reddyd98ef502013-04-25 00:56:55 +00004000 status = -ENOMEM;
4001 goto err;
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004002 }
4003
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004004 wrb = wrb_from_mbox(adapter);
4005 if (!wrb) {
4006 status = -EBUSY;
4007 goto err;
4008 }
4009
4010 req = cmd.va;
4011
4012 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
4013 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
Suresh Reddy76a9e082014-01-15 13:23:40 +05304014 sizeof(*req), wrb, &cmd);
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004015
4016 req->hdr.version = 1;
4017 req->query_options = BE_GET_WOL_CAP;
4018
4019 status = be_mbox_notify_wait(adapter);
4020 if (!status) {
4021 struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
Kalesh AP03d28ff2014-09-19 15:46:56 +05304022
Kalesh AP504fbf12014-09-19 15:47:00 +05304023 resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va;
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004024
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004025 adapter->wol_cap = resp->wol_settings;
Suresh Reddy76a9e082014-01-15 13:23:40 +05304026 if (adapter->wol_cap & BE_WOL_CAP)
4027 adapter->wol_en = true;
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004028 }
4029err:
4030 mutex_unlock(&adapter->mbox_lock);
Suresh Reddyd98ef502013-04-25 00:56:55 +00004031 if (cmd.va)
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05304032 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
4033 cmd.dma);
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004034 return status;
4036}
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304037
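/* Sets the UART trace level of every module in the FAT configuration */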
4038int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
4039{
4040 struct be_dma_mem extfat_cmd;
4041 struct be_fat_conf_params *cfgs;
4042 int status;
4043 int i, j;
4044
4045 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4046 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05304047 extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
4048 extfat_cmd.size, &extfat_cmd.dma,
4049 GFP_ATOMIC);
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304050 if (!extfat_cmd.va)
4051 return -ENOMEM;
4052
4053 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4054 if (status)
4055 goto err;
4056
4057 cfgs = (struct be_fat_conf_params *)
4058 (extfat_cmd.va + sizeof(struct be_cmd_resp_hdr));
4059 for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
4060 u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
Kalesh AP03d28ff2014-09-19 15:46:56 +05304061
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304062 for (j = 0; j < num_modes; j++) {
4063 if (cfgs->module[i].trace_lvl[j].mode == MODE_UART)
4064 cfgs->module[i].trace_lvl[j].dbg_lvl =
4065 cpu_to_le32(level);
4066 }
4067 }
4068
4069 status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
4070err:
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05304071 dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
4072 extfat_cmd.dma);
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304073 return status;
4074}
4075
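/* Returns the UART trace level of module 0, or 0 on any failure */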
4076int be_cmd_get_fw_log_level(struct be_adapter *adapter)
4077{
4078 struct be_dma_mem extfat_cmd;
4079 struct be_fat_conf_params *cfgs;
4080 int status, j;
4081 int level = 0;
4082
4083 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4084 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05304085 extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
4086 extfat_cmd.size, &extfat_cmd.dma,
4087 GFP_ATOMIC);
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304088
4089 if (!extfat_cmd.va) {
4090 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4091 __func__);
4092 goto err;
4093 }
4094
4095 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4096 if (!status) {
4097 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4098 sizeof(struct be_cmd_resp_hdr));
Kalesh AP03d28ff2014-09-19 15:46:56 +05304099
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304100 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
4101 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4102 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4103 }
4104 }
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05304105 dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
4106 extfat_cmd.dma);
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304107err:
4108 return level;
4109}
4110
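/* Uses MBOX */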
Somnath Kotur941a77d2012-05-17 22:59:03 +00004111int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
4112 struct be_dma_mem *cmd)
4113{
4114 struct be_mcc_wrb *wrb;
4115 struct be_cmd_req_get_ext_fat_caps *req;
4116 int status;
4117
4118 if (mutex_lock_interruptible(&adapter->mbox_lock))
4119 return -1;
4120
4121 wrb = wrb_from_mbox(adapter);
4122 if (!wrb) {
4123 status = -EBUSY;
4124 goto err;
4125 }
4126
4127 req = cmd->va;
4128 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4129 OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
4130 cmd->size, wrb, cmd);
4131 req->parameter_type = cpu_to_le32(1);
4132
4133 status = be_mbox_notify_wait(adapter);
4134err:
4135 mutex_unlock(&adapter->mbox_lock);
4136 return status;
4137}
4138
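/* Uses sync MCC */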
4139int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
4140 struct be_dma_mem *cmd,
4141 struct be_fat_conf_params *configs)
4142{
4143 struct be_mcc_wrb *wrb;
4144 struct be_cmd_req_set_ext_fat_caps *req;
4145 int status;
4146
4147 spin_lock_bh(&adapter->mcc_lock);
4148
4149 wrb = wrb_from_mccq(adapter);
4150 if (!wrb) {
4151 status = -EBUSY;
4152 goto err;
4153 }
4154
4155 req = cmd->va;
4156 memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
4157 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4158 OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
4159 cmd->size, wrb, cmd);
4160
4161 status = be_mcc_notify_wait(adapter);
4162err:
4163 spin_unlock_bh(&adapter->mcc_lock);
4164 return status;
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004165}
Parav Pandit6a4ab662012-03-26 14:27:12 +00004166
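/* Uses MBOX. On failure, '0' + hba_port_num is used as the port name */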
Vasundhara Volam21252372015-02-06 08:18:42 -05004167int be_cmd_query_port_name(struct be_adapter *adapter)
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004168{
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004169 struct be_cmd_req_get_port_name *req;
Vasundhara Volam21252372015-02-06 08:18:42 -05004170 struct be_mcc_wrb *wrb;
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004171 int status;
4172
Vasundhara Volam21252372015-02-06 08:18:42 -05004173 if (mutex_lock_interruptible(&adapter->mbox_lock))
4174 return -1;
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004175
Vasundhara Volam21252372015-02-06 08:18:42 -05004176 wrb = wrb_from_mbox(adapter);
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004177 req = embedded_payload(wrb);
4178
4179 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4180 OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
4181 NULL);
Vasundhara Volam21252372015-02-06 08:18:42 -05004182 if (!BEx_chip(adapter))
4183 req->hdr.version = 1;
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004184
Vasundhara Volam21252372015-02-06 08:18:42 -05004185 status = be_mbox_notify_wait(adapter);
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004186 if (!status) {
4187 struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05304188
Vasundhara Volam21252372015-02-06 08:18:42 -05004189 adapter->port_name = resp->port_name[adapter->hba_port_num];
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004190 } else {
Vasundhara Volam21252372015-02-06 08:18:42 -05004191 adapter->port_name = adapter->hba_port_num + '0';
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004192 }
Vasundhara Volam21252372015-02-06 08:18:42 -05004193
4194 mutex_unlock(&adapter->mbox_lock);
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004195 return status;
4196}
4197
Suresh Reddy980df242015-12-30 01:29:03 -05004198/* When more than 1 NIC descriptor is present in the descriptor list,
4199 * the caller must specify the pf_num to obtain the NIC descriptor
4200 * corresponding to its pci function.
4201 * get_vft must be true when the caller wants the VF-template desc of the
4202 * PF-pool.
4203 * The pf_num should be set to PF_NUM_IGNORE when the caller knows
4204 * that only its NIC descriptor is present in the descriptor list.
4205 */
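/* For example (see the callers below), be_cmd_get_func_config() passes
 * PF_NUM_IGNORE since GET_FUNC_CONFIG returns only the current function's
 * descriptors, while be_cmd_get_profile_config() passes adapter->pf_num to
 * pick this function's NIC descriptor out of the profile's descriptor list.
 */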
Vasundhara Volam10cccf62014-06-30 13:01:31 +05304206static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
Suresh Reddy980df242015-12-30 01:29:03 -05004207 bool get_vft, u8 pf_num)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004208{
Vasundhara Volam150d58c2013-08-27 16:57:31 +05304209 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
Vasundhara Volam10cccf62014-06-30 13:01:31 +05304210 struct be_nic_res_desc *nic;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004211 int i;
4212
4213 for (i = 0; i < desc_count; i++) {
Vasundhara Volam150d58c2013-08-27 16:57:31 +05304214 if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
Vasundhara Volam10cccf62014-06-30 13:01:31 +05304215 hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) {
4216 nic = (struct be_nic_res_desc *)hdr;
Suresh Reddy980df242015-12-30 01:29:03 -05004217
4218 if ((pf_num == PF_NUM_IGNORE ||
4219 nic->pf_num == pf_num) &&
4220 (!get_vft || nic->flags & BIT(VFT_SHIFT)))
Vasundhara Volam10cccf62014-06-30 13:01:31 +05304221 return nic;
4222 }
Vasundhara Volam150d58c2013-08-27 16:57:31 +05304223 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
4224 hdr = (void *)hdr + hdr->desc_len;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004225 }
Vasundhara Volam150d58c2013-08-27 16:57:31 +05304226 return NULL;
4227}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004228
Suresh Reddy980df242015-12-30 01:29:03 -05004229static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count,
4230 u8 pf_num)
Vasundhara Volam10cccf62014-06-30 13:01:31 +05304231{
Suresh Reddy980df242015-12-30 01:29:03 -05004232 return be_get_nic_desc(buf, desc_count, true, pf_num);
Vasundhara Volam10cccf62014-06-30 13:01:31 +05304233}
4234
Suresh Reddy980df242015-12-30 01:29:03 -05004235static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count,
4236 u8 pf_num)
Vasundhara Volam10cccf62014-06-30 13:01:31 +05304237{
Suresh Reddy980df242015-12-30 01:29:03 -05004238 return be_get_nic_desc(buf, desc_count, false, pf_num);
Vasundhara Volam10cccf62014-06-30 13:01:31 +05304239}
4240
Suresh Reddy980df242015-12-30 01:29:03 -05004241static struct be_pcie_res_desc *be_get_pcie_desc(u8 *buf, u32 desc_count,
4242 u8 pf_num)
Vasundhara Volam150d58c2013-08-27 16:57:31 +05304243{
4244 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
4245 struct be_pcie_res_desc *pcie;
4246 int i;
4247
4248 for (i = 0; i < desc_count; i++) {
Suresh Reddy980df242015-12-30 01:29:03 -05004249 if (hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
4250 hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1) {
4251 pcie = (struct be_pcie_res_desc *)hdr;
4252 if (pcie->pf_num == pf_num)
Vasundhara Volam150d58c2013-08-27 16:57:31 +05304253 return pcie;
4254 }
4255
4256 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
4257 hdr = (void *)hdr + hdr->desc_len;
4258 }
Wei Yang950e2952013-05-22 15:58:22 +00004259 return NULL;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004260}
4261
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304262static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count)
4263{
4264 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
4265 int i;
4266
4267 for (i = 0; i < desc_count; i++) {
4268 if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1)
4269 return (struct be_port_res_desc *)hdr;
4270
4271 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
4272 hdr = (void *)hdr + hdr->desc_len;
4273 }
4274 return NULL;
4275}
4276
Sathya Perla92bf14a2013-08-27 16:57:32 +05304277static void be_copy_nic_desc(struct be_resources *res,
4278 struct be_nic_res_desc *desc)
4279{
4280 res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count);
4281 res->max_vlans = le16_to_cpu(desc->vlan_count);
4282 res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
4283 res->max_tx_qs = le16_to_cpu(desc->txq_count);
4284 res->max_rss_qs = le16_to_cpu(desc->rssq_count);
4285 res->max_rx_qs = le16_to_cpu(desc->rq_count);
4286 res->max_evt_qs = le16_to_cpu(desc->eq_count);
Vasundhara Volamf2858732015-03-04 00:44:33 -05004287 res->max_cq_count = le16_to_cpu(desc->cq_count);
4288 res->max_iface_count = le16_to_cpu(desc->iface_count);
4289 res->max_mcc_count = le16_to_cpu(desc->mcc_count);
Sathya Perla92bf14a2013-08-27 16:57:32 +05304290 /* Clear flags that driver is not interested in */
4291 res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
4292 BE_IF_CAP_FLAGS_WANT;
Sathya Perla92bf14a2013-08-27 16:57:32 +05304293}
4294
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004295/* Uses Mbox */
Sathya Perla92bf14a2013-08-27 16:57:32 +05304296int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004297{
4298 struct be_mcc_wrb *wrb;
4299 struct be_cmd_req_get_func_config *req;
4300 int status;
4301 struct be_dma_mem cmd;
4302
Suresh Reddyd98ef502013-04-25 00:56:55 +00004303 if (mutex_lock_interruptible(&adapter->mbox_lock))
4304 return -1;
4305
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004306 memset(&cmd, 0, sizeof(struct be_dma_mem));
4307 cmd.size = sizeof(struct be_cmd_resp_get_func_config);
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05304308 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
4309 GFP_ATOMIC);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004310 if (!cmd.va) {
4311 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
Suresh Reddyd98ef502013-04-25 00:56:55 +00004312 status = -ENOMEM;
4313 goto err;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004314 }
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004315
4316 wrb = wrb_from_mbox(adapter);
4317 if (!wrb) {
4318 status = -EBUSY;
4319 goto err;
4320 }
4321
4322 req = cmd.va;
4323
4324 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4325 OPCODE_COMMON_GET_FUNC_CONFIG,
4326 cmd.size, wrb, &cmd);
4327
Kalesh AP28710c52013-04-28 22:21:13 +00004328 if (skyhawk_chip(adapter))
4329 req->hdr.version = 1;
4330
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004331 status = be_mbox_notify_wait(adapter);
4332 if (!status) {
4333 struct be_cmd_resp_get_func_config *resp = cmd.va;
4334 u32 desc_count = le32_to_cpu(resp->desc_count);
Vasundhara Volam150d58c2013-08-27 16:57:31 +05304335 struct be_nic_res_desc *desc;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004336
Suresh Reddy980df242015-12-30 01:29:03 -05004337 /* GET_FUNC_CONFIG returns resource descriptors of the
4338 * current function only. So, pf_num should be set to
4339 * PF_NUM_IGNORE.
4340 */
4341 desc = be_get_func_nic_desc(resp->func_param, desc_count,
4342 PF_NUM_IGNORE);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004343 if (!desc) {
4344 status = -EINVAL;
4345 goto err;
4346 }
Suresh Reddy980df242015-12-30 01:29:03 -05004347
4348 /* Store pf_num & vf_num for later use in GET_PROFILE_CONFIG */
4349 adapter->pf_num = desc->pf_num;
4350 adapter->vf_num = desc->vf_num;
4351
4352 if (res)
4353 be_copy_nic_desc(res, desc);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004354 }
4355err:
4356 mutex_unlock(&adapter->mbox_lock);
Suresh Reddyd98ef502013-04-25 00:56:55 +00004357 if (cmd.va)
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05304358 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
4359 cmd.dma);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004360 return status;
4361}
4362
Suresh Reddy980df242015-12-30 01:29:03 -05004363/* Will use MBOX only if MCCQ has not been created */
Sathya Perla92bf14a2013-08-27 16:57:32 +05304364int be_cmd_get_profile_config(struct be_adapter *adapter,
Vasundhara Volamf2858732015-03-04 00:44:33 -05004365 struct be_resources *res, u8 query, u8 domain)
Vasundhara Volama05f99d2013-04-21 23:28:17 +00004366{
Vasundhara Volam150d58c2013-08-27 16:57:31 +05304367 struct be_cmd_resp_get_profile_config *resp;
Vasundhara Volamba48c0c2014-06-30 13:01:30 +05304368 struct be_cmd_req_get_profile_config *req;
Vasundhara Volam10cccf62014-06-30 13:01:31 +05304369 struct be_nic_res_desc *vf_res;
Vasundhara Volam150d58c2013-08-27 16:57:31 +05304370 struct be_pcie_res_desc *pcie;
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304371 struct be_port_res_desc *port;
Vasundhara Volam150d58c2013-08-27 16:57:31 +05304372 struct be_nic_res_desc *nic;
Vasundhara Volamba48c0c2014-06-30 13:01:30 +05304373 struct be_mcc_wrb wrb = {0};
Vasundhara Volama05f99d2013-04-21 23:28:17 +00004374 struct be_dma_mem cmd;
Vasundhara Volamf2858732015-03-04 00:44:33 -05004375 u16 desc_count;
Vasundhara Volama05f99d2013-04-21 23:28:17 +00004376 int status;
4377
4378 memset(&cmd, 0, sizeof(struct be_dma_mem));
Vasundhara Volam150d58c2013-08-27 16:57:31 +05304379 cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05304380 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
4381 GFP_ATOMIC);
Vasundhara Volam150d58c2013-08-27 16:57:31 +05304382 if (!cmd.va)
Vasundhara Volama05f99d2013-04-21 23:28:17 +00004383 return -ENOMEM;
Vasundhara Volama05f99d2013-04-21 23:28:17 +00004384
Vasundhara Volamba48c0c2014-06-30 13:01:30 +05304385 req = cmd.va;
4386 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4387 OPCODE_COMMON_GET_PROFILE_CONFIG,
4388 cmd.size, &wrb, &cmd);
4389
Vasundhara Volamba48c0c2014-06-30 13:01:30 +05304390 if (!lancer_chip(adapter))
4391 req->hdr.version = 1;
4392 req->type = ACTIVE_PROFILE_TYPE;
Somnath Kotur72ef3a82015-10-12 03:47:20 -04004393 req->hdr.domain = domain;
Vasundhara Volamba48c0c2014-06-30 13:01:30 +05304394
Vasundhara Volamf2858732015-03-04 00:44:33 -05004395 /* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the
4396 * descriptors with all bits set to "1" for the fields which can be
4397 * modified using SET_PROFILE_CONFIG cmd.
4398 */
4399 if (query == RESOURCE_MODIFIABLE)
4400 req->type |= QUERY_MODIFIABLE_FIELDS_TYPE;
4401
Vasundhara Volamba48c0c2014-06-30 13:01:30 +05304402 status = be_cmd_notify_wait(adapter, &wrb);
Vasundhara Volam150d58c2013-08-27 16:57:31 +05304403 if (status)
4404 goto err;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004405
Vasundhara Volam150d58c2013-08-27 16:57:31 +05304406 resp = cmd.va;
Vasundhara Volamf2858732015-03-04 00:44:33 -05004407 desc_count = le16_to_cpu(resp->desc_count);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004408
Suresh Reddy980df242015-12-30 01:29:03 -05004409 pcie = be_get_pcie_desc(resp->func_param, desc_count,
4410 adapter->pf_num);
Vasundhara Volam150d58c2013-08-27 16:57:31 +05304411 if (pcie)
Sathya Perla92bf14a2013-08-27 16:57:32 +05304412 res->max_vfs = le16_to_cpu(pcie->num_vfs);
Vasundhara Volam150d58c2013-08-27 16:57:31 +05304413
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304414 port = be_get_port_desc(resp->func_param, desc_count);
4415 if (port)
4416 adapter->mc_type = port->mc_type;
4417
Suresh Reddy980df242015-12-30 01:29:03 -05004418 nic = be_get_func_nic_desc(resp->func_param, desc_count,
4419 adapter->pf_num);
Sathya Perla92bf14a2013-08-27 16:57:32 +05304420 if (nic)
4421 be_copy_nic_desc(res, nic);
4422
Suresh Reddy980df242015-12-30 01:29:03 -05004423 vf_res = be_get_vft_desc(resp->func_param, desc_count,
4424 adapter->pf_num);
Vasundhara Volam10cccf62014-06-30 13:01:31 +05304425 if (vf_res)
4426 res->vf_if_cap_flags = vf_res->cap_flags;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004427err:
Vasundhara Volama05f99d2013-04-21 23:28:17 +00004428 if (cmd.va)
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05304429 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
4430 cmd.dma);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004431 return status;
4432}
4433
Vasundhara Volambec84e62014-06-30 13:01:32 +05304434/* Will use MBOX only if MCCQ has not been created */
4435static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
4436 int size, int count, u8 version, u8 domain)
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00004437{
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00004438 struct be_cmd_req_set_profile_config *req;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304439 struct be_mcc_wrb wrb = {0};
4440 struct be_dma_mem cmd;
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00004441 int status;
4442
Vasundhara Volambec84e62014-06-30 13:01:32 +05304443 memset(&cmd, 0, sizeof(struct be_dma_mem));
4444 cmd.size = sizeof(struct be_cmd_req_set_profile_config);
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05304445 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
4446 GFP_ATOMIC);
Vasundhara Volambec84e62014-06-30 13:01:32 +05304447 if (!cmd.va)
4448 return -ENOMEM;
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00004449
Vasundhara Volambec84e62014-06-30 13:01:32 +05304450 req = cmd.va;
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00004451 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Vasundhara Volambec84e62014-06-30 13:01:32 +05304452 OPCODE_COMMON_SET_PROFILE_CONFIG, cmd.size,
4453 &wrb, &cmd);
Sathya Perlaa4018012014-03-27 10:46:18 +05304454 req->hdr.version = version;
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00004455 req->hdr.domain = domain;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304456 req->desc_count = cpu_to_le32(count);
Sathya Perlaa4018012014-03-27 10:46:18 +05304457 memcpy(req->desc, desc, size);
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00004458
Vasundhara Volambec84e62014-06-30 13:01:32 +05304459 status = be_cmd_notify_wait(adapter, &wrb);
4460
4461 if (cmd.va)
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05304462 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
4463 cmd.dma);
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00004464 return status;
4465}
4466
Sathya Perlaa4018012014-03-27 10:46:18 +05304467/* Mark all fields invalid */
Vasundhara Volambec84e62014-06-30 13:01:32 +05304468static void be_reset_nic_desc(struct be_nic_res_desc *nic)
Sathya Perlaa4018012014-03-27 10:46:18 +05304469{
4470 memset(nic, 0, sizeof(*nic));
4471 nic->unicast_mac_count = 0xFFFF;
4472 nic->mcc_count = 0xFFFF;
4473 nic->vlan_count = 0xFFFF;
4474 nic->mcast_mac_count = 0xFFFF;
4475 nic->txq_count = 0xFFFF;
4476 nic->rq_count = 0xFFFF;
4477 nic->rssq_count = 0xFFFF;
4478 nic->lro_count = 0xFFFF;
4479 nic->cq_count = 0xFFFF;
4480 nic->toe_conn_count = 0xFFFF;
4481 nic->eq_count = 0xFFFF;
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05304482 nic->iface_count = 0xFFFF;
Sathya Perlaa4018012014-03-27 10:46:18 +05304483 nic->link_param = 0xFF;
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05304484 nic->channel_id_param = cpu_to_le16(0xF000);
Sathya Perlaa4018012014-03-27 10:46:18 +05304485 nic->acpi_params = 0xFF;
4486 nic->wol_param = 0x0F;
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05304487 nic->tunnel_iface_count = 0xFFFF;
4488 nic->direct_tenant_iface_count = 0xFFFF;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304489 nic->bw_min = 0xFFFFFFFF;
Sathya Perlaa4018012014-03-27 10:46:18 +05304490 nic->bw_max = 0xFFFFFFFF;
4491}
4492
Vasundhara Volambec84e62014-06-30 13:01:32 +05304493/* Mark all fields invalid */
4494static void be_reset_pcie_desc(struct be_pcie_res_desc *pcie)
4495{
4496 memset(pcie, 0, sizeof(*pcie));
4497 pcie->sriov_state = 0xFF;
4498 pcie->pf_state = 0xFF;
4499 pcie->pf_type = 0xFF;
4500 pcie->num_vfs = 0xFFFF;
4501}
4502
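/* Sets the TX rate limit of a VF. BE3 uses the legacy SET_QOS cmd; other
 * chips program the bw_max field of the VF's NIC descriptor.
 */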
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05304503int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
4504 u8 domain)
Sathya Perlaa4018012014-03-27 10:46:18 +05304505{
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05304506 struct be_nic_res_desc nic_desc;
4507 u32 bw_percent;
4508 u16 version = 0;
Sathya Perlaa4018012014-03-27 10:46:18 +05304509
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05304510 if (BE3_chip(adapter))
4511 return be_cmd_set_qos(adapter, max_rate / 10, domain);
4512
4513 be_reset_nic_desc(&nic_desc);
Suresh Reddy980df242015-12-30 01:29:03 -05004514 nic_desc.pf_num = adapter->pf_num;
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05304515 nic_desc.vf_num = domain;
Kalesh AP58bdeaa2015-01-20 03:51:49 -05004516 nic_desc.bw_min = 0;
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05304517 if (lancer_chip(adapter)) {
Sathya Perlaa4018012014-03-27 10:46:18 +05304518 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
4519 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
4520 nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
4521 (1 << NOSV_SHIFT);
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05304522 nic_desc.bw_max = cpu_to_le32(max_rate / 10);
Sathya Perlaa4018012014-03-27 10:46:18 +05304523 } else {
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05304524 version = 1;
4525 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
4526 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
4527 nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
4528 bw_percent = max_rate ? (max_rate * 100) / link_speed : 100;
4529 nic_desc.bw_max = cpu_to_le32(bw_percent);
Sathya Perlaa4018012014-03-27 10:46:18 +05304530 }
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05304531
4532 return be_cmd_set_profile_config(adapter, &nic_desc,
4533 nic_desc.hdr.desc_len,
Vasundhara Volambec84e62014-06-30 13:01:32 +05304534 1, version, domain);
4535}
4536
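/* Computes the per-VF limits (queues, MACs, VLANs, ifaces, MCCQs) that are
 * programmed into the VF NIC template descriptor.
 */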
Vasundhara Volamf2858732015-03-04 00:44:33 -05004537static void be_fill_vf_res_template(struct be_adapter *adapter,
4538 struct be_resources pool_res,
4539 u16 num_vfs, u16 num_vf_qs,
4540 struct be_nic_res_desc *nic_vft)
4541{
4542 u32 vf_if_cap_flags = pool_res.vf_if_cap_flags;
4543 struct be_resources res_mod = {0};
4544
4545	/* GET_PROFILE_CONFIG returns a descriptor in which every field that
4546	 * can be modified using the SET_PROFILE_CONFIG cmd is set to all '1's.
4547	 */
4548 be_cmd_get_profile_config(adapter, &res_mod, RESOURCE_MODIFIABLE, 0);
4549
4550 /* If RSS IFACE capability flags are modifiable for a VF, set the
4551 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
4552 * more than 1 RSSQ is available for a VF.
4553 * Otherwise, provision only 1 queue pair for VF.
4554 */
4555 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
4556 nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
4557 if (num_vf_qs > 1) {
4558 vf_if_cap_flags |= BE_IF_FLAGS_RSS;
4559 if (pool_res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
4560 vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
4561 } else {
4562 vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
4563 BE_IF_FLAGS_DEFQ_RSS);
4564 }
Vasundhara Volamf2858732015-03-04 00:44:33 -05004565 } else {
4566 num_vf_qs = 1;
4567 }
4568
Kalesh AP196e3732015-10-12 03:47:21 -04004569 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
4570 nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
4571 vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
4572 }
4573
4574 nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
Vasundhara Volamf2858732015-03-04 00:44:33 -05004575 nic_vft->rq_count = cpu_to_le16(num_vf_qs);
4576 nic_vft->txq_count = cpu_to_le16(num_vf_qs);
4577 nic_vft->rssq_count = cpu_to_le16(num_vf_qs);
4578 nic_vft->cq_count = cpu_to_le16(pool_res.max_cq_count /
4579 (num_vfs + 1));
4580
4581 /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
4582	 * among the PF and its VFs, if the fields are changeable
4583 */
4584 if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
4585 nic_vft->unicast_mac_count = cpu_to_le16(pool_res.max_uc_mac /
4586 (num_vfs + 1));
4587
4588 if (res_mod.max_vlans == FIELD_MODIFIABLE)
4589 nic_vft->vlan_count = cpu_to_le16(pool_res.max_vlans /
4590 (num_vfs + 1));
4591
4592 if (res_mod.max_iface_count == FIELD_MODIFIABLE)
4593 nic_vft->iface_count = cpu_to_le16(pool_res.max_iface_count /
4594 (num_vfs + 1));
4595
4596 if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
4597 nic_vft->mcc_count = cpu_to_le16(pool_res.max_mcc_count /
4598 (num_vfs + 1));
4599}
4600
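/* Programs the PF PCIE descriptor (SR-IOV state, num_vfs) along with the
 * VF NIC template descriptor in a single SET_PROFILE_CONFIG cmd.
 */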
Vasundhara Volambec84e62014-06-30 13:01:32 +05304601int be_cmd_set_sriov_config(struct be_adapter *adapter,
Vasundhara Volamf2858732015-03-04 00:44:33 -05004602 struct be_resources pool_res, u16 num_vfs,
4603 u16 num_vf_qs)
Vasundhara Volambec84e62014-06-30 13:01:32 +05304604{
4605 struct {
4606 struct be_pcie_res_desc pcie;
4607 struct be_nic_res_desc nic_vft;
4608 } __packed desc;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304609
Vasundhara Volambec84e62014-06-30 13:01:32 +05304610 /* PF PCIE descriptor */
4611 be_reset_pcie_desc(&desc.pcie);
4612 desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1;
4613 desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
Vasundhara Volamf2858732015-03-04 00:44:33 -05004614 desc.pcie.flags = BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
Vasundhara Volambec84e62014-06-30 13:01:32 +05304615 desc.pcie.pf_num = adapter->pdev->devfn;
4616 desc.pcie.sriov_state = num_vfs ? 1 : 0;
4617 desc.pcie.num_vfs = cpu_to_le16(num_vfs);
4618
4619 /* VF NIC Template descriptor */
4620 be_reset_nic_desc(&desc.nic_vft);
4621 desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
4622 desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
Vasundhara Volamf2858732015-03-04 00:44:33 -05004623 desc.nic_vft.flags = BIT(VFT_SHIFT) | BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
Vasundhara Volambec84e62014-06-30 13:01:32 +05304624 desc.nic_vft.pf_num = adapter->pdev->devfn;
4625 desc.nic_vft.vf_num = 0;
4626
Vasundhara Volamf2858732015-03-04 00:44:33 -05004627 be_fill_vf_res_template(adapter, pool_res, num_vfs, num_vf_qs,
4628 &desc.nic_vft);
Vasundhara Volambec84e62014-06-30 13:01:32 +05304629
4630 return be_cmd_set_profile_config(adapter, &desc,
4631 2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0);
Sathya Perlaa4018012014-03-27 10:46:18 +05304632}
4633
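/* Uses sync MCC */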
4634int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
4635{
4636 struct be_mcc_wrb *wrb;
4637 struct be_cmd_req_manage_iface_filters *req;
4638 int status;
4639
4640 if (iface == 0xFFFFFFFF)
4641 return -1;
4642
4643 spin_lock_bh(&adapter->mcc_lock);
4644
4645 wrb = wrb_from_mccq(adapter);
4646 if (!wrb) {
4647 status = -EBUSY;
4648 goto err;
4649 }
4650 req = embedded_payload(wrb);
4651
4652 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4653 OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req),
4654 wrb, NULL);
4655 req->op = op;
4656 req->target_iface_id = cpu_to_le32(iface);
4657
4658 status = be_mcc_notify_wait(adapter);
4659err:
4660 spin_unlock_bh(&adapter->mcc_lock);
4661 return status;
4662}
4663
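/* Programs (or disables, when port is 0) the VxLAN port on the physical
 * port via SET_PROFILE_CONFIG.
 */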
4664int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port)
4665{
4666 struct be_port_res_desc port_desc;
4667
4668 memset(&port_desc, 0, sizeof(port_desc));
4669 port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1;
4670 port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
4671 port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
4672 port_desc.link_num = adapter->hba_port_num;
4673 if (port) {
4674 port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) |
4675 (1 << RCVID_SHIFT);
4676 port_desc.nv_port = swab16(port);
4677 } else {
4678 port_desc.nv_flags = NV_TYPE_DISABLED;
4679 port_desc.nv_port = 0;
4680 }
4681
4682 return be_cmd_set_profile_config(adapter, &port_desc,
Vasundhara Volambec84e62014-06-30 13:01:32 +05304683 RESOURCE_DESC_SIZE_V1, 1, 1, 0);
Sathya Perlaa4018012014-03-27 10:46:18 +05304684}
4685
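/* Uses sync MCC. Retrieves the if_id of the given VF's interface */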
Sathya Perla4c876612013-02-03 20:30:11 +00004686int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
4687 int vf_num)
4688{
4689 struct be_mcc_wrb *wrb;
4690 struct be_cmd_req_get_iface_list *req;
4691 struct be_cmd_resp_get_iface_list *resp;
4692 int status;
4693
4694 spin_lock_bh(&adapter->mcc_lock);
4695
4696 wrb = wrb_from_mccq(adapter);
4697 if (!wrb) {
4698 status = -EBUSY;
4699 goto err;
4700 }
4701 req = embedded_payload(wrb);
4702
4703 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4704 OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
4705 wrb, NULL);
4706 req->hdr.domain = vf_num + 1;
4707
4708 status = be_mcc_notify_wait(adapter);
4709 if (!status) {
4710 resp = (struct be_cmd_resp_get_iface_list *)req;
4711 vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
4712 }
4713
4714err:
4715 spin_unlock_bh(&adapter->mcc_lock);
4716 return status;
4717}
4718
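/* Polls PHYSDEV_CONTROL until the INP bit clears, for up to
 * SLIPORT_IDLE_TIMEOUT (30) seconds.
 */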
Somnath Kotur5c510812013-05-30 02:52:23 +00004719static int lancer_wait_idle(struct be_adapter *adapter)
4720{
4721#define SLIPORT_IDLE_TIMEOUT 30
4722 u32 reg_val;
4723 int status = 0, i;
4724
4725 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
4726 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
4727 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
4728 break;
4729
4730 ssleep(1);
4731 }
4732
4733 if (i == SLIPORT_IDLE_TIMEOUT)
4734 status = -1;
4735
4736 return status;
4737}
4738
4739int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask)
4740{
4741 int status = 0;
4742
4743 status = lancer_wait_idle(adapter);
4744 if (status)
4745 return status;
4746
4747 iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET);
4748
4749 return status;
4750}
4751
4752/* Routine to check whether dump image is present or not */
4753bool dump_present(struct be_adapter *adapter)
4754{
4755 u32 sliport_status = 0;
4756
4757 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
4758 return !!(sliport_status & SLIPORT_STATUS_DIP_MASK);
4759}
4760
4761int lancer_initiate_dump(struct be_adapter *adapter)
4762{
Kalesh APf0613382014-08-01 17:47:32 +05304763 struct device *dev = &adapter->pdev->dev;
Somnath Kotur5c510812013-05-30 02:52:23 +00004764 int status;
4765
Kalesh APf0613382014-08-01 17:47:32 +05304766 if (dump_present(adapter)) {
4767 dev_info(dev, "Previous dump not cleared, not forcing dump\n");
4768 return -EEXIST;
4769 }
4770
Somnath Kotur5c510812013-05-30 02:52:23 +00004771 /* give firmware reset and diagnostic dump */
4772 status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK |
4773 PHYSDEV_CONTROL_DD_MASK);
4774 if (status < 0) {
Kalesh APf0613382014-08-01 17:47:32 +05304775 dev_err(dev, "FW reset failed\n");
Somnath Kotur5c510812013-05-30 02:52:23 +00004776 return status;
4777 }
4778
4779 status = lancer_wait_idle(adapter);
4780 if (status)
4781 return status;
4782
4783 if (!dump_present(adapter)) {
Kalesh APf0613382014-08-01 17:47:32 +05304784 dev_err(dev, "FW dump not generated\n");
4785 return -EIO;
Somnath Kotur5c510812013-05-30 02:52:23 +00004786 }
4787
4788 return 0;
4789}
4790
Kalesh APf0613382014-08-01 17:47:32 +05304791int lancer_delete_dump(struct be_adapter *adapter)
4792{
4793 int status;
4794
4795 status = lancer_cmd_delete_object(adapter, LANCER_FW_DUMP_FILE);
4796 return be_cmd_status(status);
4797}
4798
Padmanabh Ratnakardcf7ebb2012-10-20 06:03:49 +00004799/* Uses sync mcc */
4800int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
4801{
4802 struct be_mcc_wrb *wrb;
4803 struct be_cmd_enable_disable_vf *req;
4804 int status;
4805
Vasundhara Volam05998632013-10-01 15:59:59 +05304806 if (BEx_chip(adapter))
Padmanabh Ratnakardcf7ebb2012-10-20 06:03:49 +00004807 return 0;
4808
4809 spin_lock_bh(&adapter->mcc_lock);
4810
4811 wrb = wrb_from_mccq(adapter);
4812 if (!wrb) {
4813 status = -EBUSY;
4814 goto err;
4815 }
4816
4817 req = embedded_payload(wrb);
4818
4819 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4820 OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
4821 wrb, NULL);
4822
4823 req->hdr.domain = domain;
4824 req->enable = 1;
4825 status = be_mcc_notify_wait(adapter);
4826err:
4827 spin_unlock_bh(&adapter->mcc_lock);
4828 return status;
4829}
4830
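/* Uses MBOX */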
Somnath Kotur68c45a22013-03-14 02:42:07 +00004831int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
4832{
4833 struct be_mcc_wrb *wrb;
4834 struct be_cmd_req_intr_set *req;
4835 int status;
4836
4837 if (mutex_lock_interruptible(&adapter->mbox_lock))
4838 return -1;
4839
4840 wrb = wrb_from_mbox(adapter);
4841
4842 req = embedded_payload(wrb);
4843
4844 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4845 OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
4846 wrb, NULL);
4847
4848 req->intr_enabled = intr_enable;
4849
4850 status = be_mbox_notify_wait(adapter);
4851
4852 mutex_unlock(&adapter->mbox_lock);
4853 return status;
4854}
4855
Vasundhara Volam542963b2014-01-15 13:23:33 +05304856/* Uses MBOX */
4857int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id)
4858{
4859 struct be_cmd_req_get_active_profile *req;
4860 struct be_mcc_wrb *wrb;
4861 int status;
4862
4863 if (mutex_lock_interruptible(&adapter->mbox_lock))
4864 return -1;
4865
4866 wrb = wrb_from_mbox(adapter);
4867 if (!wrb) {
4868 status = -EBUSY;
4869 goto err;
4870 }
4871
4872 req = embedded_payload(wrb);
4873
4874 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4875 OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req),
4876 wrb, NULL);
4877
4878 status = be_mbox_notify_wait(adapter);
4879 if (!status) {
4880 struct be_cmd_resp_get_active_profile *resp =
4881 embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05304882
Vasundhara Volam542963b2014-01-15 13:23:33 +05304883 *profile_id = le16_to_cpu(resp->active_profile_id);
4884 }
4885
4886err:
4887 mutex_unlock(&adapter->mbox_lock);
4888 return status;
4889}
4890
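/* Uses sync MCC */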
Suresh Reddyd9d426a2015-12-30 01:28:56 -05004891int __be_cmd_set_logical_link_config(struct be_adapter *adapter,
4892 int link_state, int version, u8 domain)
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304893{
4894 struct be_mcc_wrb *wrb;
4895 struct be_cmd_req_set_ll_link *req;
4896 int status;
4897
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304898 spin_lock_bh(&adapter->mcc_lock);
4899
4900 wrb = wrb_from_mccq(adapter);
4901 if (!wrb) {
4902 status = -EBUSY;
4903 goto err;
4904 }
4905
4906 req = embedded_payload(wrb);
4907
4908 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4909 OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG,
4910 sizeof(*req), wrb, NULL);
4911
Suresh Reddyd9d426a2015-12-30 01:28:56 -05004912 req->hdr.version = version;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304913 req->hdr.domain = domain;
4914
Suresh Reddyd9d426a2015-12-30 01:28:56 -05004915 if (link_state == IFLA_VF_LINK_STATE_ENABLE ||
4916 link_state == IFLA_VF_LINK_STATE_AUTO)
4917 req->link_config |= PLINK_ENABLE;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304918
4919 if (link_state == IFLA_VF_LINK_STATE_AUTO)
Suresh Reddyd9d426a2015-12-30 01:28:56 -05004920 req->link_config |= PLINK_TRACK;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304921
4922 status = be_mcc_notify_wait(adapter);
4923err:
4924 spin_unlock_bh(&adapter->mcc_lock);
4925 return status;
4926}
4927
Suresh Reddyd9d426a2015-12-30 01:28:56 -05004928int be_cmd_set_logical_link_config(struct be_adapter *adapter,
4929 int link_state, u8 domain)
4930{
4931 int status;
4932
4933 if (BEx_chip(adapter))
4934 return -EOPNOTSUPP;
4935
4936 status = __be_cmd_set_logical_link_config(adapter, link_state,
4937 2, domain);
4938
4939 /* Version 2 of the command will not be recognized by older FW.
4940 * On such a failure issue version 1 of the command.
4941 */
4942 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST)
4943 status = __be_cmd_set_logical_link_config(adapter, link_state,
4944 1, domain);
4945 return status;
4946}
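
/* Pass-through cmd interface used by the RoCE driver. Uses sync MCC */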
Parav Pandit6a4ab662012-03-26 14:27:12 +00004947int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05304948 int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
Parav Pandit6a4ab662012-03-26 14:27:12 +00004949{
4950 struct be_adapter *adapter = netdev_priv(netdev_handle);
4951 struct be_mcc_wrb *wrb;
Kalesh AP504fbf12014-09-19 15:47:00 +05304952 struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *)wrb_payload;
Parav Pandit6a4ab662012-03-26 14:27:12 +00004953 struct be_cmd_req_hdr *req;
4954 struct be_cmd_resp_hdr *resp;
4955 int status;
4956
4957 spin_lock_bh(&adapter->mcc_lock);
4958
4959 wrb = wrb_from_mccq(adapter);
4960 if (!wrb) {
4961 status = -EBUSY;
4962 goto err;
4963 }
4964 req = embedded_payload(wrb);
4965 resp = embedded_payload(wrb);
4966
4967 be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
4968 hdr->opcode, wrb_payload_size, wrb, NULL);
4969 memcpy(req, wrb_payload, wrb_payload_size);
4970 be_dws_cpu_to_le(req, wrb_payload_size);
4971
4972 status = be_mcc_notify_wait(adapter);
4973 if (cmd_status)
4974 *cmd_status = (status & 0xffff);
4975 if (ext_status)
4976 *ext_status = 0;
4977 memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
4978 be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
4979err:
4980 spin_unlock_bh(&adapter->mcc_lock);
4981 return status;
4982}
4983EXPORT_SYMBOL(be_roce_mcc_cmd);