/*
 * Copyright (C) 2005 - 2015 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"

static char *be_port_misconfig_evt_desc[] = {
	"A valid SFP module detected",
	"Optics faulted/ incorrectly installed/ not installed.",
	"Optics of two types installed.",
	"Incompatible optics.",
	"Unknown port SFP status"
};

static char *be_port_misconfig_remedy_desc[] = {
	"",
	"Reseat optics. If issue not resolved, replace",
	"Remove one optic or install matching pair of optics",
	"Replace with compatible optics for card to function",
	""
};

static struct be_cmd_priv_map cmd_priv_map[] = {
	{
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_SET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_ETH_GET_PPORT_STATS,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_PHY_DETAILS,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_LOWLEVEL_HOST_DDR_DMA,
		CMD_SUBSYSTEM_LOWLEVEL,
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_LOWLEVEL_LOOPBACK_TEST,
		CMD_SUBSYSTEM_LOWLEVEL,
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
		CMD_SUBSYSTEM_LOWLEVEL,
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
};

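/* A command is allowed only when the function's current privilege mask
 * (adapter->cmd_privileges) includes at least one of the privileges listed
 * for that opcode/subsystem in cmd_priv_map[]; opcodes that do not appear
 * in the map are always allowed.
 */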
static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
{
	int i;
	int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
	u32 cmd_privileges = adapter->cmd_privileges;

	for (i = 0; i < num_entries; i++)
		if (opcode == cmd_priv_map[i].opcode &&
		    subsystem == cmd_priv_map[i].subsystem)
			if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
				return false;

	return true;
}

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

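/* Rings the MCC doorbell to tell the adapter that one new WRB has been
 * posted on the MCC queue; returns -EIO if the adapter is in an error state.
 */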
static int be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_ANY))
		return -EIO;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);

	return 0;
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	u32 flags;

	if (compl->flags != 0) {
		flags = le32_to_cpu(compl->flags);
		if (flags & CQE_FLAGS_VALID_MASK) {
			compl->flags = flags;
			return true;
		}
	}
	return false;
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
{
	unsigned long addr;

	addr = tag1;
	addr = ((addr << 16) << 16) | tag0;
	return (void *)addr;
}

static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
{
	if (base_status == MCC_STATUS_NOT_SUPPORTED ||
	    base_status == MCC_STATUS_ILLEGAL_REQUEST ||
	    addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
	    addl_status == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
	    (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
	    (base_status == MCC_STATUS_ILLEGAL_FIELD ||
	     addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
		return true;
	else
		return false;
}

/* Place holder for all the async MCC cmds wherein the caller is not in a busy
 * loop (has not issued be_mcc_notify_wait())
 */
static void be_async_cmd_process(struct be_adapter *adapter,
				 struct be_mcc_compl *compl,
				 struct be_cmd_resp_hdr *resp_hdr)
{
	enum mcc_base_status base_status = base_status(compl->status);
	u8 opcode = 0, subsystem = 0;

	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
		complete(&adapter->et_cmd_compl);
		return;
	}

	if (opcode == OPCODE_LOWLEVEL_SET_LOOPBACK_MODE &&
	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
		complete(&adapter->et_cmd_compl);
		return;
	}

	if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
	     opcode == OPCODE_COMMON_WRITE_OBJECT) &&
	    subsystem == CMD_SUBSYSTEM_COMMON) {
		adapter->flash_status = compl->status;
		complete(&adapter->et_cmd_compl);
		return;
	}

	if ((opcode == OPCODE_ETH_GET_STATISTICS ||
	     opcode == OPCODE_ETH_GET_PPORT_STATS) &&
	    subsystem == CMD_SUBSYSTEM_ETH &&
	    base_status == MCC_STATUS_SUCCESS) {
		be_parse_stats(adapter);
		adapter->stats_cmd_sent = false;
		return;
	}

	if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
	    subsystem == CMD_SUBSYSTEM_COMMON) {
		if (base_status == MCC_STATUS_SUCCESS) {
			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
							(void *)resp_hdr;
			adapter->hwmon_info.be_on_die_temp =
						resp->on_die_temperature;
		} else {
			adapter->be_get_temp_freq = 0;
			adapter->hwmon_info.be_on_die_temp =
						BE_INVALID_DIE_TEMP;
		}
		return;
	}
}

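/* Processes one MCC completion: swaps it to host endian, recovers the
 * originating request header from the completion tags, lets the async
 * command handlers run, and logs any failure status that is not
 * explicitly skipped by be_skip_err_log().
 */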
static int be_mcc_compl_process(struct be_adapter *adapter,
				struct be_mcc_compl *compl)
{
	enum mcc_base_status base_status;
	enum mcc_addl_status addl_status;
	struct be_cmd_resp_hdr *resp_hdr;
	u8 opcode = 0, subsystem = 0;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	base_status = base_status(compl->status);
	addl_status = addl_status(compl->status);

	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	be_async_cmd_process(adapter, compl, resp_hdr);

	if (base_status != MCC_STATUS_SUCCESS &&
	    !be_skip_err_log(opcode, base_status, addl_status)) {
		if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST ||
		    addl_status == MCC_ADDL_STATUS_INSUFFICIENT_PRIVILEGES) {
			dev_warn(&adapter->pdev->dev,
				 "VF is not privileged to issue opcode %d-%d\n",
				 opcode, subsystem);
		} else {
			dev_err(&adapter->pdev->dev,
				"opcode %d-%d failed:status %d-%d\n",
				opcode, subsystem, base_status, addl_status);
		}
	}
	return compl->status;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
					struct be_mcc_compl *compl)
{
	struct be_async_event_link_state *evt =
			(struct be_async_event_link_state *)compl;

	/* When link status changes, link speed must be re-queried from FW */
	adapter->phy.link_speed = -1;

	/* On BEx the FW does not send a separate link status
	 * notification for physical and logical link.
	 * On other chips just process the logical link
	 * status notification
	 */
	if (!BEx_chip(adapter) &&
	    !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
		return;

	/* For the initial link status do not rely on the ASYNC event as
	 * it may not be received in some cases.
	 */
	if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
		be_link_status_update(adapter,
				      evt->port_link_status & LINK_STATUS_MASK);
}

static void be_async_port_misconfig_event_process(struct be_adapter *adapter,
						  struct be_mcc_compl *compl)
{
	struct be_async_event_misconfig_port *evt =
			(struct be_async_event_misconfig_port *)compl;
	u32 sfp_mismatch_evt = le32_to_cpu(evt->event_data_word1);
	struct device *dev = &adapter->pdev->dev;
	u8 port_misconfig_evt;

	port_misconfig_evt =
		((sfp_mismatch_evt >> (adapter->hba_port_num * 8)) & 0xff);

	/* Log an error message that would allow a user to determine
	 * whether the SFPs have an issue
	 */
	dev_info(dev, "Port %c: %s %s", adapter->port_name,
		 be_port_misconfig_evt_desc[port_misconfig_evt],
		 be_port_misconfig_remedy_desc[port_misconfig_evt]);

	if (port_misconfig_evt == INCOMPATIBLE_SFP)
		adapter->flags |= BE_FLAGS_EVT_INCOMPATIBLE_SFP;
}

/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
					       struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_cos_priority *evt =
			(struct be_async_event_grp5_cos_priority *)compl;

	if (evt->valid) {
		adapter->vlan_prio_bmap = evt->available_priority_bmap;
		adapter->recommended_prio_bits =
			evt->reco_default_priority << VLAN_PRIO_SHIFT;
	}
}

/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
					    struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_qos_link_speed *evt =
			(struct be_async_event_grp5_qos_link_speed *)compl;

	if (adapter->phy.link_speed >= 0 &&
	    evt->physical_port == adapter->port_num)
		adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
}

/*Grp5 PVID evt*/
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
					     struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_pvid_state *evt =
			(struct be_async_event_grp5_pvid_state *)compl;

	if (evt->enabled) {
		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
		dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
	} else {
		adapter->pvid = 0;
	}
}

#define MGMT_ENABLE_MASK	0x4
static void be_async_grp5_fw_control_process(struct be_adapter *adapter,
					     struct be_mcc_compl *compl)
{
	struct be_async_fw_control *evt = (struct be_async_fw_control *)compl;
	u32 evt_dw1 = le32_to_cpu(evt->event_data_word1);

	if (evt_dw1 & MGMT_ENABLE_MASK) {
		adapter->flags |= BE_FLAGS_OS2BMC;
		adapter->bmc_filt_mask = le32_to_cpu(evt->event_data_word2);
	} else {
		adapter->flags &= ~BE_FLAGS_OS2BMC;
	}
}

static void be_async_grp5_evt_process(struct be_adapter *adapter,
				      struct be_mcc_compl *compl)
{
	u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) &
				ASYNC_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_EVENT_COS_PRIORITY:
		be_async_grp5_cos_priority_process(adapter, compl);
		break;
	case ASYNC_EVENT_QOS_SPEED:
		be_async_grp5_qos_speed_process(adapter, compl);
		break;
	case ASYNC_EVENT_PVID_STATE:
		be_async_grp5_pvid_state_process(adapter, compl);
		break;
	/* Async event to disable/enable os2bmc and/or mac-learning */
	case ASYNC_EVENT_FW_CONTROL:
		be_async_grp5_fw_control_process(adapter, compl);
		break;
	default:
		break;
	}
}

static void be_async_dbg_evt_process(struct be_adapter *adapter,
				     struct be_mcc_compl *cmp)
{
	u8 event_type = 0;
	struct be_async_event_qnq *evt = (struct be_async_event_qnq *)cmp;

	event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
			ASYNC_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_DEBUG_EVENT_TYPE_QNQ:
		if (evt->valid)
			adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
		adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
		break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
			 event_type);
		break;
	}
}

static void be_async_sliport_evt_process(struct be_adapter *adapter,
					 struct be_mcc_compl *cmp)
{
	u8 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
			ASYNC_EVENT_TYPE_MASK;

	if (event_type == ASYNC_EVENT_PORT_MISCONFIG)
		be_async_port_misconfig_event_process(adapter, cmp);
}

static inline bool is_link_state_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_LINK_STATE;
}

static inline bool is_grp5_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_GRP_5;
}

static inline bool is_dbg_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_QNQ;
}

static inline bool is_sliport_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
		ASYNC_EVENT_CODE_SLIPORT;
}

static void be_mcc_event_process(struct be_adapter *adapter,
				 struct be_mcc_compl *compl)
{
	if (is_link_state_evt(compl->flags))
		be_async_link_state_process(adapter, compl);
	else if (is_grp5_evt(compl->flags))
		be_async_grp5_evt_process(adapter, compl);
	else if (is_dbg_evt(compl->flags))
		be_async_dbg_evt_process(adapter, compl);
	else if (is_sliport_evt(compl->flags))
		be_async_sliport_evt_process(adapter, compl);
}

static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	adapter->mcc_obj.rearm_cq = false;
	be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

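/* Drains the MCC completion queue: async events are dispatched to their
 * handlers, command completions are processed and accounted against the
 * queue, and the consumed entries are returned to hardware via a CQ notify.
 */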
int be_process_mcc(struct be_adapter *adapter)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock(&adapter->mcc_cq_lock);

	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			be_mcc_event_process(adapter, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	if (num)
		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);

	spin_unlock(&adapter->mcc_cq_lock);
	return status;
}

/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout		120000 /* 12s timeout */
	int i, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	for (i = 0; i < mcc_timeout; i++) {
		if (be_check_error(adapter, BE_ERROR_ANY))
			return -EIO;

		local_bh_disable();
		status = be_process_mcc(adapter);
		local_bh_enable();

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "FW not responding\n");
		be_set_error(adapter, BE_ERROR_FW);
		return -EIO;
	}
	return status;
}

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	int status;
	struct be_mcc_wrb *wrb;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
	u16 index = mcc_obj->q.head;
	struct be_cmd_resp_hdr *resp;

	index_dec(&index, mcc_obj->q.len);
	wrb = queue_index_node(&mcc_obj->q, index);

	resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);

	status = be_mcc_notify(adapter);
	if (status)
		goto out;

	status = be_mcc_wait_compl(adapter);
	if (status == -EIO)
		goto out;

	status = (resp->base_status |
		  ((resp->addl_status & CQE_ADDL_STATUS_MASK) <<
		   CQE_ADDL_STATUS_SHIFT));
out:
	return status;
}

static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	do {
		if (be_check_error(adapter, BE_ERROR_ANY))
			return -EIO;

		ready = ioread32(db);
		if (ready == 0xffffffff)
			return -1;

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "FW not responding\n");
			be_set_error(adapter, BE_ERROR_FW);
			be_detect_error(adapter);
			return -1;
		}

		msleep(1);
		msecs++;
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}

static u16 be_POST_stage_get(struct be_adapter *adapter)
{
	u32 sem;

	if (BEx_chip(adapter))
		sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
	else
		pci_read_config_dword(adapter->pdev,
				      SLIPORT_SEMAPHORE_OFFSET_SH, &sem);

	return sem & POST_STAGE_MASK;
}

static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			return 0;

		if (sliport_status & SLIPORT_STATUS_ERR_MASK &&
		    !(sliport_status & SLIPORT_STATUS_RN_MASK))
			return -EIO;

		msleep(1000);
	}

	return sliport_status ? : -1;
}

int be_fw_wait_ready(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;
	struct device *dev = &adapter->pdev->dev;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (status) {
			stage = status;
			goto err;
		}
		return 0;
	}

	do {
		/* There's no means to poll POST state on BE2/3 VFs */
		if (BEx_chip(adapter) && be_virtfn(adapter))
			return 0;

		stage = be_POST_stage_get(adapter);
		if (stage == POST_STAGE_ARMFW_RDY)
			return 0;

		dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
		if (msleep_interruptible(2000)) {
			dev_err(dev, "Waiting for POST aborted\n");
			return -EINTR;
		}
		timeout += 2;
	} while (timeout < 60);

err:
	dev_err(dev, "POST timeout; stage=%#x\n", stage);
	return -ETIMEDOUT;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
{
	wrb->tag0 = addr & 0xFFFFFFFF;
	wrb->tag1 = upper_32_bits(addr);
}

/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				   u8 subsystem, u8 opcode, int cmd_len,
				   struct be_mcc_wrb *wrb,
				   struct be_dma_mem *mem)
{
	struct be_sge *sge;

	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;
	fill_wrb_tags(wrb, (ulong) req_hdr);
	wrb->payload_length = cmd_len;
	if (mem) {
		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
			MCC_WRB_SGE_CNT_SHIFT;
		sge = nonembedded_sgl(wrb);
		sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
		sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
		sge->len = cpu_to_le32(mem->size);
	} else
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	be_dws_cpu_to_le(wrb, 8);
}

static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
				      struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (!mccq->created)
		return NULL;

	if (atomic_read(&mccq->used) >= mccq->len)
		return NULL;

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

static bool use_mcc(struct be_adapter *adapter)
{
	return adapter->mcc_obj.q.created;
}

/* Must be used only in process context */
static int be_cmd_lock(struct be_adapter *adapter)
{
	if (use_mcc(adapter)) {
		spin_lock_bh(&adapter->mcc_lock);
		return 0;
	} else {
		return mutex_lock_interruptible(&adapter->mbox_lock);
	}
}

/* Must be used only in process context */
static void be_cmd_unlock(struct be_adapter *adapter)
{
	if (use_mcc(adapter))
		spin_unlock_bh(&adapter->mcc_lock);
	else
		return mutex_unlock(&adapter->mbox_lock);
}

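/* Copies a locally prepared WRB into the next free slot of the MCC queue
 * (or into the mailbox when the MCC queue has not been created yet) and
 * re-stamps the tags for embedded commands.
 */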
static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
				      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;

	if (use_mcc(adapter)) {
		dest_wrb = wrb_from_mccq(adapter);
		if (!dest_wrb)
			return NULL;
	} else {
		dest_wrb = wrb_from_mbox(adapter);
	}

	memcpy(dest_wrb, wrb, sizeof(*wrb));
	if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
		fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));

	return dest_wrb;
}

/* Must be used only in process context */
static int be_cmd_notify_wait(struct be_adapter *adapter,
			      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;
	int status;

	status = be_cmd_lock(adapter);
	if (status)
		return status;

	dest_wrb = be_cmd_copy(adapter, wrb);
	if (!dest_wrb) {
		status = -EBUSY;
		goto unlock;
	}

	if (use_mcc(adapter))
		status = be_mcc_notify_wait(adapter);
	else
		status = be_mbox_notify_wait(adapter);

	if (!status)
		memcpy(wrb, dest_wrb, sizeof(*wrb));

unlock:
	be_cmd_unlock(adapter);
	return status;
}

/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eqo->q.dma_mem;
	int status, ver = 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
			       NULL);

	/* Support for EQ_CREATEv2 available only SH-R onwards */
	if (!(BEx_chip(adapter) || lancer_chip(adapter)))
		ver = 2;

	req->hdr.version = ver;
	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe*/
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
		      __ilog2_u32(eqo->q.len / 256));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);

		eqo->q.id = le16_to_cpu(resp->eq_id);
		eqo->msix_idx =
			(ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
		eqo->q.created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Use MCC */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			  bool permanent, u32 if_handle, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
			       NULL);
	req->type = MAC_ADDRESS_TYPE_NETWORK;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16)if_handle);
		req->pmac_id = cpu_to_le32(pmac_id);
		req->permanent = 0;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);

		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		    u32 if_id, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
			       NULL);

	req->hdr.domain = domain;
	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);

		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);

	if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
		status = -EPERM;

	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	if (pmac_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req),
			       wrb, NULL);

	req->hdr.domain = dom;
	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
		     struct be_queue_info *eq, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	if (BEx_chip(adapter)) {
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
			      coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
			      ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
	} else {
		req->hdr.version = 2;
		req->page_size = 1; /* 1 for 4K */

		/* coalesce-wm field in this cmd is not relevant to Lancer.
		 * Lancer uses COMMON_MODIFY_CQ to set this field
		 */
		if (!lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
				      ctxt, coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
			      no_delay);
		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);

		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

1179static u32 be_encoded_q_len(int q_len)
1180{
1181 u32 len_encoded = fls(q_len); /* log2(len) + 1 */
Kalesh AP03d28ff2014-09-19 15:46:56 +05301182
Sathya Perla5fb379e2009-06-18 00:02:59 +00001183 if (len_encoded == 16)
1184 len_encoded = 0;
1185 return len_encoded;
1186}
1187
Jingoo Han4188e7d2013-08-05 18:02:02 +09001188static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301189 struct be_queue_info *mccq,
1190 struct be_queue_info *cq)
Sathya Perla5fb379e2009-06-18 00:02:59 +00001191{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001192 struct be_mcc_wrb *wrb;
Somnath Kotur34b1ef02011-06-01 00:33:22 +00001193 struct be_cmd_req_mcc_ext_create *req;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001194 struct be_dma_mem *q_mem = &mccq->dma_mem;
Sathya Perlab31c50a2009-09-17 10:30:13 -07001195 void *ctxt;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001196 int status;
1197
Ivan Vecera29849612010-12-14 05:43:19 +00001198 if (mutex_lock_interruptible(&adapter->mbox_lock))
1199 return -1;
Sathya Perlab31c50a2009-09-17 10:30:13 -07001200
1201 wrb = wrb_from_mbox(adapter);
1202 req = embedded_payload(wrb);
1203 ctxt = &req->context;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001204
Somnath Kotur106df1e2011-10-27 07:12:13 +00001205 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301206 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
1207 NULL);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001208
Ajit Khaparded4a2ac32010-03-11 01:35:59 +00001209 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
Vasundhara Volam666d39c2014-01-15 13:23:31 +05301210 if (BEx_chip(adapter)) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001211 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1212 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301213 be_encoded_q_len(mccq->len));
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001214 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
Vasundhara Volam666d39c2014-01-15 13:23:31 +05301215 } else {
1216 req->hdr.version = 1;
1217 req->cq_id = cpu_to_le16(cq->id);
1218
1219 AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt,
1220 be_encoded_q_len(mccq->len));
1221 AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1);
1222 AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id,
1223 ctxt, cq->id);
1224 AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid,
1225 ctxt, 1);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001226 }
1227
Vasundhara Volam21252372015-02-06 08:18:42 -05001228 /* Subscribe to Link State, Sliport Event and Group 5 Events
1229 * (bits 1, 5 and 17 set)
1230 */
1231 req->async_event_bitmap[0] =
1232 cpu_to_le32(BIT(ASYNC_EVENT_CODE_LINK_STATE) |
1233 BIT(ASYNC_EVENT_CODE_GRP_5) |
1234 BIT(ASYNC_EVENT_CODE_QNQ) |
1235 BIT(ASYNC_EVENT_CODE_SLIPORT));
1236
Sathya Perla5fb379e2009-06-18 00:02:59 +00001237 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1238
1239 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1240
Sathya Perlab31c50a2009-09-17 10:30:13 -07001241 status = be_mbox_notify_wait(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001242 if (!status) {
1243 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05301244
Sathya Perla5fb379e2009-06-18 00:02:59 +00001245 mccq->id = le16_to_cpu(resp->id);
1246 mccq->created = true;
1247 }
Ivan Vecera29849612010-12-14 05:43:19 +00001248 mutex_unlock(&adapter->mbox_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001249
1250 return status;
1251}
1252
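/* Fallback: create the MCC queue with the original (non-ext) command for
 * older BEx firmware that lacks MCC_CREATE_EXT support. Uses MBOX.
 */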
Jingoo Han4188e7d2013-08-05 18:02:02 +09001253static int be_cmd_mccq_org_create(struct be_adapter *adapter,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301254 struct be_queue_info *mccq,
1255 struct be_queue_info *cq)
Somnath Kotur34b1ef02011-06-01 00:33:22 +00001256{
1257 struct be_mcc_wrb *wrb;
1258 struct be_cmd_req_mcc_create *req;
1259 struct be_dma_mem *q_mem = &mccq->dma_mem;
1260 void *ctxt;
1261 int status;
1262
1263 if (mutex_lock_interruptible(&adapter->mbox_lock))
1264 return -1;
1265
1266 wrb = wrb_from_mbox(adapter);
1267 req = embedded_payload(wrb);
1268 ctxt = &req->context;
1269
Somnath Kotur106df1e2011-10-27 07:12:13 +00001270 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301271 OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
1272 NULL);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00001273
1274 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1275
1276 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1277 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301278 be_encoded_q_len(mccq->len));
Somnath Kotur34b1ef02011-06-01 00:33:22 +00001279 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1280
1281 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1282
1283 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1284
1285 status = be_mbox_notify_wait(adapter);
1286 if (!status) {
1287 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05301288
Somnath Kotur34b1ef02011-06-01 00:33:22 +00001289 mccq->id = le16_to_cpu(resp->id);
1290 mccq->created = true;
1291 }
1292
1293 mutex_unlock(&adapter->mbox_lock);
1294 return status;
1295}
1296
1297int be_cmd_mccq_create(struct be_adapter *adapter,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301298 struct be_queue_info *mccq, struct be_queue_info *cq)
Somnath Kotur34b1ef02011-06-01 00:33:22 +00001299{
1300 int status;
1301
1302 status = be_cmd_mccq_ext_create(adapter, mccq, cq);
Vasundhara Volam666d39c2014-01-15 13:23:31 +05301303 if (status && BEx_chip(adapter)) {
Somnath Kotur34b1ef02011-06-01 00:33:22 +00001304 dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
1305 "or newer to avoid conflicting priorities between NIC "
1306 "and FCoE traffic");
1307 status = be_cmd_mccq_org_create(adapter, mccq, cq);
1308 }
1309 return status;
1310}
1311
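/* Create a TX queue. The cmd version depends on the chip; for v2 the f/w
 * returns the doorbell offset to be used for this queue. Uses MCCQ if
 * available else MBOX (via be_cmd_notify_wait()).
 */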
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00001312int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001313{
Sathya Perla77071332013-08-27 16:57:34 +05301314 struct be_mcc_wrb wrb = {0};
Sathya Perlab31c50a2009-09-17 10:30:13 -07001315 struct be_cmd_req_eth_tx_create *req;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00001316 struct be_queue_info *txq = &txo->q;
1317 struct be_queue_info *cq = &txo->cq;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001318 struct be_dma_mem *q_mem = &txq->dma_mem;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00001319 int status, ver = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001320
Sathya Perla77071332013-08-27 16:57:34 +05301321 req = embedded_payload(&wrb);
Somnath Kotur106df1e2011-10-27 07:12:13 +00001322 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301323 OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001324
Padmanabh Ratnakar8b7756c2011-03-07 03:08:52 +00001325 if (lancer_chip(adapter)) {
1326 req->hdr.version = 1;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00001327 } else if (BEx_chip(adapter)) {
1328 if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
1329 req->hdr.version = 2;
1330 } else { /* For SH */
1331 req->hdr.version = 2;
Padmanabh Ratnakar8b7756c2011-03-07 03:08:52 +00001332 }
1333
Vasundhara Volam81b02652013-10-01 15:59:57 +05301334 if (req->hdr.version > 0)
1335 req->if_id = cpu_to_le16(adapter->if_handle);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001336 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1337 req->ulp_num = BE_ULP1_NUM;
1338 req->type = BE_ETH_TX_RING_TYPE_STANDARD;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00001339 req->cq_id = cpu_to_le16(cq->id);
1340 req->queue_size = be_encoded_q_len(txq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001341 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00001342 ver = req->hdr.version;
1343
Sathya Perla77071332013-08-27 16:57:34 +05301344 status = be_cmd_notify_wait(adapter, &wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001345 if (!status) {
Sathya Perla77071332013-08-27 16:57:34 +05301346 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05301347
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001348 txq->id = le16_to_cpu(resp->cid);
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00001349 if (ver == 2)
1350 txo->db_offset = le32_to_cpu(resp->db_offset);
1351 else
1352 txo->db_offset = DB_TXULP1_OFFSET;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001353 txq->created = true;
1354 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001355
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001356 return status;
1357}
1358
Sathya Perla482c9e72011-06-29 23:33:17 +00001359/* Uses MCC */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001360int be_cmd_rxq_create(struct be_adapter *adapter,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301361 struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
1362 u32 if_id, u32 rss, u8 *rss_id)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001363{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001364 struct be_mcc_wrb *wrb;
1365 struct be_cmd_req_eth_rx_create *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001366 struct be_dma_mem *q_mem = &rxq->dma_mem;
1367 int status;
1368
Sathya Perla482c9e72011-06-29 23:33:17 +00001369 spin_lock_bh(&adapter->mcc_lock);
Sathya Perlab31c50a2009-09-17 10:30:13 -07001370
Sathya Perla482c9e72011-06-29 23:33:17 +00001371 wrb = wrb_from_mccq(adapter);
1372 if (!wrb) {
1373 status = -EBUSY;
1374 goto err;
1375 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001376 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001377
Somnath Kotur106df1e2011-10-27 07:12:13 +00001378 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301379 OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001380
1381 req->cq_id = cpu_to_le16(cq_id);
1382 req->frag_size = fls(frag_size) - 1;
1383 req->num_pages = 2;
1384 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1385 req->interface_id = cpu_to_le32(if_id);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001386 req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001387 req->rss_queue = cpu_to_le32(rss);
1388
Sathya Perla482c9e72011-06-29 23:33:17 +00001389 status = be_mcc_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001390 if (!status) {
1391 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05301392
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001393 rxq->id = le16_to_cpu(resp->id);
1394 rxq->created = true;
Sathya Perla3abcded2010-10-03 22:12:27 -07001395 *rss_id = resp->rss_id;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001396 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001397
Sathya Perla482c9e72011-06-29 23:33:17 +00001398err:
1399 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001400 return status;
1401}
1402
Sathya Perlab31c50a2009-09-17 10:30:13 -07001403/* Generic destroyer function for all types of queues
1404 * Uses Mbox
1405 */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001406int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301407 int queue_type)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001408{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001409 struct be_mcc_wrb *wrb;
1410 struct be_cmd_req_q_destroy *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001411 u8 subsys = 0, opcode = 0;
1412 int status;
1413
Ivan Vecera29849612010-12-14 05:43:19 +00001414 if (mutex_lock_interruptible(&adapter->mbox_lock))
1415 return -1;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001416
Sathya Perlab31c50a2009-09-17 10:30:13 -07001417 wrb = wrb_from_mbox(adapter);
1418 req = embedded_payload(wrb);
1419
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001420 switch (queue_type) {
1421 case QTYPE_EQ:
1422 subsys = CMD_SUBSYSTEM_COMMON;
1423 opcode = OPCODE_COMMON_EQ_DESTROY;
1424 break;
1425 case QTYPE_CQ:
1426 subsys = CMD_SUBSYSTEM_COMMON;
1427 opcode = OPCODE_COMMON_CQ_DESTROY;
1428 break;
1429 case QTYPE_TXQ:
1430 subsys = CMD_SUBSYSTEM_ETH;
1431 opcode = OPCODE_ETH_TX_DESTROY;
1432 break;
1433 case QTYPE_RXQ:
1434 subsys = CMD_SUBSYSTEM_ETH;
1435 opcode = OPCODE_ETH_RX_DESTROY;
1436 break;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001437 case QTYPE_MCCQ:
1438 subsys = CMD_SUBSYSTEM_COMMON;
1439 opcode = OPCODE_COMMON_MCC_DESTROY;
1440 break;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001441 default:
Sathya Perla5f0b8492009-07-27 22:52:56 +00001442 BUG();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001443 }
Ajit Khaparded744b442009-12-03 06:12:06 +00001444
Somnath Kotur106df1e2011-10-27 07:12:13 +00001445 be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301446 NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001447 req->id = cpu_to_le16(q->id);
1448
Sathya Perlab31c50a2009-09-17 10:30:13 -07001449 status = be_mbox_notify_wait(adapter);
Padmanabh Ratnakaraa790db2012-10-20 06:03:25 +00001450 q->created = false;
Sathya Perla5f0b8492009-07-27 22:52:56 +00001451
Ivan Vecera29849612010-12-14 05:43:19 +00001452 mutex_unlock(&adapter->mbox_lock);
Sathya Perla482c9e72011-06-29 23:33:17 +00001453 return status;
1454}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001455
Sathya Perla482c9e72011-06-29 23:33:17 +00001456/* Uses MCC */
1457int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1458{
1459 struct be_mcc_wrb *wrb;
1460 struct be_cmd_req_q_destroy *req;
1461 int status;
1462
1463 spin_lock_bh(&adapter->mcc_lock);
1464
1465 wrb = wrb_from_mccq(adapter);
1466 if (!wrb) {
1467 status = -EBUSY;
1468 goto err;
1469 }
1470 req = embedded_payload(wrb);
1471
Somnath Kotur106df1e2011-10-27 07:12:13 +00001472 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301473 OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
Sathya Perla482c9e72011-06-29 23:33:17 +00001474 req->id = cpu_to_le16(q->id);
1475
1476 status = be_mcc_notify_wait(adapter);
Padmanabh Ratnakaraa790db2012-10-20 06:03:25 +00001477 q->created = false;
Sathya Perla482c9e72011-06-29 23:33:17 +00001478
1479err:
1480 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001481 return status;
1482}
1483
Sathya Perlab31c50a2009-09-17 10:30:13 -07001484/* Create an rx filtering policy configuration on an i/f
Sathya Perlabea50982013-08-27 16:57:33 +05301485 * Will use MBOX only if MCCQ has not been created.
Sathya Perlab31c50a2009-09-17 10:30:13 -07001486 */
Sathya Perla73d540f2009-10-14 20:20:42 +00001487int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00001488 u32 *if_handle, u32 domain)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001489{
Sathya Perlabea50982013-08-27 16:57:33 +05301490 struct be_mcc_wrb wrb = {0};
Sathya Perlab31c50a2009-09-17 10:30:13 -07001491 struct be_cmd_req_if_create *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001492 int status;
1493
Sathya Perlabea50982013-08-27 16:57:33 +05301494 req = embedded_payload(&wrb);
Somnath Kotur106df1e2011-10-27 07:12:13 +00001495 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301496 OPCODE_COMMON_NTWK_INTERFACE_CREATE,
1497 sizeof(*req), &wrb, NULL);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001498 req->hdr.domain = domain;
Sathya Perla73d540f2009-10-14 20:20:42 +00001499 req->capability_flags = cpu_to_le32(cap_flags);
1500 req->enable_flags = cpu_to_le32(en_flags);
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00001501 req->pmac_invalid = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001502
Sathya Perlabea50982013-08-27 16:57:33 +05301503 status = be_cmd_notify_wait(adapter, &wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001504 if (!status) {
Sathya Perlabea50982013-08-27 16:57:33 +05301505 struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05301506
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001507 *if_handle = le32_to_cpu(resp->interface_id);
Sathya Perlab5bb9772013-07-23 15:25:01 +05301508
1509 /* Hack to retrieve VF's pmac-id on BE3 */
Kalesh AP18c57c72015-05-06 05:30:38 -04001510 if (BE3_chip(adapter) && be_virtfn(adapter))
Sathya Perlab5bb9772013-07-23 15:25:01 +05301511 adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001512 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001513 return status;
1514}
1515
Ajit Khaparde62219062016-02-10 22:45:53 +05301516/* Uses MCCQ if available else MBOX */
Sathya Perla30128032011-11-10 19:17:57 +00001517int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001518{
Ajit Khaparde62219062016-02-10 22:45:53 +05301519 struct be_mcc_wrb wrb = {0};
Sathya Perlab31c50a2009-09-17 10:30:13 -07001520 struct be_cmd_req_if_destroy *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001521 int status;
1522
Sathya Perla30128032011-11-10 19:17:57 +00001523 if (interface_id == -1)
Sathya Perlaf9449ab2011-10-24 02:45:01 +00001524 return 0;
Sathya Perlab31c50a2009-09-17 10:30:13 -07001525
Ajit Khaparde62219062016-02-10 22:45:53 +05301526 req = embedded_payload(&wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001527
Somnath Kotur106df1e2011-10-27 07:12:13 +00001528 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301529 OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
Ajit Khaparde62219062016-02-10 22:45:53 +05301530 sizeof(*req), &wrb, NULL);
Ajit Khaparde658681f2011-02-11 13:34:46 +00001531 req->hdr.domain = domain;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001532 req->interface_id = cpu_to_le32(interface_id);
Sathya Perlab31c50a2009-09-17 10:30:13 -07001533
Ajit Khaparde62219062016-02-10 22:45:53 +05301534 status = be_cmd_notify_wait(adapter, &wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001535 return status;
1536}
1537
1538/* Get stats is a non-embedded command: the request is not embedded inside
1539 * a WRB but is a separate dma memory block
Sathya Perlab31c50a2009-09-17 10:30:13 -07001540 * Uses asynchronous MCC
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001541 */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001542int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001543{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001544 struct be_mcc_wrb *wrb;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00001545 struct be_cmd_req_hdr *hdr;
Sathya Perla713d03942009-11-22 22:02:45 +00001546 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001547
Sathya Perlab31c50a2009-09-17 10:30:13 -07001548 spin_lock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001549
Sathya Perlab31c50a2009-09-17 10:30:13 -07001550 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001551 if (!wrb) {
1552 status = -EBUSY;
1553 goto err;
1554 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00001555 hdr = nonemb_cmd->va;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001556
Somnath Kotur106df1e2011-10-27 07:12:13 +00001557 be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301558 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
1559 nonemb_cmd);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00001560
Sathya Perlaca34fe32012-11-06 17:48:56 +00001561	/* BE2 supports only v0 of the cmd; BE3 and Lancer use v1, newer chips v2 */
Ajit Khaparde61000862013-10-03 16:16:33 -05001562	if (BE2_chip(adapter))
1563		hdr->version = 0;
1564	else if (BE3_chip(adapter) || lancer_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00001565		hdr->version = 1;
Ajit Khaparde61000862013-10-03 16:16:33 -05001566	else
1567		hdr->version = 2;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00001568
Suresh Reddyefaa4082015-07-10 05:32:48 -04001569 status = be_mcc_notify(adapter);
1570 if (status)
1571 goto err;
1572
Ajit Khapardeb2aebe62011-02-20 11:41:39 +00001573 adapter->stats_cmd_sent = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001574
Sathya Perla713d03942009-11-22 22:02:45 +00001575err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07001576 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla713d03942009-11-22 22:02:45 +00001577 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001578}
1579
Selvin Xavier005d5692011-05-16 07:36:35 +00001580/* Lancer Stats */
1581int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301582 struct be_dma_mem *nonemb_cmd)
Selvin Xavier005d5692011-05-16 07:36:35 +00001583{
Selvin Xavier005d5692011-05-16 07:36:35 +00001584 struct be_mcc_wrb *wrb;
1585 struct lancer_cmd_req_pport_stats *req;
Selvin Xavier005d5692011-05-16 07:36:35 +00001586 int status = 0;
1587
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00001588 if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
1589 CMD_SUBSYSTEM_ETH))
1590 return -EPERM;
1591
Selvin Xavier005d5692011-05-16 07:36:35 +00001592 spin_lock_bh(&adapter->mcc_lock);
1593
1594 wrb = wrb_from_mccq(adapter);
1595 if (!wrb) {
1596 status = -EBUSY;
1597 goto err;
1598 }
1599 req = nonemb_cmd->va;
Selvin Xavier005d5692011-05-16 07:36:35 +00001600
Somnath Kotur106df1e2011-10-27 07:12:13 +00001601 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301602 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
1603 wrb, nonemb_cmd);
Selvin Xavier005d5692011-05-16 07:36:35 +00001604
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +00001605 req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
Selvin Xavier005d5692011-05-16 07:36:35 +00001606 req->cmd_params.params.reset_stats = 0;
1607
Suresh Reddyefaa4082015-07-10 05:32:48 -04001608 status = be_mcc_notify(adapter);
1609 if (status)
1610 goto err;
1611
Selvin Xavier005d5692011-05-16 07:36:35 +00001612 adapter->stats_cmd_sent = true;
1613
1614err:
1615 spin_unlock_bh(&adapter->mcc_lock);
1616 return status;
1617}
1618
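/* Convert the f/w PHY_LINK_SPEED_* encoding to a link speed in Mbps */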
Sathya Perla323ff712012-09-28 04:39:43 +00001619static int be_mac_to_link_speed(int mac_speed)
1620{
1621 switch (mac_speed) {
1622 case PHY_LINK_SPEED_ZERO:
1623 return 0;
1624 case PHY_LINK_SPEED_10MBPS:
1625 return 10;
1626 case PHY_LINK_SPEED_100MBPS:
1627 return 100;
1628 case PHY_LINK_SPEED_1GBPS:
1629 return 1000;
1630 case PHY_LINK_SPEED_10GBPS:
1631 return 10000;
Vasundhara Volamb971f842013-08-06 09:27:15 +05301632 case PHY_LINK_SPEED_20GBPS:
1633 return 20000;
1634 case PHY_LINK_SPEED_25GBPS:
1635 return 25000;
1636 case PHY_LINK_SPEED_40GBPS:
1637 return 40000;
Sathya Perla323ff712012-09-28 04:39:43 +00001638 }
1639 return 0;
1640}
1641
1642/* Uses synchronous mcc
1643 * Returns link_speed in Mbps
1644 */
1645int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
1646 u8 *link_status, u32 dom)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001647{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001648 struct be_mcc_wrb *wrb;
1649 struct be_cmd_req_link_status *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001650 int status;
1651
Sathya Perlab31c50a2009-09-17 10:30:13 -07001652 spin_lock_bh(&adapter->mcc_lock);
1653
Ajit Khapardeb236916a2011-12-30 12:15:40 +00001654 if (link_status)
1655 *link_status = LINK_DOWN;
1656
Sathya Perlab31c50a2009-09-17 10:30:13 -07001657 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001658 if (!wrb) {
1659 status = -EBUSY;
1660 goto err;
1661 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001662 req = embedded_payload(wrb);
Sathya Perlaa8f447bd2009-06-18 00:10:27 +00001663
Padmanabh Ratnakar57cd80d2012-02-03 09:49:46 +00001664 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301665 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
1666 sizeof(*req), wrb, NULL);
Padmanabh Ratnakar57cd80d2012-02-03 09:49:46 +00001667
Sathya Perlaca34fe32012-11-06 17:48:56 +00001668	/* version 1 of the cmd is supported by all chips except BE2 */
1669 if (!BE2_chip(adapter))
Padmanabh Ratnakardaad6162011-11-16 02:03:45 +00001670 req->hdr.version = 1;
1671
Padmanabh Ratnakar57cd80d2012-02-03 09:49:46 +00001672 req->hdr.domain = dom;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001673
Sathya Perlab31c50a2009-09-17 10:30:13 -07001674 status = be_mcc_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001675 if (!status) {
1676 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05301677
Sathya Perla323ff712012-09-28 04:39:43 +00001678 if (link_speed) {
1679 *link_speed = resp->link_speed ?
1680 le16_to_cpu(resp->link_speed) * 10 :
1681 be_mac_to_link_speed(resp->mac_speed);
1682
1683 if (!resp->logical_link_status)
1684 *link_speed = 0;
Sarveshwar Bandi0388f252009-10-28 04:15:20 -07001685 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +00001686 if (link_status)
1687 *link_status = resp->logical_link_status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001688 }
1689
Sathya Perla713d03942009-11-22 22:02:45 +00001690err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07001691 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001692 return status;
1693}
1694
Ajit Khaparde609ff3b2011-02-20 11:42:07 +00001695/* Uses async mcc */
1696int be_cmd_get_die_temperature(struct be_adapter *adapter)
1697{
1698 struct be_mcc_wrb *wrb;
1699 struct be_cmd_req_get_cntl_addnl_attribs *req;
Vasundhara Volam117affe2013-08-06 09:27:20 +05301700 int status = 0;
Ajit Khaparde609ff3b2011-02-20 11:42:07 +00001701
1702 spin_lock_bh(&adapter->mcc_lock);
1703
1704 wrb = wrb_from_mccq(adapter);
1705 if (!wrb) {
1706 status = -EBUSY;
1707 goto err;
1708 }
1709 req = embedded_payload(wrb);
1710
Somnath Kotur106df1e2011-10-27 07:12:13 +00001711 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301712 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
1713 sizeof(*req), wrb, NULL);
Ajit Khaparde609ff3b2011-02-20 11:42:07 +00001714
Suresh Reddyefaa4082015-07-10 05:32:48 -04001715 status = be_mcc_notify(adapter);
Ajit Khaparde609ff3b2011-02-20 11:42:07 +00001716err:
1717 spin_unlock_bh(&adapter->mcc_lock);
1718 return status;
1719}
1720
Somnath Kotur311fddc2011-03-16 21:22:43 +00001721/* Uses MCCQ if available else MBOX */
Venkat Duvvurufd7ff6f2015-12-30 01:29:04 -05001722int be_cmd_get_fat_dump_len(struct be_adapter *adapter, u32 *dump_size)
Somnath Kotur311fddc2011-03-16 21:22:43 +00001723{
Venkat Duvvurufd7ff6f2015-12-30 01:29:04 -05001724 struct be_mcc_wrb wrb = {0};
Somnath Kotur311fddc2011-03-16 21:22:43 +00001725 struct be_cmd_req_get_fat *req;
1726 int status;
1727
Venkat Duvvurufd7ff6f2015-12-30 01:29:04 -05001728 req = embedded_payload(&wrb);
Somnath Kotur311fddc2011-03-16 21:22:43 +00001729
Somnath Kotur106df1e2011-10-27 07:12:13 +00001730 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Venkat Duvvurufd7ff6f2015-12-30 01:29:04 -05001731 OPCODE_COMMON_MANAGE_FAT, sizeof(*req),
1732 &wrb, NULL);
Somnath Kotur311fddc2011-03-16 21:22:43 +00001733 req->fat_operation = cpu_to_le32(QUERY_FAT);
Venkat Duvvurufd7ff6f2015-12-30 01:29:04 -05001734 status = be_cmd_notify_wait(adapter, &wrb);
Somnath Kotur311fddc2011-03-16 21:22:43 +00001735 if (!status) {
Venkat Duvvurufd7ff6f2015-12-30 01:29:04 -05001736 struct be_cmd_resp_get_fat *resp = embedded_payload(&wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05301737
Venkat Duvvurufd7ff6f2015-12-30 01:29:04 -05001738 if (dump_size && resp->log_size)
1739 *dump_size = le32_to_cpu(resp->log_size) -
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001740 sizeof(u32);
Somnath Kotur311fddc2011-03-16 21:22:43 +00001741 }
Somnath Kotur311fddc2011-03-16 21:22:43 +00001742 return status;
1743}
1744
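/* Retrieve the FAT log in 60KB chunks via a temporary DMA buffer and copy
 * it into the caller's buffer. Uses synchronous MCC.
 */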
Venkat Duvvurufd7ff6f2015-12-30 01:29:04 -05001745int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
Somnath Kotur311fddc2011-03-16 21:22:43 +00001746{
1747 struct be_dma_mem get_fat_cmd;
1748 struct be_mcc_wrb *wrb;
1749 struct be_cmd_req_get_fat *req;
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001750 u32 offset = 0, total_size, buf_size,
1751 log_offset = sizeof(u32), payload_len;
Venkat Duvvurufd7ff6f2015-12-30 01:29:04 -05001752 int status;
Somnath Kotur311fddc2011-03-16 21:22:43 +00001753
1754 if (buf_len == 0)
Venkat Duvvurufd7ff6f2015-12-30 01:29:04 -05001755 return 0;
Somnath Kotur311fddc2011-03-16 21:22:43 +00001756
1757 total_size = buf_len;
1758
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001759 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05301760 get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
1761 get_fat_cmd.size,
1762 &get_fat_cmd.dma, GFP_ATOMIC);
Venkat Duvvurufd7ff6f2015-12-30 01:29:04 -05001763 if (!get_fat_cmd.va)
Vasundhara Volamc5f156d2014-09-02 09:56:54 +05301764 return -ENOMEM;
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001765
Somnath Kotur311fddc2011-03-16 21:22:43 +00001766 spin_lock_bh(&adapter->mcc_lock);
1767
Somnath Kotur311fddc2011-03-16 21:22:43 +00001768 while (total_size) {
1769 buf_size = min(total_size, (u32)60*1024);
1770 total_size -= buf_size;
1771
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001772 wrb = wrb_from_mccq(adapter);
1773 if (!wrb) {
1774 status = -EBUSY;
Somnath Kotur311fddc2011-03-16 21:22:43 +00001775 goto err;
1776 }
1777 req = get_fat_cmd.va;
Somnath Kotur311fddc2011-03-16 21:22:43 +00001778
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001779 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
Somnath Kotur106df1e2011-10-27 07:12:13 +00001780 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301781 OPCODE_COMMON_MANAGE_FAT, payload_len,
1782 wrb, &get_fat_cmd);
Somnath Kotur311fddc2011-03-16 21:22:43 +00001783
1784 req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1785 req->read_log_offset = cpu_to_le32(log_offset);
1786 req->read_log_length = cpu_to_le32(buf_size);
1787 req->data_buffer_size = cpu_to_le32(buf_size);
1788
1789 status = be_mcc_notify_wait(adapter);
1790 if (!status) {
1791 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
Kalesh AP03d28ff2014-09-19 15:46:56 +05301792
Somnath Kotur311fddc2011-03-16 21:22:43 +00001793 memcpy(buf + offset,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301794 resp->data_buffer,
1795 le32_to_cpu(resp->read_log_length));
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001796 } else {
Somnath Kotur311fddc2011-03-16 21:22:43 +00001797 dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
Somnath Koturfe2a70e2011-04-21 03:18:12 +00001798 goto err;
1799 }
Somnath Kotur311fddc2011-03-16 21:22:43 +00001800 offset += buf_size;
1801 log_offset += buf_size;
1802 }
1803err:
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05301804 dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
1805 get_fat_cmd.va, get_fat_cmd.dma);
Somnath Kotur311fddc2011-03-16 21:22:43 +00001806 spin_unlock_bh(&adapter->mcc_lock);
Vasundhara Volamc5f156d2014-09-02 09:56:54 +05301807 return status;
Somnath Kotur311fddc2011-03-16 21:22:43 +00001808}
1809
Sathya Perla04b71172011-09-27 13:30:27 -04001810/* Uses synchronous mcc */
Kalesh APe97e3cd2014-07-17 16:20:26 +05301811int be_cmd_get_fw_ver(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001812{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001813 struct be_mcc_wrb *wrb;
1814 struct be_cmd_req_get_fw_version *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001815 int status;
1816
Sathya Perla04b71172011-09-27 13:30:27 -04001817 spin_lock_bh(&adapter->mcc_lock);
Sathya Perlab31c50a2009-09-17 10:30:13 -07001818
Sathya Perla04b71172011-09-27 13:30:27 -04001819 wrb = wrb_from_mccq(adapter);
1820 if (!wrb) {
1821 status = -EBUSY;
1822 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001823 }
1824
Sathya Perla04b71172011-09-27 13:30:27 -04001825 req = embedded_payload(wrb);
Sathya Perla04b71172011-09-27 13:30:27 -04001826
Somnath Kotur106df1e2011-10-27 07:12:13 +00001827 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301828 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
1829 NULL);
Sathya Perla04b71172011-09-27 13:30:27 -04001830 status = be_mcc_notify_wait(adapter);
1831 if (!status) {
1832 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
Sathya Perlaacbafeb2014-09-02 09:56:46 +05301833
Vasundhara Volam242eb472014-09-12 17:39:15 +05301834 strlcpy(adapter->fw_ver, resp->firmware_version_string,
1835 sizeof(adapter->fw_ver));
1836 strlcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
1837 sizeof(adapter->fw_on_flash));
Sathya Perla04b71172011-09-27 13:30:27 -04001838 }
1839err:
1840 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001841 return status;
1842}
1843
Sathya Perlab31c50a2009-09-17 10:30:13 -07001844/* Set the EQ delay interval of an EQ to the specified value
1845 * Uses async mcc
1846 */
Kalesh APb502ae82014-09-19 15:46:51 +05301847static int __be_cmd_modify_eqd(struct be_adapter *adapter,
1848 struct be_set_eqd *set_eqd, int num)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001849{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001850 struct be_mcc_wrb *wrb;
1851 struct be_cmd_req_modify_eq_delay *req;
Sathya Perla2632baf2013-10-01 16:00:00 +05301852 int status = 0, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001853
Sathya Perlab31c50a2009-09-17 10:30:13 -07001854 spin_lock_bh(&adapter->mcc_lock);
1855
1856 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001857 if (!wrb) {
1858 status = -EBUSY;
1859 goto err;
1860 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001861 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001862
Somnath Kotur106df1e2011-10-27 07:12:13 +00001863 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301864 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
1865 NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001866
Sathya Perla2632baf2013-10-01 16:00:00 +05301867 req->num_eq = cpu_to_le32(num);
1868 for (i = 0; i < num; i++) {
1869 req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
1870 req->set_eqd[i].phase = 0;
1871 req->set_eqd[i].delay_multiplier =
1872 cpu_to_le32(set_eqd[i].delay_multiplier);
1873 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001874
Suresh Reddyefaa4082015-07-10 05:32:48 -04001875 status = be_mcc_notify(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001876err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07001877 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla713d03942009-11-22 22:02:45 +00001878 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001879}
1880
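/* The MODIFY_EQ_DELAY cmd accepts at most 8 EQs per call; issue it in
 * batches of up to 8.
 */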
Kalesh AP93676702014-09-12 17:39:20 +05301881int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
1882 int num)
1883{
1884 int num_eqs, i = 0;
1885
Suresh Reddyc8ba4ad02015-03-20 06:28:24 -04001886 while (num) {
1887 num_eqs = min(num, 8);
1888 __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
1889 i += num_eqs;
1890 num -= num_eqs;
Kalesh AP93676702014-09-12 17:39:20 +05301891 }
1892
1893 return 0;
1894}
1895
Sathya Perlab31c50a2009-09-17 10:30:13 -07001896/* Uses synchronous mcc */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001897int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
Vasundhara Volam435452a2015-03-20 06:28:23 -04001898 u32 num, u32 domain)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001899{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001900 struct be_mcc_wrb *wrb;
1901 struct be_cmd_req_vlan_config *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001902 int status;
1903
Sathya Perlab31c50a2009-09-17 10:30:13 -07001904 spin_lock_bh(&adapter->mcc_lock);
1905
1906 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001907 if (!wrb) {
1908 status = -EBUSY;
1909 goto err;
1910 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07001911 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001912
Somnath Kotur106df1e2011-10-27 07:12:13 +00001913 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301914 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
1915 wrb, NULL);
Vasundhara Volam435452a2015-03-20 06:28:23 -04001916 req->hdr.domain = domain;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001917
1918 req->interface_id = if_id;
Ajit Khaparde012bd382013-11-18 10:44:24 -06001919 req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001920 req->num_vlan = num;
Kalesh AP4d567d92014-05-09 13:29:17 +05301921 memcpy(req->normal_vlan, vtag_array,
1922 req->num_vlan * sizeof(vtag_array[0]));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001923
Sathya Perlab31c50a2009-09-17 10:30:13 -07001924 status = be_mcc_notify_wait(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001925err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07001926 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001927 return status;
1928}
1929
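/* Program the RX filter flags and, for multicast, the current MC address
 * list on the interface. Uses synchronous MCC.
 */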
Sathya Perlaac34b742015-02-06 08:18:40 -05001930static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001931{
Sathya Perla6ac7b682009-06-18 00:05:54 +00001932 struct be_mcc_wrb *wrb;
Sathya Perla5b8821b2011-08-02 19:57:44 +00001933 struct be_dma_mem *mem = &adapter->rx_filter;
1934 struct be_cmd_req_rx_filter *req = mem->va;
Sathya Perlae7b909a2009-11-22 22:01:10 +00001935 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001936
Sathya Perla8788fdc2009-07-27 22:52:03 +00001937 spin_lock_bh(&adapter->mcc_lock);
Sathya Perla6ac7b682009-06-18 00:05:54 +00001938
Sathya Perlab31c50a2009-09-17 10:30:13 -07001939 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001940 if (!wrb) {
1941 status = -EBUSY;
1942 goto err;
1943 }
Sathya Perla5b8821b2011-08-02 19:57:44 +00001944 memset(req, 0, sizeof(*req));
Somnath Kotur106df1e2011-10-27 07:12:13 +00001945 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05301946 OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
1947 wrb, mem);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001948
Sathya Perla5b8821b2011-08-02 19:57:44 +00001949 req->if_id = cpu_to_le32(adapter->if_handle);
Sathya Perlaac34b742015-02-06 08:18:40 -05001950 req->if_flags_mask = cpu_to_le32(flags);
1951 req->if_flags = (value == ON) ? req->if_flags_mask : 0;
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001952
Sathya Perlaac34b742015-02-06 08:18:40 -05001953 if (flags & BE_IF_FLAGS_MULTICAST) {
Sathya Perla5b8821b2011-08-02 19:57:44 +00001954 struct netdev_hw_addr *ha;
1955 int i = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001956
Padmanabh Ratnakar1610c792011-11-03 01:49:27 +00001957		/* Reset mcast promisc mode if it was already set: set the
1958		 * mask bit but leave the corresponding flags bit clear
1959		 */
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001960 req->if_flags_mask |=
1961 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
Sathya Perla92bf14a2013-08-27 16:57:32 +05301962 be_if_cap_flags(adapter));
Padmanabh Ratnakar016f97b2011-11-03 01:49:13 +00001963 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
Sathya Perla5b8821b2011-08-02 19:57:44 +00001964 netdev_for_each_mc_addr(ha, adapter->netdev)
1965 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
1966 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001967
Sathya Perlab6588872015-09-03 07:41:53 -04001968 status = be_mcc_notify_wait(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00001969err:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001970 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perlae7b909a2009-11-22 22:01:10 +00001971 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001972}
1973
Sathya Perlaac34b742015-02-06 08:18:40 -05001974int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1975{
1976 struct device *dev = &adapter->pdev->dev;
1977
1978 if ((flags & be_if_cap_flags(adapter)) != flags) {
1979 dev_warn(dev, "Cannot set rx filter flags 0x%x\n", flags);
1980 dev_warn(dev, "Interface is capable of 0x%x flags only\n",
1981 be_if_cap_flags(adapter));
1982 }
1983 flags &= be_if_cap_flags(adapter);
Kalesh AP196e3732015-10-12 03:47:21 -04001984 if (!flags)
1985 return -ENOTSUPP;
Sathya Perlaac34b742015-02-06 08:18:40 -05001986
1987 return __be_cmd_rx_filter(adapter, flags, value);
1988}
1989
Sathya Perlab31c50a2009-09-17 10:30:13 -07001990/* Uses synchronous mcc */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001991int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001992{
Sathya Perlab31c50a2009-09-17 10:30:13 -07001993 struct be_mcc_wrb *wrb;
1994 struct be_cmd_req_set_flow_control *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001995 int status;
1996
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00001997 if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
1998 CMD_SUBSYSTEM_COMMON))
1999 return -EPERM;
2000
Sathya Perlab31c50a2009-09-17 10:30:13 -07002001 spin_lock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002002
Sathya Perlab31c50a2009-09-17 10:30:13 -07002003 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00002004 if (!wrb) {
2005 status = -EBUSY;
2006 goto err;
2007 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07002008 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002009
Somnath Kotur106df1e2011-10-27 07:12:13 +00002010 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302011 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
2012 wrb, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002013
Suresh Reddyb29812c2014-09-12 17:39:17 +05302014 req->hdr.version = 1;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002015 req->tx_flow_control = cpu_to_le16((u16)tx_fc);
2016 req->rx_flow_control = cpu_to_le16((u16)rx_fc);
2017
Sathya Perlab31c50a2009-09-17 10:30:13 -07002018 status = be_mcc_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002019
Sathya Perla713d03942009-11-22 22:02:45 +00002020err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07002021 spin_unlock_bh(&adapter->mcc_lock);
Suresh Reddyb29812c2014-09-12 17:39:17 +05302022
2023 if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
2024 return -EOPNOTSUPP;
2025
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002026 return status;
2027}
2028
Sathya Perlab31c50a2009-09-17 10:30:13 -07002029/* Uses sync mcc */
Sathya Perla8788fdc2009-07-27 22:52:03 +00002030int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002031{
Sathya Perlab31c50a2009-09-17 10:30:13 -07002032 struct be_mcc_wrb *wrb;
2033 struct be_cmd_req_get_flow_control *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002034 int status;
2035
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002036 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
2037 CMD_SUBSYSTEM_COMMON))
2038 return -EPERM;
2039
Sathya Perlab31c50a2009-09-17 10:30:13 -07002040 spin_lock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002041
Sathya Perlab31c50a2009-09-17 10:30:13 -07002042 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00002043 if (!wrb) {
2044 status = -EBUSY;
2045 goto err;
2046 }
Sathya Perlab31c50a2009-09-17 10:30:13 -07002047 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002048
Somnath Kotur106df1e2011-10-27 07:12:13 +00002049 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302050 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
2051 wrb, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002052
Sathya Perlab31c50a2009-09-17 10:30:13 -07002053 status = be_mcc_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002054 if (!status) {
2055 struct be_cmd_resp_get_flow_control *resp =
2056 embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05302057
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002058 *tx_fc = le16_to_cpu(resp->tx_flow_control);
2059 *rx_fc = le16_to_cpu(resp->rx_flow_control);
2060 }
2061
Sathya Perla713d03942009-11-22 22:02:45 +00002062err:
Sathya Perlab31c50a2009-09-17 10:30:13 -07002063 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002064 return status;
2065}
2066
Sathya Perlab31c50a2009-09-17 10:30:13 -07002067/* Uses mbox */
Kalesh APe97e3cd2014-07-17 16:20:26 +05302068int be_cmd_query_fw_cfg(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002069{
Sathya Perlab31c50a2009-09-17 10:30:13 -07002070 struct be_mcc_wrb *wrb;
2071 struct be_cmd_req_query_fw_cfg *req;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002072 int status;
2073
Ivan Vecera29849612010-12-14 05:43:19 +00002074 if (mutex_lock_interruptible(&adapter->mbox_lock))
2075 return -1;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002076
Sathya Perlab31c50a2009-09-17 10:30:13 -07002077 wrb = wrb_from_mbox(adapter);
2078 req = embedded_payload(wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002079
Somnath Kotur106df1e2011-10-27 07:12:13 +00002080 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302081 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
2082 sizeof(*req), wrb, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002083
Sathya Perlab31c50a2009-09-17 10:30:13 -07002084 status = be_mbox_notify_wait(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002085 if (!status) {
2086 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05302087
Kalesh APe97e3cd2014-07-17 16:20:26 +05302088 adapter->port_num = le32_to_cpu(resp->phys_port);
2089 adapter->function_mode = le32_to_cpu(resp->function_mode);
2090 adapter->function_caps = le32_to_cpu(resp->function_caps);
2091 adapter->asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
Sathya Perlaacbafeb2014-09-02 09:56:46 +05302092 dev_info(&adapter->pdev->dev,
2093 "FW config: function_mode=0x%x, function_caps=0x%x\n",
2094 adapter->function_mode, adapter->function_caps);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002095 }
2096
Ivan Vecera29849612010-12-14 05:43:19 +00002097 mutex_unlock(&adapter->mbox_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002098 return status;
2099}
sarveshwarb14074ea2009-08-05 13:05:24 -07002100
Sathya Perlab31c50a2009-09-17 10:30:13 -07002101/* Uses mbox */
sarveshwarb14074ea2009-08-05 13:05:24 -07002102int be_cmd_reset_function(struct be_adapter *adapter)
2103{
Sathya Perlab31c50a2009-09-17 10:30:13 -07002104 struct be_mcc_wrb *wrb;
2105 struct be_cmd_req_hdr *req;
sarveshwarb14074ea2009-08-05 13:05:24 -07002106 int status;
2107
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00002108 if (lancer_chip(adapter)) {
Sathya Perla9fa465c2015-02-23 04:20:13 -05002109 iowrite32(SLI_PORT_CONTROL_IP_MASK,
2110 adapter->db + SLIPORT_CONTROL_OFFSET);
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00002111 status = lancer_wait_ready(adapter);
Sathya Perla9fa465c2015-02-23 04:20:13 -05002112 if (status)
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00002113 dev_err(&adapter->pdev->dev,
2114				"Adapter in non-recoverable error\n");
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00002115 return status;
2116 }
2117
Ivan Vecera29849612010-12-14 05:43:19 +00002118 if (mutex_lock_interruptible(&adapter->mbox_lock))
2119 return -1;
sarveshwarb14074ea2009-08-05 13:05:24 -07002120
Sathya Perlab31c50a2009-09-17 10:30:13 -07002121 wrb = wrb_from_mbox(adapter);
2122 req = embedded_payload(wrb);
sarveshwarb14074ea2009-08-05 13:05:24 -07002123
Somnath Kotur106df1e2011-10-27 07:12:13 +00002124 be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302125 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
2126 NULL);
sarveshwarb14074ea2009-08-05 13:05:24 -07002127
Sathya Perlab31c50a2009-09-17 10:30:13 -07002128 status = be_mbox_notify_wait(adapter);
sarveshwarb14074ea2009-08-05 13:05:24 -07002129
Ivan Vecera29849612010-12-14 05:43:19 +00002130 mutex_unlock(&adapter->mbox_lock);
sarveshwarb14074ea2009-08-05 13:05:24 -07002131 return status;
2132}
Ajit Khaparde84517482009-09-04 03:12:16 +00002133
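/* Program the RSS indirection table and hash key for the interface.
 * Version 1 of the cmd is used on all chips except BEx. Uses synchronous MCC.
 */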
Suresh Reddy594ad542013-04-25 23:03:20 +00002134int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
Ben Hutchings33cb0fa2014-05-15 02:01:23 +01002135 u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey)
Sathya Perla3abcded2010-10-03 22:12:27 -07002136{
2137 struct be_mcc_wrb *wrb;
2138 struct be_cmd_req_rss_config *req;
Sathya Perla3abcded2010-10-03 22:12:27 -07002139 int status;
2140
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302141 if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
2142 return 0;
2143
Kalesh APb51aa362014-05-09 13:29:19 +05302144 spin_lock_bh(&adapter->mcc_lock);
Sathya Perla3abcded2010-10-03 22:12:27 -07002145
Kalesh APb51aa362014-05-09 13:29:19 +05302146 wrb = wrb_from_mccq(adapter);
2147 if (!wrb) {
2148 status = -EBUSY;
2149 goto err;
2150 }
Sathya Perla3abcded2010-10-03 22:12:27 -07002151 req = embedded_payload(wrb);
2152
Somnath Kotur106df1e2011-10-27 07:12:13 +00002153 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302154 OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
Sathya Perla3abcded2010-10-03 22:12:27 -07002155
2156 req->if_id = cpu_to_le32(adapter->if_handle);
Suresh Reddy594ad542013-04-25 23:03:20 +00002157 req->enable_rss = cpu_to_le16(rss_hash_opts);
Sathya Perla3abcded2010-10-03 22:12:27 -07002158 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
Suresh Reddy594ad542013-04-25 23:03:20 +00002159
Kalesh APb51aa362014-05-09 13:29:19 +05302160 if (!BEx_chip(adapter))
Suresh Reddy594ad542013-04-25 23:03:20 +00002161 req->hdr.version = 1;
2162
Sathya Perla3abcded2010-10-03 22:12:27 -07002163 memcpy(req->cpu_table, rsstable, table_size);
Venkata Duvvurue2557872014-04-21 15:38:00 +05302164 memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
Sathya Perla3abcded2010-10-03 22:12:27 -07002165 be_dws_cpu_to_le(req->hash, sizeof(req->hash));
2166
Kalesh APb51aa362014-05-09 13:29:19 +05302167 status = be_mcc_notify_wait(adapter);
2168err:
2169 spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla3abcded2010-10-03 22:12:27 -07002170 return status;
2171}
2172
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002173/* Uses sync mcc */
2174int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302175 u8 bcn, u8 sts, u8 state)
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002176{
2177 struct be_mcc_wrb *wrb;
2178 struct be_cmd_req_enable_disable_beacon *req;
2179 int status;
2180
2181 spin_lock_bh(&adapter->mcc_lock);
2182
2183 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00002184 if (!wrb) {
2185 status = -EBUSY;
2186 goto err;
2187 }
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002188 req = embedded_payload(wrb);
2189
Somnath Kotur106df1e2011-10-27 07:12:13 +00002190 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302191 OPCODE_COMMON_ENABLE_DISABLE_BEACON,
2192 sizeof(*req), wrb, NULL);
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002193
2194 req->port_num = port_num;
2195 req->beacon_state = state;
2196 req->beacon_duration = bcn;
2197 req->status_duration = sts;
2198
2199 status = be_mcc_notify_wait(adapter);
2200
Sathya Perla713d03942009-11-22 22:02:45 +00002201err:
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002202 spin_unlock_bh(&adapter->mcc_lock);
2203 return status;
2204}
2205
2206/* Uses sync mcc */
2207int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
2208{
2209 struct be_mcc_wrb *wrb;
2210 struct be_cmd_req_get_beacon_state *req;
2211 int status;
2212
2213 spin_lock_bh(&adapter->mcc_lock);
2214
2215 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00002216 if (!wrb) {
2217 status = -EBUSY;
2218 goto err;
2219 }
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002220 req = embedded_payload(wrb);
2221
Somnath Kotur106df1e2011-10-27 07:12:13 +00002222 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302223 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
2224 wrb, NULL);
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002225
2226 req->port_num = port_num;
2227
2228 status = be_mcc_notify_wait(adapter);
2229 if (!status) {
2230 struct be_cmd_resp_get_beacon_state *resp =
2231 embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05302232
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002233 *state = resp->beacon_state;
2234 }
2235
Sathya Perla713d03942009-11-22 22:02:45 +00002236err:
Sarveshwar Bandifad9ab22009-10-12 04:23:15 -07002237 spin_unlock_bh(&adapter->mcc_lock);
2238 return status;
2239}
2240
Mark Leonarde36edd92014-09-12 17:39:18 +05302241/* Uses sync mcc */
2242int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
2243 u8 page_num, u8 *data)
2244{
2245 struct be_dma_mem cmd;
2246 struct be_mcc_wrb *wrb;
2247 struct be_cmd_req_port_type *req;
2248 int status;
2249
2250 if (page_num > TR_PAGE_A2)
2251 return -EINVAL;
2252
2253 cmd.size = sizeof(struct be_cmd_resp_port_type);
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05302254 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2255 GFP_ATOMIC);
Mark Leonarde36edd92014-09-12 17:39:18 +05302256 if (!cmd.va) {
2257 dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
2258 return -ENOMEM;
2259 }
Mark Leonarde36edd92014-09-12 17:39:18 +05302260
2261 spin_lock_bh(&adapter->mcc_lock);
2262
2263 wrb = wrb_from_mccq(adapter);
2264 if (!wrb) {
2265 status = -EBUSY;
2266 goto err;
2267 }
2268 req = cmd.va;
2269
2270 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2271 OPCODE_COMMON_READ_TRANSRECV_DATA,
2272 cmd.size, wrb, &cmd);
2273
2274 req->port = cpu_to_le32(adapter->hba_port_num);
2275 req->page_num = cpu_to_le32(page_num);
2276 status = be_mcc_notify_wait(adapter);
2277 if (!status) {
2278 struct be_cmd_resp_port_type *resp = cmd.va;
2279
2280 memcpy(data, resp->page_data, PAGE_DATA_LEN);
2281 }
2282err:
2283 spin_unlock_bh(&adapter->mcc_lock);
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05302284 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Mark Leonarde36edd92014-09-12 17:39:18 +05302285 return status;
2286}
2287
Suresh Reddya23113b2015-12-30 01:28:59 -05002288static int lancer_cmd_write_object(struct be_adapter *adapter,
2289 struct be_dma_mem *cmd, u32 data_size,
2290 u32 data_offset, const char *obj_name,
2291 u32 *data_written, u8 *change_status,
2292 u8 *addn_status)
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002293{
2294 struct be_mcc_wrb *wrb;
2295 struct lancer_cmd_req_write_object *req;
2296 struct lancer_cmd_resp_write_object *resp;
2297 void *ctxt = NULL;
2298 int status;
2299
2300 spin_lock_bh(&adapter->mcc_lock);
2301 adapter->flash_status = 0;
2302
2303 wrb = wrb_from_mccq(adapter);
2304 if (!wrb) {
2305 status = -EBUSY;
2306 goto err_unlock;
2307 }
2308
2309 req = embedded_payload(wrb);
2310
Somnath Kotur106df1e2011-10-27 07:12:13 +00002311 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302312 OPCODE_COMMON_WRITE_OBJECT,
2313 sizeof(struct lancer_cmd_req_write_object), wrb,
2314 NULL);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002315
2316 ctxt = &req->context;
2317 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302318 write_length, ctxt, data_size);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002319
2320 if (data_size == 0)
2321 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302322 eof, ctxt, 1);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002323 else
2324 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302325 eof, ctxt, 0);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002326
2327 be_dws_cpu_to_le(ctxt, sizeof(req->context));
2328 req->write_offset = cpu_to_le32(data_offset);
Vasundhara Volam242eb472014-09-12 17:39:15 +05302329 strlcpy(req->object_name, obj_name, sizeof(req->object_name));
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002330 req->descriptor_count = cpu_to_le32(1);
2331 req->buf_len = cpu_to_le32(data_size);
2332 req->addr_low = cpu_to_le32((cmd->dma +
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302333 sizeof(struct lancer_cmd_req_write_object))
2334 & 0xFFFFFFFF);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002335 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
2336 sizeof(struct lancer_cmd_req_write_object)));
2337
Suresh Reddyefaa4082015-07-10 05:32:48 -04002338 status = be_mcc_notify(adapter);
2339 if (status)
2340 goto err_unlock;
2341
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002342 spin_unlock_bh(&adapter->mcc_lock);
2343
Suresh Reddy5eeff632014-01-06 13:02:24 +05302344 if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
Somnath Kotur701962d2013-05-02 03:36:34 +00002345 msecs_to_jiffies(60000)))
Kalesh APfd451602014-07-17 16:20:21 +05302346 status = -ETIMEDOUT;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002347 else
2348 status = adapter->flash_status;
2349
2350 resp = embedded_payload(wrb);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002351 if (!status) {
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002352 *data_written = le32_to_cpu(resp->actual_write_len);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002353 *change_status = resp->change_status;
2354 } else {
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002355 *addn_status = resp->additional_status;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002356 }
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002357
2358 return status;
2359
2360err_unlock:
2361 spin_unlock_bh(&adapter->mcc_lock);
2362 return status;
2363}
2364
Ravikumar Nelavelli6809cee2014-09-12 17:39:19 +05302365int be_cmd_query_cable_type(struct be_adapter *adapter)
2366{
2367 u8 page_data[PAGE_DATA_LEN];
2368 int status;
2369
2370 status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
2371 page_data);
2372 if (!status) {
2373 switch (adapter->phy.interface_type) {
2374 case PHY_TYPE_QSFP:
2375 adapter->phy.cable_type =
2376 page_data[QSFP_PLUS_CABLE_TYPE_OFFSET];
2377 break;
2378 case PHY_TYPE_SFP_PLUS_10GB:
2379 adapter->phy.cable_type =
2380 page_data[SFP_PLUS_CABLE_TYPE_OFFSET];
2381 break;
2382 default:
2383 adapter->phy.cable_type = 0;
2384 break;
2385 }
2386 }
2387 return status;
2388}
2389
Vasundhara Volam21252372015-02-06 08:18:42 -05002390int be_cmd_query_sfp_info(struct be_adapter *adapter)
2391{
2392 u8 page_data[PAGE_DATA_LEN];
2393 int status;
2394
2395 status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
2396 page_data);
2397 if (!status) {
2398 strlcpy(adapter->phy.vendor_name, page_data +
2399 SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);
2400 strlcpy(adapter->phy.vendor_pn,
2401 page_data + SFP_VENDOR_PN_OFFSET,
2402 SFP_VENDOR_NAME_LEN - 1);
2403 }
2404
2405 return status;
2406}
2407
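/* Delete a named object from Lancer flash. Uses synchronous MCC. */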
Suresh Reddya23113b2015-12-30 01:28:59 -05002408static int lancer_cmd_delete_object(struct be_adapter *adapter,
2409 const char *obj_name)
Kalesh APf0613382014-08-01 17:47:32 +05302410{
2411 struct lancer_cmd_req_delete_object *req;
2412 struct be_mcc_wrb *wrb;
2413 int status;
2414
2415 spin_lock_bh(&adapter->mcc_lock);
2416
2417 wrb = wrb_from_mccq(adapter);
2418 if (!wrb) {
2419 status = -EBUSY;
2420 goto err;
2421 }
2422
2423 req = embedded_payload(wrb);
2424
2425 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2426 OPCODE_COMMON_DELETE_OBJECT,
2427 sizeof(*req), wrb, NULL);
2428
Vasundhara Volam242eb472014-09-12 17:39:15 +05302429 strlcpy(req->object_name, obj_name, sizeof(req->object_name));
Kalesh APf0613382014-08-01 17:47:32 +05302430
2431 status = be_mcc_notify_wait(adapter);
2432err:
2433 spin_unlock_bh(&adapter->mcc_lock);
2434 return status;
2435}
2436
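/* Reads up to 'data_size' bytes of the named flash object, starting at
 * 'data_offset', into the 'cmd' DMA buffer. On success the number of bytes
 * actually read and an end-of-file indication are returned to the caller.
 */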
Padmanabh Ratnakarde49bd52011-11-16 02:02:43 +00002437int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302438 u32 data_size, u32 data_offset, const char *obj_name,
2439 u32 *data_read, u32 *eof, u8 *addn_status)
Padmanabh Ratnakarde49bd52011-11-16 02:02:43 +00002440{
2441 struct be_mcc_wrb *wrb;
2442 struct lancer_cmd_req_read_object *req;
2443 struct lancer_cmd_resp_read_object *resp;
2444 int status;
2445
2446 spin_lock_bh(&adapter->mcc_lock);
2447
2448 wrb = wrb_from_mccq(adapter);
2449 if (!wrb) {
2450 status = -EBUSY;
2451 goto err_unlock;
2452 }
2453
2454 req = embedded_payload(wrb);
2455
2456 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302457 OPCODE_COMMON_READ_OBJECT,
2458 sizeof(struct lancer_cmd_req_read_object), wrb,
2459 NULL);
Padmanabh Ratnakarde49bd52011-11-16 02:02:43 +00002460
2461 req->desired_read_len = cpu_to_le32(data_size);
2462 req->read_offset = cpu_to_le32(data_offset);
2463	strlcpy(req->object_name, obj_name, sizeof(req->object_name));
2464 req->descriptor_count = cpu_to_le32(1);
2465 req->buf_len = cpu_to_le32(data_size);
2466 req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
2467 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
2468
2469 status = be_mcc_notify_wait(adapter);
2470
2471 resp = embedded_payload(wrb);
2472 if (!status) {
2473 *data_read = le32_to_cpu(resp->actual_read_len);
2474 *eof = le32_to_cpu(resp->eof);
2475 } else {
2476 *addn_status = resp->additional_status;
2477 }
2478
2479err_unlock:
2480 spin_unlock_bh(&adapter->mcc_lock);
2481 return status;
2482}
2483
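/* Issues a WRITE_FLASHROM command for one chunk of an image. The command
 * completes asynchronously: the routine waits up to 40 seconds on
 * et_cmd_compl and then returns the status posted in adapter->flash_status.
 */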
Suresh Reddya23113b2015-12-30 01:28:59 -05002484static int be_cmd_write_flashrom(struct be_adapter *adapter,
2485 struct be_dma_mem *cmd, u32 flash_type,
2486 u32 flash_opcode, u32 img_offset, u32 buf_size)
Ajit Khaparde84517482009-09-04 03:12:16 +00002487{
Sathya Perlab31c50a2009-09-17 10:30:13 -07002488 struct be_mcc_wrb *wrb;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002489 struct be_cmd_write_flashrom *req;
Ajit Khaparde84517482009-09-04 03:12:16 +00002490 int status;
2491
Sathya Perlab31c50a2009-09-17 10:30:13 -07002492 spin_lock_bh(&adapter->mcc_lock);
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07002493 adapter->flash_status = 0;
Sathya Perlab31c50a2009-09-17 10:30:13 -07002494
2495 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00002496 if (!wrb) {
2497 status = -EBUSY;
Dan Carpenter2892d9c2010-05-26 04:46:35 +00002498 goto err_unlock;
Sathya Perla713d03942009-11-22 22:02:45 +00002499 }
2500 req = cmd->va;
Sathya Perlab31c50a2009-09-17 10:30:13 -07002501
Somnath Kotur106df1e2011-10-27 07:12:13 +00002502 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05302503 OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb,
2504 cmd);
Ajit Khaparde84517482009-09-04 03:12:16 +00002505
2506 req->params.op_type = cpu_to_le32(flash_type);
Vasundhara Volam70a7b522015-02-06 08:18:39 -05002507 if (flash_type == OPTYPE_OFFSET_SPECIFIED)
2508 req->params.offset = cpu_to_le32(img_offset);
2509
Ajit Khaparde84517482009-09-04 03:12:16 +00002510 req->params.op_code = cpu_to_le32(flash_opcode);
2511 req->params.data_buf_size = cpu_to_le32(buf_size);
2512
Suresh Reddyefaa4082015-07-10 05:32:48 -04002513 status = be_mcc_notify(adapter);
2514 if (status)
2515 goto err_unlock;
2516
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07002517 spin_unlock_bh(&adapter->mcc_lock);
2518
Suresh Reddy5eeff632014-01-06 13:02:24 +05302519 if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2520 msecs_to_jiffies(40000)))
Kalesh APfd451602014-07-17 16:20:21 +05302521 status = -ETIMEDOUT;
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07002522 else
2523 status = adapter->flash_status;
Ajit Khaparde84517482009-09-04 03:12:16 +00002524
Dan Carpenter2892d9c2010-05-26 04:46:35 +00002525 return status;
2526
2527err_unlock:
2528 spin_unlock_bh(&adapter->mcc_lock);
Ajit Khaparde84517482009-09-04 03:12:16 +00002529 return status;
2530}
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002531
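/* Reads the 4-byte CRC of a flashed region (FLASHROM_OPER_REPORT) so that
 * it can be compared against the CRC embedded in a new image.
 */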
Suresh Reddya23113b2015-12-30 01:28:59 -05002532static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2533 u16 img_optype, u32 img_offset, u32 crc_offset)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002534{
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00002535 struct be_cmd_read_flash_crc *req;
Vasundhara Volam70a7b522015-02-06 08:18:39 -05002536 struct be_mcc_wrb *wrb;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002537 int status;
2538
2539 spin_lock_bh(&adapter->mcc_lock);
2540
2541 wrb = wrb_from_mccq(adapter);
Sathya Perla713d03942009-11-22 22:02:45 +00002542 if (!wrb) {
2543 status = -EBUSY;
2544 goto err;
2545 }
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002546 req = embedded_payload(wrb);
2547
Somnath Kotur106df1e2011-10-27 07:12:13 +00002548 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00002549 OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
2550 wrb, NULL);
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002551
Vasundhara Volam70a7b522015-02-06 08:18:39 -05002552 req->params.op_type = cpu_to_le32(img_optype);
2553 if (img_optype == OPTYPE_OFFSET_SPECIFIED)
2554 req->params.offset = cpu_to_le32(img_offset + crc_offset);
2555 else
2556 req->params.offset = cpu_to_le32(crc_offset);
2557
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002558 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002559 req->params.data_buf_size = cpu_to_le32(0x4);
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002560
2561 status = be_mcc_notify_wait(adapter);
2562 if (!status)
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00002563 memcpy(flashed_crc, req->crc, 4);
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002564
Sathya Perla713d03942009-11-22 22:02:45 +00002565err:
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002566 spin_unlock_bh(&adapter->mcc_lock);
2567 return status;
2568}
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002569
Suresh Reddya23113b2015-12-30 01:28:59 -05002570static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2571
2572static bool phy_flashing_required(struct be_adapter *adapter)
2573{
2574 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
2575 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
2576}
2577
2578static bool is_comp_in_ufi(struct be_adapter *adapter,
2579 struct flash_section_info *fsec, int type)
2580{
2581 int i = 0, img_type = 0;
2582 struct flash_section_info_g2 *fsec_g2 = NULL;
2583
2584 if (BE2_chip(adapter))
2585 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2586
2587 for (i = 0; i < MAX_FLASH_COMP; i++) {
2588 if (fsec_g2)
2589 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2590 else
2591 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2592
2593 if (img_type == type)
2594 return true;
2595 }
2596 return false;
2597}
2598
2599static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2600 int header_size,
2601 const struct firmware *fw)
2602{
2603 struct flash_section_info *fsec = NULL;
2604 const u8 *p = fw->data;
2605
2606 p += header_size;
2607 while (p < (fw->data + fw->size)) {
2608 fsec = (struct flash_section_info *)p;
2609 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2610 return fsec;
2611 p += 32;
2612 }
2613 return NULL;
2614}
2615
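/* Compares the CRC already present in flash with the CRC at the end of the
 * corresponding image component in the UFI file; *crc_match tells the caller
 * whether re-flashing that section can be skipped.
 */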
2616static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
2617 u32 img_offset, u32 img_size, int hdr_size,
2618 u16 img_optype, bool *crc_match)
2619{
2620 u32 crc_offset;
2621 int status;
2622 u8 crc[4];
2623
2624 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
2625 img_size - 4);
2626 if (status)
2627 return status;
2628
2629 crc_offset = hdr_size + img_offset + img_size - 4;
2630
2631 /* Skip flashing, if crc of flashed region matches */
2632 if (!memcmp(crc, p + crc_offset, 4))
2633 *crc_match = true;
2634 else
2635 *crc_match = false;
2636
2637 return status;
2638}
2639
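/* Writes an image component to flash in 32KB chunks: intermediate chunks use
 * the SAVE op-code and the final chunk triggers the actual FLASH operation
 * (PHY firmware uses its own pair of op-codes).
 */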
2640static int be_flash(struct be_adapter *adapter, const u8 *img,
2641 struct be_dma_mem *flash_cmd, int optype, int img_size,
2642 u32 img_offset)
2643{
2644 u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
2645 struct be_cmd_write_flashrom *req = flash_cmd->va;
2646 int status;
2647
2648 while (total_bytes) {
2649 num_bytes = min_t(u32, 32 * 1024, total_bytes);
2650
2651 total_bytes -= num_bytes;
2652
2653 if (!total_bytes) {
2654 if (optype == OPTYPE_PHY_FW)
2655 flash_op = FLASHROM_OPER_PHY_FLASH;
2656 else
2657 flash_op = FLASHROM_OPER_FLASH;
2658 } else {
2659 if (optype == OPTYPE_PHY_FW)
2660 flash_op = FLASHROM_OPER_PHY_SAVE;
2661 else
2662 flash_op = FLASHROM_OPER_SAVE;
2663 }
2664
2665 memcpy(req->data_buf, img, num_bytes);
2666 img += num_bytes;
2667 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
2668 flash_op, img_offset +
2669 bytes_sent, num_bytes);
2670 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
2671 optype == OPTYPE_PHY_FW)
2672 break;
2673 else if (status)
2674 return status;
2675
2676 bytes_sent += num_bytes;
2677 }
2678 return 0;
2679}
2680
2681/* For BE2, BE3 and BE3-R */
2682static int be_flash_BEx(struct be_adapter *adapter,
2683 const struct firmware *fw,
2684 struct be_dma_mem *flash_cmd, int num_of_images)
2685{
2686 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
2687 struct device *dev = &adapter->pdev->dev;
2688 struct flash_section_info *fsec = NULL;
2689 int status, i, filehdr_size, num_comp;
2690 const struct flash_comp *pflashcomp;
2691 bool crc_match;
2692 const u8 *p;
2693
2694 struct flash_comp gen3_flash_types[] = {
2695 { BE3_ISCSI_PRIMARY_IMAGE_START, OPTYPE_ISCSI_ACTIVE,
2696 BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_ISCSI},
2697 { BE3_REDBOOT_START, OPTYPE_REDBOOT,
2698 BE3_REDBOOT_COMP_MAX_SIZE, IMAGE_BOOT_CODE},
2699 { BE3_ISCSI_BIOS_START, OPTYPE_BIOS,
2700 BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_ISCSI},
2701 { BE3_PXE_BIOS_START, OPTYPE_PXE_BIOS,
2702 BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_PXE},
2703 { BE3_FCOE_BIOS_START, OPTYPE_FCOE_BIOS,
2704 BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_FCOE},
2705 { BE3_ISCSI_BACKUP_IMAGE_START, OPTYPE_ISCSI_BACKUP,
2706 BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_ISCSI},
2707 { BE3_FCOE_PRIMARY_IMAGE_START, OPTYPE_FCOE_FW_ACTIVE,
2708 BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_FCOE},
2709 { BE3_FCOE_BACKUP_IMAGE_START, OPTYPE_FCOE_FW_BACKUP,
2710 BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_FCOE},
2711 { BE3_NCSI_START, OPTYPE_NCSI_FW,
2712 BE3_NCSI_COMP_MAX_SIZE, IMAGE_NCSI},
2713 { BE3_PHY_FW_START, OPTYPE_PHY_FW,
2714 BE3_PHY_FW_COMP_MAX_SIZE, IMAGE_FIRMWARE_PHY}
2715 };
2716
2717 struct flash_comp gen2_flash_types[] = {
2718 { BE2_ISCSI_PRIMARY_IMAGE_START, OPTYPE_ISCSI_ACTIVE,
2719 BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_ISCSI},
2720 { BE2_REDBOOT_START, OPTYPE_REDBOOT,
2721 BE2_REDBOOT_COMP_MAX_SIZE, IMAGE_BOOT_CODE},
2722 { BE2_ISCSI_BIOS_START, OPTYPE_BIOS,
2723 BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_ISCSI},
2724 { BE2_PXE_BIOS_START, OPTYPE_PXE_BIOS,
2725 BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_PXE},
2726 { BE2_FCOE_BIOS_START, OPTYPE_FCOE_BIOS,
2727 BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_FCOE},
2728 { BE2_ISCSI_BACKUP_IMAGE_START, OPTYPE_ISCSI_BACKUP,
2729 BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_ISCSI},
2730 { BE2_FCOE_PRIMARY_IMAGE_START, OPTYPE_FCOE_FW_ACTIVE,
2731 BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_FCOE},
2732 { BE2_FCOE_BACKUP_IMAGE_START, OPTYPE_FCOE_FW_BACKUP,
2733 BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_FCOE}
2734 };
2735
2736 if (BE3_chip(adapter)) {
2737 pflashcomp = gen3_flash_types;
2738 filehdr_size = sizeof(struct flash_file_hdr_g3);
2739 num_comp = ARRAY_SIZE(gen3_flash_types);
2740 } else {
2741 pflashcomp = gen2_flash_types;
2742 filehdr_size = sizeof(struct flash_file_hdr_g2);
2743 num_comp = ARRAY_SIZE(gen2_flash_types);
2744 img_hdrs_size = 0;
2745 }
2746
2747	/* Get flash section info */
2748 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
2749 if (!fsec) {
2750 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
2751 return -1;
2752 }
2753 for (i = 0; i < num_comp; i++) {
2754 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
2755 continue;
2756
2757 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
2758 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2759 continue;
2760
2761 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
2762 !phy_flashing_required(adapter))
2763 continue;
2764
2765 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
2766 status = be_check_flash_crc(adapter, fw->data,
2767 pflashcomp[i].offset,
2768 pflashcomp[i].size,
2769 filehdr_size +
2770 img_hdrs_size,
2771 OPTYPE_REDBOOT, &crc_match);
2772 if (status) {
2773 dev_err(dev,
2774 "Could not get CRC for 0x%x region\n",
2775 pflashcomp[i].optype);
2776 continue;
2777 }
2778
2779 if (crc_match)
2780 continue;
2781 }
2782
2783 p = fw->data + filehdr_size + pflashcomp[i].offset +
2784 img_hdrs_size;
2785 if (p + pflashcomp[i].size > fw->data + fw->size)
2786 return -1;
2787
2788 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
2789 pflashcomp[i].size, 0);
2790 if (status) {
2791 dev_err(dev, "Flashing section type 0x%x failed\n",
2792 pflashcomp[i].img_type);
2793 return status;
2794 }
2795 }
2796 return 0;
2797}
2798
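/* Derives the flash op-type from the section image type when the UFI section
 * entry does not carry an explicit op-type (older image layout).
 */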
2799static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
2800{
2801 u32 img_type = le32_to_cpu(fsec_entry.type);
2802 u16 img_optype = le16_to_cpu(fsec_entry.optype);
2803
2804 if (img_optype != 0xFFFF)
2805 return img_optype;
2806
2807 switch (img_type) {
2808 case IMAGE_FIRMWARE_ISCSI:
2809 img_optype = OPTYPE_ISCSI_ACTIVE;
2810 break;
2811 case IMAGE_BOOT_CODE:
2812 img_optype = OPTYPE_REDBOOT;
2813 break;
2814 case IMAGE_OPTION_ROM_ISCSI:
2815 img_optype = OPTYPE_BIOS;
2816 break;
2817 case IMAGE_OPTION_ROM_PXE:
2818 img_optype = OPTYPE_PXE_BIOS;
2819 break;
2820 case IMAGE_OPTION_ROM_FCOE:
2821 img_optype = OPTYPE_FCOE_BIOS;
2822 break;
2823 case IMAGE_FIRMWARE_BACKUP_ISCSI:
2824 img_optype = OPTYPE_ISCSI_BACKUP;
2825 break;
2826 case IMAGE_NCSI:
2827 img_optype = OPTYPE_NCSI_FW;
2828 break;
2829 case IMAGE_FLASHISM_JUMPVECTOR:
2830 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
2831 break;
2832 case IMAGE_FIRMWARE_PHY:
2833 img_optype = OPTYPE_SH_PHY_FW;
2834 break;
2835 case IMAGE_REDBOOT_DIR:
2836 img_optype = OPTYPE_REDBOOT_DIR;
2837 break;
2838 case IMAGE_REDBOOT_CONFIG:
2839 img_optype = OPTYPE_REDBOOT_CONFIG;
2840 break;
2841 case IMAGE_UFI_DIR:
2842 img_optype = OPTYPE_UFI_DIR;
2843 break;
2844 default:
2845 break;
2846 }
2847
2848 return img_optype;
2849}
2850
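/* Flashes a Skyhawk UFI image section by section. Offset-based flashing is
 * tried first; if the firmware on the card rejects it, the loop is retried
 * using the older op-type based mechanism. Sections whose flash CRC already
 * matches the image are skipped.
 */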
2851static int be_flash_skyhawk(struct be_adapter *adapter,
2852 const struct firmware *fw,
2853 struct be_dma_mem *flash_cmd, int num_of_images)
2854{
2855 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
2856 bool crc_match, old_fw_img, flash_offset_support = true;
2857 struct device *dev = &adapter->pdev->dev;
2858 struct flash_section_info *fsec = NULL;
2859 u32 img_offset, img_size, img_type;
2860 u16 img_optype, flash_optype;
2861 int status, i, filehdr_size;
2862 const u8 *p;
2863
2864 filehdr_size = sizeof(struct flash_file_hdr_g3);
2865 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
2866 if (!fsec) {
2867 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
2868 return -EINVAL;
2869 }
2870
2871retry_flash:
2872 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
2873 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
2874 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
2875 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2876 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
2877 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
2878
2879 if (img_optype == 0xFFFF)
2880 continue;
2881
2882 if (flash_offset_support)
2883 flash_optype = OPTYPE_OFFSET_SPECIFIED;
2884 else
2885 flash_optype = img_optype;
2886
2887 /* Don't bother verifying CRC if an old FW image is being
2888 * flashed
2889 */
2890 if (old_fw_img)
2891 goto flash;
2892
2893 status = be_check_flash_crc(adapter, fw->data, img_offset,
2894 img_size, filehdr_size +
2895 img_hdrs_size, flash_optype,
2896 &crc_match);
2897 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
2898 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
2899 /* The current FW image on the card does not support
2900 * OFFSET based flashing. Retry using older mechanism
2901 * of OPTYPE based flashing
2902 */
2903 if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
2904 flash_offset_support = false;
2905 goto retry_flash;
2906 }
2907
2908 /* The current FW image on the card does not recognize
2909 * the new FLASH op_type. The FW download is partially
2910 * complete. Reboot the server now to enable FW image
2911 * to recognize the new FLASH op_type. To complete the
2912 * remaining process, download the same FW again after
2913 * the reboot.
2914 */
2915 dev_err(dev, "Flash incomplete. Reset the server\n");
2916 dev_err(dev, "Download FW image again after reset\n");
2917 return -EAGAIN;
2918 } else if (status) {
2919 dev_err(dev, "Could not get CRC for 0x%x region\n",
2920 img_optype);
2921 return -EFAULT;
2922 }
2923
2924 if (crc_match)
2925 continue;
2926
2927flash:
2928 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
2929 if (p + img_size > fw->data + fw->size)
2930 return -1;
2931
2932 status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
2933 img_offset);
2934
2935 /* The current FW image on the card does not support OFFSET
2936 * based flashing. Retry using older mechanism of OPTYPE based
2937 * flashing
2938 */
2939 if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
2940 flash_optype == OPTYPE_OFFSET_SPECIFIED) {
2941 flash_offset_support = false;
2942 goto retry_flash;
2943 }
2944
2945 /* For old FW images ignore ILLEGAL_FIELD error or errors on
2946 * UFI_DIR region
2947 */
2948 if (old_fw_img &&
2949 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
2950 (img_optype == OPTYPE_UFI_DIR &&
2951 base_status(status) == MCC_STATUS_FAILED))) {
2952 continue;
2953 } else if (status) {
2954 dev_err(dev, "Flashing section type 0x%x failed\n",
2955 img_type);
Suresh Reddy6b525782015-12-30 01:29:00 -05002956
2957 switch (addl_status(status)) {
2958 case MCC_ADDL_STATUS_MISSING_SIGNATURE:
2959 dev_err(dev,
2960 "Digital signature missing in FW\n");
2961 return -EINVAL;
2962 case MCC_ADDL_STATUS_INVALID_SIGNATURE:
2963 dev_err(dev,
2964 "Invalid digital signature in FW\n");
2965 return -EINVAL;
2966 default:
2967 return -EFAULT;
2968 }
Suresh Reddya23113b2015-12-30 01:28:59 -05002969 }
2970 }
2971 return 0;
2972}
2973
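/* Downloads a firmware image to a Lancer adapter: the image is written in
 * LANCER_FW_DOWNLOAD_CHUNK sized pieces via lancer_cmd_write_object() and
 * then committed with a zero-length write. If the new firmware requires it,
 * the adapter is reset to activate it.
 */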
2974int lancer_fw_download(struct be_adapter *adapter,
2975 const struct firmware *fw)
2976{
2977 struct device *dev = &adapter->pdev->dev;
2978 struct be_dma_mem flash_cmd;
2979 const u8 *data_ptr = NULL;
2980 u8 *dest_image_ptr = NULL;
2981 size_t image_size = 0;
2982 u32 chunk_size = 0;
2983 u32 data_written = 0;
2984 u32 offset = 0;
2985 int status = 0;
2986 u8 add_status = 0;
2987 u8 change_status;
2988
2989 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2990 dev_err(dev, "FW image size should be multiple of 4\n");
2991 return -EINVAL;
2992 }
2993
2994 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2995 + LANCER_FW_DOWNLOAD_CHUNK;
2996 flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
2997 &flash_cmd.dma, GFP_KERNEL);
2998 if (!flash_cmd.va)
2999 return -ENOMEM;
3000
3001 dest_image_ptr = flash_cmd.va +
3002 sizeof(struct lancer_cmd_req_write_object);
3003 image_size = fw->size;
3004 data_ptr = fw->data;
3005
3006 while (image_size) {
3007 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3008
3009 /* Copy the image chunk content. */
3010 memcpy(dest_image_ptr, data_ptr, chunk_size);
3011
3012 status = lancer_cmd_write_object(adapter, &flash_cmd,
3013 chunk_size, offset,
3014 LANCER_FW_DOWNLOAD_LOCATION,
3015 &data_written, &change_status,
3016 &add_status);
3017 if (status)
3018 break;
3019
3020 offset += data_written;
3021 data_ptr += data_written;
3022 image_size -= data_written;
3023 }
3024
3025 if (!status) {
3026 /* Commit the FW written */
3027 status = lancer_cmd_write_object(adapter, &flash_cmd,
3028 0, offset,
3029 LANCER_FW_DOWNLOAD_LOCATION,
3030 &data_written, &change_status,
3031 &add_status);
3032 }
3033
3034 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
3035 if (status) {
3036 dev_err(dev, "Firmware load error\n");
3037 return be_cmd_status(status);
3038 }
3039
3040 dev_info(dev, "Firmware flashed successfully\n");
3041
3042 if (change_status == LANCER_FW_RESET_NEEDED) {
3043 dev_info(dev, "Resetting adapter to activate new FW\n");
3044 status = lancer_physdev_ctrl(adapter,
3045 PHYSDEV_CONTROL_FW_RESET_MASK);
3046 if (status) {
3047 dev_err(dev, "Adapter busy, could not reset FW\n");
3048 dev_err(dev, "Reboot server to activate new FW\n");
3049 }
3050 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3051 dev_info(dev, "Reboot server to activate new FW\n");
3052 }
3053
3054 return 0;
3055}
3056
3057/* Check if the flash image file is compatible with the adapter that
3058 * is being flashed.
3059 */
3060static bool be_check_ufi_compatibility(struct be_adapter *adapter,
3061 struct flash_file_hdr_g3 *fhdr)
3062{
3063 if (!fhdr) {
3064 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
3065 return false;
3066 }
3067
3068 /* First letter of the build version is used to identify
3069 * which chip this image file is meant for.
3070 */
3071 switch (fhdr->build[0]) {
3072 case BLD_STR_UFI_TYPE_SH:
3073 if (!skyhawk_chip(adapter))
3074 return false;
3075 break;
3076 case BLD_STR_UFI_TYPE_BE3:
3077 if (!BE3_chip(adapter))
3078 return false;
3079 break;
3080 case BLD_STR_UFI_TYPE_BE2:
3081 if (!BE2_chip(adapter))
3082 return false;
3083 break;
3084 default:
3085 return false;
3086 }
3087
3088 /* In BE3 FW images the "asic_type_rev" field doesn't track the
3089 * asic_rev of the chips it is compatible with.
3090 * When asic_type_rev is 0 the image is compatible only with
3091 * pre-BE3-R chips (asic_rev < 0x10)
3092 */
3093 if (BEx_chip(adapter) && fhdr->asic_type_rev == 0)
3094 return adapter->asic_rev < 0x10;
3095 else
3096 return (fhdr->asic_type_rev >= adapter->asic_rev);
3097}
3098
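/* Firmware download entry point for BE2/BE3/Skyhawk adapters: checks that
 * the UFI file is compatible with the adapter and invokes the chip-specific
 * flashing routine for each applicable image header in the file.
 */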
3099int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3100{
3101 struct device *dev = &adapter->pdev->dev;
3102 struct flash_file_hdr_g3 *fhdr3;
3103 struct image_hdr *img_hdr_ptr;
3104 int status = 0, i, num_imgs;
3105 struct be_dma_mem flash_cmd;
3106
3107 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
3108 if (!be_check_ufi_compatibility(adapter, fhdr3)) {
3109 dev_err(dev, "Flash image is not compatible with adapter\n");
3110 return -EINVAL;
3111 }
3112
3113 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3114 flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
3115 GFP_KERNEL);
3116 if (!flash_cmd.va)
3117 return -ENOMEM;
3118
3119 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3120 for (i = 0; i < num_imgs; i++) {
3121 img_hdr_ptr = (struct image_hdr *)(fw->data +
3122 (sizeof(struct flash_file_hdr_g3) +
3123 i * sizeof(struct image_hdr)));
3124 if (!BE2_chip(adapter) &&
3125 le32_to_cpu(img_hdr_ptr->imageid) != 1)
3126 continue;
3127
3128 if (skyhawk_chip(adapter))
3129 status = be_flash_skyhawk(adapter, fw, &flash_cmd,
3130 num_imgs);
3131 else
3132 status = be_flash_BEx(adapter, fw, &flash_cmd,
3133 num_imgs);
3134 }
3135
3136 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
3137 if (!status)
3138 dev_info(dev, "Firmware flashed successfully\n");
3139
3140 return status;
3141}
3142
Dan Carpenterc196b022010-05-26 04:47:39 +00003143int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303144 struct be_dma_mem *nonemb_cmd)
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003145{
3146 struct be_mcc_wrb *wrb;
3147 struct be_cmd_req_acpi_wol_magic_config *req;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003148 int status;
3149
3150 spin_lock_bh(&adapter->mcc_lock);
3151
3152 wrb = wrb_from_mccq(adapter);
3153 if (!wrb) {
3154 status = -EBUSY;
3155 goto err;
3156 }
3157 req = nonemb_cmd->va;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003158
Somnath Kotur106df1e2011-10-27 07:12:13 +00003159 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303160 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
3161 wrb, nonemb_cmd);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003162 memcpy(req->magic_mac, mac, ETH_ALEN);
3163
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003164 status = be_mcc_notify_wait(adapter);
3165
3166err:
3167 spin_unlock_bh(&adapter->mcc_lock);
3168 return status;
3169}
Suresh Rff33a6e2009-12-03 16:15:52 -08003170
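/* Puts the given port into (or takes it out of) the requested loopback mode.
 * The command completes asynchronously; the routine waits on et_cmd_compl
 * for up to SET_LB_MODE_TIMEOUT.
 */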
Sarveshwar Bandifced9992009-12-23 04:41:44 +00003171int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
3172 u8 loopback_type, u8 enable)
3173{
3174 struct be_mcc_wrb *wrb;
3175 struct be_cmd_req_set_lmode *req;
3176 int status;
3177
Somnath Kotur2e365b12016-02-03 09:49:20 +05303178 if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
3179 CMD_SUBSYSTEM_LOWLEVEL))
3180 return -EPERM;
3181
Sarveshwar Bandifced9992009-12-23 04:41:44 +00003182 spin_lock_bh(&adapter->mcc_lock);
3183
3184 wrb = wrb_from_mccq(adapter);
3185 if (!wrb) {
3186 status = -EBUSY;
Suresh Reddy9c855972015-07-10 05:32:50 -04003187 goto err_unlock;
Sarveshwar Bandifced9992009-12-23 04:41:44 +00003188 }
3189
3190 req = embedded_payload(wrb);
3191
Somnath Kotur106df1e2011-10-27 07:12:13 +00003192 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303193 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
3194 wrb, NULL);
Sarveshwar Bandifced9992009-12-23 04:41:44 +00003195
3196 req->src_port = port_num;
3197 req->dest_port = port_num;
3198 req->loopback_type = loopback_type;
3199 req->loopback_state = enable;
3200
Suresh Reddy9c855972015-07-10 05:32:50 -04003201 status = be_mcc_notify(adapter);
3202 if (status)
3203 goto err_unlock;
3204
3205 spin_unlock_bh(&adapter->mcc_lock);
3206
3207 if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
3208 msecs_to_jiffies(SET_LB_MODE_TIMEOUT)))
3209 status = -ETIMEDOUT;
3210
3211 return status;
3212
3213err_unlock:
Sarveshwar Bandifced9992009-12-23 04:41:44 +00003214 spin_unlock_bh(&adapter->mcc_lock);
3215 return status;
3216}
3217
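/* Runs a firmware-driven loopback test: 'num_pkts' packets of 'pkt_size'
 * bytes filled with 'pattern' are sent and checked on the given port. The
 * test result is read from the response after the asynchronous completion.
 */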
Suresh Rff33a6e2009-12-03 16:15:52 -08003218int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303219 u32 loopback_type, u32 pkt_size, u32 num_pkts,
3220 u64 pattern)
Suresh Rff33a6e2009-12-03 16:15:52 -08003221{
3222 struct be_mcc_wrb *wrb;
3223 struct be_cmd_req_loopback_test *req;
Suresh Reddy5eeff632014-01-06 13:02:24 +05303224 struct be_cmd_resp_loopback_test *resp;
Suresh Rff33a6e2009-12-03 16:15:52 -08003225 int status;
3226
Somnath Kotur2e365b12016-02-03 09:49:20 +05303227 if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_LOOPBACK_TEST,
3228 CMD_SUBSYSTEM_LOWLEVEL))
3229 return -EPERM;
3230
Suresh Rff33a6e2009-12-03 16:15:52 -08003231 spin_lock_bh(&adapter->mcc_lock);
3232
3233 wrb = wrb_from_mccq(adapter);
3234 if (!wrb) {
3235 status = -EBUSY;
3236 goto err;
3237 }
3238
3239 req = embedded_payload(wrb);
3240
Somnath Kotur106df1e2011-10-27 07:12:13 +00003241 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303242 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb,
3243 NULL);
Suresh Rff33a6e2009-12-03 16:15:52 -08003244
Suresh Reddy5eeff632014-01-06 13:02:24 +05303245 req->hdr.timeout = cpu_to_le32(15);
Suresh Rff33a6e2009-12-03 16:15:52 -08003246 req->pattern = cpu_to_le64(pattern);
3247 req->src_port = cpu_to_le32(port_num);
3248 req->dest_port = cpu_to_le32(port_num);
3249 req->pkt_size = cpu_to_le32(pkt_size);
3250 req->num_pkts = cpu_to_le32(num_pkts);
3251 req->loopback_type = cpu_to_le32(loopback_type);
3252
Suresh Reddyefaa4082015-07-10 05:32:48 -04003253 status = be_mcc_notify(adapter);
3254 if (status)
3255 goto err;
Suresh Rff33a6e2009-12-03 16:15:52 -08003256
Suresh Reddy5eeff632014-01-06 13:02:24 +05303257 spin_unlock_bh(&adapter->mcc_lock);
3258
3259 wait_for_completion(&adapter->et_cmd_compl);
3260 resp = embedded_payload(wrb);
3261 status = le32_to_cpu(resp->status);
3262
3263 return status;
Suresh Rff33a6e2009-12-03 16:15:52 -08003264err:
3265 spin_unlock_bh(&adapter->mcc_lock);
3266 return status;
3267}
3268
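/* DDR DMA test: a buffer filled with 'pattern' is DMAed to the adapter and
 * back, and the received buffer is verified against what was sent.
 */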
3269int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303270 u32 byte_cnt, struct be_dma_mem *cmd)
Suresh Rff33a6e2009-12-03 16:15:52 -08003271{
3272 struct be_mcc_wrb *wrb;
3273 struct be_cmd_req_ddrdma_test *req;
Suresh Rff33a6e2009-12-03 16:15:52 -08003274 int status;
3275 int i, j = 0;
3276
Somnath Kotur2e365b12016-02-03 09:49:20 +05303277 if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_HOST_DDR_DMA,
3278 CMD_SUBSYSTEM_LOWLEVEL))
3279 return -EPERM;
3280
Suresh Rff33a6e2009-12-03 16:15:52 -08003281 spin_lock_bh(&adapter->mcc_lock);
3282
3283 wrb = wrb_from_mccq(adapter);
3284 if (!wrb) {
3285 status = -EBUSY;
3286 goto err;
3287 }
3288 req = cmd->va;
Somnath Kotur106df1e2011-10-27 07:12:13 +00003289 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303290 OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb,
3291 cmd);
Suresh Rff33a6e2009-12-03 16:15:52 -08003292
3293 req->pattern = cpu_to_le64(pattern);
3294 req->byte_count = cpu_to_le32(byte_cnt);
3295 for (i = 0; i < byte_cnt; i++) {
3296 req->snd_buff[i] = (u8)(pattern >> (j*8));
3297 j++;
3298 if (j > 7)
3299 j = 0;
3300 }
3301
3302 status = be_mcc_notify_wait(adapter);
3303
3304 if (!status) {
3305 struct be_cmd_resp_ddrdma_test *resp;
Kalesh AP03d28ff2014-09-19 15:46:56 +05303306
Suresh Rff33a6e2009-12-03 16:15:52 -08003307 resp = cmd->va;
3308 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
Kalesh APcd3307aa2014-09-19 15:47:02 +05303309 resp->snd_err) {
Suresh Rff33a6e2009-12-03 16:15:52 -08003310 status = -1;
3311 }
3312 }
3313
3314err:
3315 spin_unlock_bh(&adapter->mcc_lock);
3316 return status;
3317}
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08003318
Dan Carpenterc196b022010-05-26 04:47:39 +00003319int be_cmd_get_seeprom_data(struct be_adapter *adapter,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303320 struct be_dma_mem *nonemb_cmd)
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08003321{
3322 struct be_mcc_wrb *wrb;
3323 struct be_cmd_req_seeprom_read *req;
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08003324 int status;
3325
3326 spin_lock_bh(&adapter->mcc_lock);
3327
3328 wrb = wrb_from_mccq(adapter);
Ajit Khapardee45ff012011-02-04 17:18:28 +00003329 if (!wrb) {
3330 status = -EBUSY;
3331 goto err;
3332 }
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08003333 req = nonemb_cmd->va;
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08003334
Somnath Kotur106df1e2011-10-27 07:12:13 +00003335 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303336 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
3337 nonemb_cmd);
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08003338
3339 status = be_mcc_notify_wait(adapter);
3340
Ajit Khapardee45ff012011-02-04 17:18:28 +00003341err:
Sarveshwar Bandi368c0ca2010-01-08 00:07:27 -08003342 spin_unlock_bh(&adapter->mcc_lock);
3343 return status;
3344}
Ajit Khapardeee3cb622010-07-01 03:51:00 +00003345
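/* Queries PHY details (PHY type, interface type, supported speeds) and
 * caches them in adapter->phy. BE2 chips are forced to report fixed
 * 10G/1G speed support.
 */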
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003346int be_cmd_get_phy_info(struct be_adapter *adapter)
Ajit Khapardeee3cb622010-07-01 03:51:00 +00003347{
3348 struct be_mcc_wrb *wrb;
3349 struct be_cmd_req_get_phy_info *req;
Sathya Perla306f1342011-08-02 19:57:45 +00003350 struct be_dma_mem cmd;
Ajit Khapardeee3cb622010-07-01 03:51:00 +00003351 int status;
3352
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003353 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
3354 CMD_SUBSYSTEM_COMMON))
3355 return -EPERM;
3356
Ajit Khapardeee3cb622010-07-01 03:51:00 +00003357 spin_lock_bh(&adapter->mcc_lock);
3358
3359 wrb = wrb_from_mccq(adapter);
3360 if (!wrb) {
3361 status = -EBUSY;
3362 goto err;
3363 }
Sathya Perla306f1342011-08-02 19:57:45 +00003364 cmd.size = sizeof(struct be_cmd_req_get_phy_info);
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05303365 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3366 GFP_ATOMIC);
Sathya Perla306f1342011-08-02 19:57:45 +00003367 if (!cmd.va) {
3368 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3369 status = -ENOMEM;
3370 goto err;
3371 }
Ajit Khapardeee3cb622010-07-01 03:51:00 +00003372
Sathya Perla306f1342011-08-02 19:57:45 +00003373 req = cmd.va;
Ajit Khapardeee3cb622010-07-01 03:51:00 +00003374
Somnath Kotur106df1e2011-10-27 07:12:13 +00003375 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303376 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
3377 wrb, &cmd);
Ajit Khapardeee3cb622010-07-01 03:51:00 +00003378
3379 status = be_mcc_notify_wait(adapter);
Sathya Perla306f1342011-08-02 19:57:45 +00003380 if (!status) {
3381 struct be_phy_info *resp_phy_info =
3382 cmd.va + sizeof(struct be_cmd_req_hdr);
Kalesh AP03d28ff2014-09-19 15:46:56 +05303383
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003384 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
3385 adapter->phy.interface_type =
Sathya Perla306f1342011-08-02 19:57:45 +00003386 le16_to_cpu(resp_phy_info->interface_type);
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003387 adapter->phy.auto_speeds_supported =
3388 le16_to_cpu(resp_phy_info->auto_speeds_supported);
3389 adapter->phy.fixed_speeds_supported =
3390 le16_to_cpu(resp_phy_info->fixed_speeds_supported);
3391 adapter->phy.misc_params =
3392 le32_to_cpu(resp_phy_info->misc_params);
Vasundhara Volam68cb7e42013-08-06 09:27:18 +05303393
3394 if (BE2_chip(adapter)) {
3395 adapter->phy.fixed_speeds_supported =
3396 BE_SUPPORTED_SPEED_10GBPS |
3397 BE_SUPPORTED_SPEED_1GBPS;
3398 }
Sathya Perla306f1342011-08-02 19:57:45 +00003399 }
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05303400 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khapardeee3cb622010-07-01 03:51:00 +00003401err:
3402 spin_unlock_bh(&adapter->mcc_lock);
3403 return status;
3404}
Ajit Khapardee1d18732010-07-23 01:52:13 +00003405
Lad, Prabhakarbc0ee162015-02-05 15:24:43 +00003406static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
Ajit Khapardee1d18732010-07-23 01:52:13 +00003407{
3408 struct be_mcc_wrb *wrb;
3409 struct be_cmd_req_set_qos *req;
3410 int status;
3411
3412 spin_lock_bh(&adapter->mcc_lock);
3413
3414 wrb = wrb_from_mccq(adapter);
3415 if (!wrb) {
3416 status = -EBUSY;
3417 goto err;
3418 }
3419
3420 req = embedded_payload(wrb);
3421
Somnath Kotur106df1e2011-10-27 07:12:13 +00003422 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303423 OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
Ajit Khapardee1d18732010-07-23 01:52:13 +00003424
3425 req->hdr.domain = domain;
Ajit Khaparde6bff57a2011-02-11 13:33:02 +00003426 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
3427 req->max_bps_nic = cpu_to_le32(bps);
Ajit Khapardee1d18732010-07-23 01:52:13 +00003428
3429 status = be_mcc_notify_wait(adapter);
3430
3431err:
3432 spin_unlock_bh(&adapter->mcc_lock);
3433 return status;
3434}
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003435
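/* Uses the mailbox to read controller attributes and caches the HBA port
 * number and the controller serial number words.
 */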
3436int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
3437{
3438 struct be_mcc_wrb *wrb;
3439 struct be_cmd_req_cntl_attribs *req;
3440 struct be_cmd_resp_cntl_attribs *resp;
Sriharsha Basavapatnaa155a5d2015-07-22 11:15:12 +05303441 int status, i;
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003442 int payload_len = max(sizeof(*req), sizeof(*resp));
3443 struct mgmt_controller_attrib *attribs;
3444 struct be_dma_mem attribs_cmd;
Sriharsha Basavapatnaa155a5d2015-07-22 11:15:12 +05303445 u32 *serial_num;
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003446
Suresh Reddyd98ef502013-04-25 00:56:55 +00003447 if (mutex_lock_interruptible(&adapter->mbox_lock))
3448 return -1;
3449
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003450 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
3451 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05303452 attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
3453 attribs_cmd.size,
3454 &attribs_cmd.dma, GFP_ATOMIC);
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003455 if (!attribs_cmd.va) {
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303456 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
Suresh Reddyd98ef502013-04-25 00:56:55 +00003457 status = -ENOMEM;
3458 goto err;
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003459 }
3460
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003461 wrb = wrb_from_mbox(adapter);
3462 if (!wrb) {
3463 status = -EBUSY;
3464 goto err;
3465 }
3466 req = attribs_cmd.va;
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003467
Somnath Kotur106df1e2011-10-27 07:12:13 +00003468 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303469 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len,
3470 wrb, &attribs_cmd);
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003471
3472 status = be_mbox_notify_wait(adapter);
3473 if (!status) {
Joe Perches43d620c2011-06-16 19:08:06 +00003474 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003475 adapter->hba_port_num = attribs->hba_attribs.phy_port;
Sriharsha Basavapatnaa155a5d2015-07-22 11:15:12 +05303476 serial_num = attribs->hba_attribs.controller_serial_number;
3477 for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++)
3478 adapter->serial_num[i] = le32_to_cpu(serial_num[i]) &
3479 (BIT_MASK(16) - 1);
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003480 }
3481
3482err:
3483 mutex_unlock(&adapter->mbox_lock);
Suresh Reddyd98ef502013-04-25 00:56:55 +00003484 if (attribs_cmd.va)
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05303485 dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size,
3486 attribs_cmd.va, attribs_cmd.dma);
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003487 return status;
3488}
Sathya Perla2e588f82011-03-11 02:49:26 +00003489
3490/* Uses mbox */
Sathya Perla2dc1deb2011-07-19 19:52:33 +00003491int be_cmd_req_native_mode(struct be_adapter *adapter)
Sathya Perla2e588f82011-03-11 02:49:26 +00003492{
3493 struct be_mcc_wrb *wrb;
3494 struct be_cmd_req_set_func_cap *req;
3495 int status;
3496
3497 if (mutex_lock_interruptible(&adapter->mbox_lock))
3498 return -1;
3499
3500 wrb = wrb_from_mbox(adapter);
3501 if (!wrb) {
3502 status = -EBUSY;
3503 goto err;
3504 }
3505
3506 req = embedded_payload(wrb);
3507
Somnath Kotur106df1e2011-10-27 07:12:13 +00003508 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303509 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP,
3510 sizeof(*req), wrb, NULL);
Sathya Perla2e588f82011-03-11 02:49:26 +00003511
3512 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
3513 CAPABILITY_BE3_NATIVE_ERX_API);
3514 req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
3515
3516 status = be_mbox_notify_wait(adapter);
3517 if (!status) {
3518 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05303519
Sathya Perla2e588f82011-03-11 02:49:26 +00003520 adapter->be3_native = le32_to_cpu(resp->cap_flags) &
3521 CAPABILITY_BE3_NATIVE_ERX_API;
Sathya Perlad3791422012-09-28 04:39:44 +00003522 if (!adapter->be3_native)
3523 dev_warn(&adapter->pdev->dev,
3524 "adapter not in advanced mode\n");
Sathya Perla2e588f82011-03-11 02:49:26 +00003525 }
3526err:
3527 mutex_unlock(&adapter->mbox_lock);
3528 return status;
3529}
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003530
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003531/* Get privilege(s) for a function */
3532int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
3533 u32 domain)
3534{
3535 struct be_mcc_wrb *wrb;
3536 struct be_cmd_req_get_fn_privileges *req;
3537 int status;
3538
3539 spin_lock_bh(&adapter->mcc_lock);
3540
3541 wrb = wrb_from_mccq(adapter);
3542 if (!wrb) {
3543 status = -EBUSY;
3544 goto err;
3545 }
3546
3547 req = embedded_payload(wrb);
3548
3549 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3550 OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
3551 wrb, NULL);
3552
3553 req->hdr.domain = domain;
3554
3555 status = be_mcc_notify_wait(adapter);
3556 if (!status) {
3557 struct be_cmd_resp_get_fn_privileges *resp =
3558 embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05303559
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003560 *privilege = le32_to_cpu(resp->privilege_mask);
Suresh Reddy02308d72014-01-15 13:23:36 +05303561
3562 /* In UMC mode FW does not return right privileges.
3563 * Override with correct privilege equivalent to PF.
3564 */
3565 if (BEx_chip(adapter) && be_is_mc(adapter) &&
3566 be_physfn(adapter))
3567 *privilege = MAX_PRIVILEGES;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003568 }
3569
3570err:
3571 spin_unlock_bh(&adapter->mcc_lock);
3572 return status;
3573}
3574
Sathya Perla04a06022013-07-23 15:25:00 +05303575/* Set privilege(s) for a function */
3576int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
3577 u32 domain)
3578{
3579 struct be_mcc_wrb *wrb;
3580 struct be_cmd_req_set_fn_privileges *req;
3581 int status;
3582
3583 spin_lock_bh(&adapter->mcc_lock);
3584
3585 wrb = wrb_from_mccq(adapter);
3586 if (!wrb) {
3587 status = -EBUSY;
3588 goto err;
3589 }
3590
3591 req = embedded_payload(wrb);
3592 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3593 OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
3594 wrb, NULL);
3595 req->hdr.domain = domain;
3596 if (lancer_chip(adapter))
3597 req->privileges_lancer = cpu_to_le32(privileges);
3598 else
3599 req->privileges = cpu_to_le32(privileges);
3600
3601 status = be_mcc_notify_wait(adapter);
3602err:
3603 spin_unlock_bh(&adapter->mcc_lock);
3604 return status;
3605}
3606
Sathya Perla5a712c12013-07-23 15:24:59 +05303607/* pmac_id_valid: true => pmac_id is supplied and MAC address is requested.
3608 * pmac_id_valid: false => pmac_id or MAC address is requested.
3609 * If pmac_id is returned, pmac_id_valid is returned as true
3610 */
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003611int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
Suresh Reddyb188f092014-01-15 13:23:39 +05303612 bool *pmac_id_valid, u32 *pmac_id, u32 if_handle,
3613 u8 domain)
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003614{
3615 struct be_mcc_wrb *wrb;
3616 struct be_cmd_req_get_mac_list *req;
3617 int status;
3618 int mac_count;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003619 struct be_dma_mem get_mac_list_cmd;
3620 int i;
3621
3622 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
3623 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05303624 get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
3625 get_mac_list_cmd.size,
3626 &get_mac_list_cmd.dma,
3627 GFP_ATOMIC);
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003628
3629 if (!get_mac_list_cmd.va) {
3630 dev_err(&adapter->pdev->dev,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303631 "Memory allocation failure during GET_MAC_LIST\n");
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003632 return -ENOMEM;
3633 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003634
3635 spin_lock_bh(&adapter->mcc_lock);
3636
3637 wrb = wrb_from_mccq(adapter);
3638 if (!wrb) {
3639 status = -EBUSY;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003640 goto out;
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003641 }
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003642
3643 req = get_mac_list_cmd.va;
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003644
3645 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlabf591f52013-05-08 02:05:48 +00003646 OPCODE_COMMON_GET_MAC_LIST,
3647 get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003648 req->hdr.domain = domain;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003649 req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
Sathya Perla5a712c12013-07-23 15:24:59 +05303650 if (*pmac_id_valid) {
3651 req->mac_id = cpu_to_le32(*pmac_id);
Suresh Reddyb188f092014-01-15 13:23:39 +05303652 req->iface_id = cpu_to_le16(if_handle);
Sathya Perla5a712c12013-07-23 15:24:59 +05303653 req->perm_override = 0;
3654 } else {
3655 req->perm_override = 1;
3656 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003657
3658 status = be_mcc_notify_wait(adapter);
3659 if (!status) {
3660 struct be_cmd_resp_get_mac_list *resp =
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003661 get_mac_list_cmd.va;
Sathya Perla5a712c12013-07-23 15:24:59 +05303662
3663 if (*pmac_id_valid) {
3664 memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
3665 ETH_ALEN);
3666 goto out;
3667 }
3668
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003669 mac_count = resp->true_mac_count + resp->pseudo_mac_count;
3670 /* Mac list returned could contain one or more active mac_ids
Joe Perchesdbedd442015-03-06 20:49:12 -08003671 * or one or more true or pseudo permanent mac addresses.
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003672 * If an active mac_id is present, return first active mac_id
3673 * found.
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003674 */
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003675 for (i = 0; i < mac_count; i++) {
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003676 struct get_list_macaddr *mac_entry;
3677 u16 mac_addr_size;
3678 u32 mac_id;
3679
3680 mac_entry = &resp->macaddr_list[i];
3681 mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
3682 /* mac_id is a 32 bit value and mac_addr size
3683 * is 6 bytes
3684 */
3685 if (mac_addr_size == sizeof(u32)) {
Sathya Perla5a712c12013-07-23 15:24:59 +05303686 *pmac_id_valid = true;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003687 mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
3688 *pmac_id = le32_to_cpu(mac_id);
3689 goto out;
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003690 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003691 }
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003692 /* If no active mac_id found, return first mac addr */
Sathya Perla5a712c12013-07-23 15:24:59 +05303693 *pmac_id_valid = false;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003694 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303695 ETH_ALEN);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003696 }
3697
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00003698out:
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003699 spin_unlock_bh(&adapter->mcc_lock);
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05303700 dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
3701 get_mac_list_cmd.va, get_mac_list_cmd.dma);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003702 return status;
3703}
3704
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303705int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
3706 u8 *mac, u32 if_handle, bool active, u32 domain)
Sathya Perla5a712c12013-07-23 15:24:59 +05303707{
Suresh Reddyb188f092014-01-15 13:23:39 +05303708 if (!active)
3709 be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id,
3710 if_handle, domain);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303711 if (BEx_chip(adapter))
Sathya Perla5a712c12013-07-23 15:24:59 +05303712 return be_cmd_mac_addr_query(adapter, mac, false,
Suresh Reddyb188f092014-01-15 13:23:39 +05303713 if_handle, curr_pmac_id);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303714 else
3715 /* Fetch the MAC address using pmac_id */
3716 return be_cmd_get_mac_from_list(adapter, mac, &active,
Suresh Reddyb188f092014-01-15 13:23:39 +05303717 &curr_pmac_id,
3718 if_handle, domain);
Sathya Perla5a712c12013-07-23 15:24:59 +05303719}
3720
Sathya Perla95046b92013-07-23 15:25:02 +05303721int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
3722{
3723 int status;
3724 bool pmac_valid = false;
3725
Joe Perchesc7bf7162015-03-02 19:54:47 -08003726 eth_zero_addr(mac);
Sathya Perla95046b92013-07-23 15:25:02 +05303727
Sathya Perla3175d8c2013-07-23 15:25:03 +05303728 if (BEx_chip(adapter)) {
3729 if (be_physfn(adapter))
3730 status = be_cmd_mac_addr_query(adapter, mac, true, 0,
3731 0);
3732 else
3733 status = be_cmd_mac_addr_query(adapter, mac, false,
3734 adapter->if_handle, 0);
3735 } else {
Sathya Perla95046b92013-07-23 15:25:02 +05303736 status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
Suresh Reddyb188f092014-01-15 13:23:39 +05303737 NULL, adapter->if_handle, 0);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303738 }
3739
Sathya Perla95046b92013-07-23 15:25:02 +05303740 return status;
3741}
3742
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003743/* Uses synchronous MCCQ */
3744int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
3745 u8 mac_count, u32 domain)
3746{
3747 struct be_mcc_wrb *wrb;
3748 struct be_cmd_req_set_mac_list *req;
3749 int status;
3750 struct be_dma_mem cmd;
3751
3752 memset(&cmd, 0, sizeof(struct be_dma_mem));
3753 cmd.size = sizeof(struct be_cmd_req_set_mac_list);
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05303754 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3755 GFP_KERNEL);
Joe Perchesd0320f72013-03-14 13:07:21 +00003756 if (!cmd.va)
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003757 return -ENOMEM;
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003758
3759 spin_lock_bh(&adapter->mcc_lock);
3760
3761 wrb = wrb_from_mccq(adapter);
3762 if (!wrb) {
3763 status = -EBUSY;
3764 goto err;
3765 }
3766
3767 req = cmd.va;
3768 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303769 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
3770 wrb, &cmd);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003771
3772 req->hdr.domain = domain;
3773 req->mac_count = mac_count;
3774 if (mac_count)
3775 memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
3776
3777 status = be_mcc_notify_wait(adapter);
3778
3779err:
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303780 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003781 spin_unlock_bh(&adapter->mcc_lock);
3782 return status;
3783}
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003784
Sathya Perla3175d8c2013-07-23 15:25:03 +05303785/* Wrapper to delete any active MACs and provision the new mac.
3786 * Changes to MAC_LIST are allowed iff none of the MAC addresses in the
3787 * current list are active.
3788 */
3789int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
3790{
3791 bool active_mac = false;
3792 u8 old_mac[ETH_ALEN];
3793 u32 pmac_id;
3794 int status;
3795
3796 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
Suresh Reddyb188f092014-01-15 13:23:39 +05303797 &pmac_id, if_id, dom);
3798
Sathya Perla3175d8c2013-07-23 15:25:03 +05303799 if (!status && active_mac)
3800 be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
3801
3802 return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
3803}
3804
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003805int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
Kalesh APe7bcbd72015-05-06 05:30:32 -04003806 u32 domain, u16 intf_id, u16 hsw_mode, u8 spoofchk)
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003807{
3808 struct be_mcc_wrb *wrb;
3809 struct be_cmd_req_set_hsw_config *req;
3810 void *ctxt;
3811 int status;
3812
3813 spin_lock_bh(&adapter->mcc_lock);
3814
3815 wrb = wrb_from_mccq(adapter);
3816 if (!wrb) {
3817 status = -EBUSY;
3818 goto err;
3819 }
3820
3821 req = embedded_payload(wrb);
3822 ctxt = &req->context;
3823
3824 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303825 OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
3826 NULL);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003827
3828 req->hdr.domain = domain;
3829 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
3830 if (pvid) {
3831 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
3832 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
3833 }
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003834 if (!BEx_chip(adapter) && hsw_mode) {
3835 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
3836 ctxt, adapter->hba_port_num);
3837 AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
3838 AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type,
3839 ctxt, hsw_mode);
3840 }
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003841
Kalesh APe7bcbd72015-05-06 05:30:32 -04003842 /* Enable/disable both mac and vlan spoof checking */
3843 if (!BEx_chip(adapter) && spoofchk) {
3844 AMAP_SET_BITS(struct amap_set_hsw_context, mac_spoofchk,
3845 ctxt, spoofchk);
3846 AMAP_SET_BITS(struct amap_set_hsw_context, vlan_spoofchk,
3847 ctxt, spoofchk);
3848 }
3849
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003850 be_dws_cpu_to_le(req->context, sizeof(req->context));
3851 status = be_mcc_notify_wait(adapter);
3852
3853err:
3854 spin_unlock_bh(&adapter->mcc_lock);
3855 return status;
3856}
3857
3858/* Get Hyper switch config */
3859int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
Kalesh APe7bcbd72015-05-06 05:30:32 -04003860 u32 domain, u16 intf_id, u8 *mode, bool *spoofchk)
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003861{
3862 struct be_mcc_wrb *wrb;
3863 struct be_cmd_req_get_hsw_config *req;
3864 void *ctxt;
3865 int status;
3866 u16 vid;
3867
3868 spin_lock_bh(&adapter->mcc_lock);
3869
3870 wrb = wrb_from_mccq(adapter);
3871 if (!wrb) {
3872 status = -EBUSY;
3873 goto err;
3874 }
3875
3876 req = embedded_payload(wrb);
3877 ctxt = &req->context;
3878
3879 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303880 OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
3881 NULL);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003882
3883 req->hdr.domain = domain;
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003884 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
3885 ctxt, intf_id);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003886 AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003887
Vasundhara Volam2c07c1d2014-01-15 13:23:32 +05303888 if (!BEx_chip(adapter) && mode) {
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003889 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
3890 ctxt, adapter->hba_port_num);
3891 AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
3892 }
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003893 be_dws_cpu_to_le(req->context, sizeof(req->context));
3894
3895 status = be_mcc_notify_wait(adapter);
3896 if (!status) {
3897 struct be_cmd_resp_get_hsw_config *resp =
3898 embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05303899
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303900 be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003901 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303902 pvid, &resp->context);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003903 if (pvid)
3904 *pvid = le16_to_cpu(vid);
3905 if (mode)
3906 *mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3907 port_fwd_type, &resp->context);
Kalesh APe7bcbd72015-05-06 05:30:32 -04003908 if (spoofchk)
3909 *spoofchk =
3910 AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3911 spoofchk, &resp->context);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003912 }
3913
3914err:
3915 spin_unlock_bh(&adapter->mcc_lock);
3916 return status;
3917}
3918
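/* Wake-on-LAN is not supported on virtual functions or on a few OEM
 * subsystem device IDs.
 */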
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003919static bool be_is_wol_excluded(struct be_adapter *adapter)
3920{
3921 struct pci_dev *pdev = adapter->pdev;
3922
Kalesh AP18c57c72015-05-06 05:30:38 -04003923 if (be_virtfn(adapter))
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003924 return true;
3925
3926 switch (pdev->subsystem_device) {
3927 case OC_SUBSYS_DEVICE_ID1:
3928 case OC_SUBSYS_DEVICE_ID2:
3929 case OC_SUBSYS_DEVICE_ID3:
3930 case OC_SUBSYS_DEVICE_ID4:
3931 return true;
3932 default:
3933 return false;
3934 }
3935}
3936
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003937int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
3938{
3939 struct be_mcc_wrb *wrb;
3940 struct be_cmd_req_acpi_wol_magic_config_v1 *req;
Suresh Reddy76a9e082014-01-15 13:23:40 +05303941 int status = 0;
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003942 struct be_dma_mem cmd;
3943
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003944 if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
3945 CMD_SUBSYSTEM_ETH))
3946 return -EPERM;
3947
Suresh Reddy76a9e082014-01-15 13:23:40 +05303948 if (be_is_wol_excluded(adapter))
3949 return status;
3950
Suresh Reddyd98ef502013-04-25 00:56:55 +00003951 if (mutex_lock_interruptible(&adapter->mbox_lock))
3952 return -1;
3953
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003954 memset(&cmd, 0, sizeof(struct be_dma_mem));
3955 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05303956 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3957 GFP_ATOMIC);
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003958 if (!cmd.va) {
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05303959 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
Suresh Reddyd98ef502013-04-25 00:56:55 +00003960 status = -ENOMEM;
3961 goto err;
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003962 }
3963
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003964 wrb = wrb_from_mbox(adapter);
3965 if (!wrb) {
3966 status = -EBUSY;
3967 goto err;
3968 }
3969
3970 req = cmd.va;
3971
3972 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
3973 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
Suresh Reddy76a9e082014-01-15 13:23:40 +05303974 sizeof(*req), wrb, &cmd);
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003975
3976 req->hdr.version = 1;
3977 req->query_options = BE_GET_WOL_CAP;
3978
3979 status = be_mbox_notify_wait(adapter);
3980 if (!status) {
3981 struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
Kalesh AP03d28ff2014-09-19 15:46:56 +05303982
Kalesh AP504fbf12014-09-19 15:47:00 +05303983 resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va;
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003984
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003985 adapter->wol_cap = resp->wol_settings;
Suresh Reddy76a9e082014-01-15 13:23:40 +05303986 if (adapter->wol_cap & BE_WOL_CAP)
3987 adapter->wol_en = true;
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003988 }
3989err:
3990 mutex_unlock(&adapter->mbox_lock);
Suresh Reddyd98ef502013-04-25 00:56:55 +00003991 if (cmd.va)
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05303992 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3993 cmd.dma);
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003994 return status;
3996}
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05303997
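/* Descriptive note (not upstream text): reads the extended FAT capabilities
 * into a DMA buffer, updates the debug level of every MODE_UART trace entry
 * in every module to 'level', and then writes the whole configuration back.
 */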
3998int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
3999{
4000 struct be_dma_mem extfat_cmd;
4001 struct be_fat_conf_params *cfgs;
4002 int status;
4003 int i, j;
4004
4005 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4006 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05304007 extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
4008 extfat_cmd.size, &extfat_cmd.dma,
4009 GFP_ATOMIC);
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304010 if (!extfat_cmd.va)
4011 return -ENOMEM;
4012
4013 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4014 if (status)
4015 goto err;
4016
4017 cfgs = (struct be_fat_conf_params *)
4018 (extfat_cmd.va + sizeof(struct be_cmd_resp_hdr));
4019 for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
4020 u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
Kalesh AP03d28ff2014-09-19 15:46:56 +05304021
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304022 for (j = 0; j < num_modes; j++) {
4023 if (cfgs->module[i].trace_lvl[j].mode == MODE_UART)
4024 cfgs->module[i].trace_lvl[j].dbg_lvl =
4025 cpu_to_le32(level);
4026 }
4027 }
4028
4029 status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
4030err:
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05304031 dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
4032 extfat_cmd.dma);
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304033 return status;
4034}
4035
4036int be_cmd_get_fw_log_level(struct be_adapter *adapter)
4037{
4038 struct be_dma_mem extfat_cmd;
4039 struct be_fat_conf_params *cfgs;
4040 int status, j;
4041 int level = 0;
4042
4043 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4044 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05304045 extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
4046 extfat_cmd.size, &extfat_cmd.dma,
4047 GFP_ATOMIC);
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304048
4049 if (!extfat_cmd.va) {
4050 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4051 __func__);
4052 goto err;
4053 }
4054
4055 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4056 if (!status) {
4057 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4058 sizeof(struct be_cmd_resp_hdr));
Kalesh AP03d28ff2014-09-19 15:46:56 +05304059
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304060 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
4061 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4062 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4063 }
4064 }
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05304065 dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
4066 extfat_cmd.dma);
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304067err:
4068 return level;
4069}
4070
Somnath Kotur941a77d2012-05-17 22:59:03 +00004071int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
4072 struct be_dma_mem *cmd)
4073{
4074 struct be_mcc_wrb *wrb;
4075 struct be_cmd_req_get_ext_fat_caps *req;
4076 int status;
4077
4078 if (mutex_lock_interruptible(&adapter->mbox_lock))
4079 return -1;
4080
4081 wrb = wrb_from_mbox(adapter);
4082 if (!wrb) {
4083 status = -EBUSY;
4084 goto err;
4085 }
4086
4087 req = cmd->va;
4088 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4089 OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
4090 cmd->size, wrb, cmd);
4091 req->parameter_type = cpu_to_le32(1);
4092
4093 status = be_mbox_notify_wait(adapter);
4094err:
4095 mutex_unlock(&adapter->mbox_lock);
4096 return status;
4097}
4098
4099int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
4100 struct be_dma_mem *cmd,
4101 struct be_fat_conf_params *configs)
4102{
4103 struct be_mcc_wrb *wrb;
4104 struct be_cmd_req_set_ext_fat_caps *req;
4105 int status;
4106
4107 spin_lock_bh(&adapter->mcc_lock);
4108
4109 wrb = wrb_from_mccq(adapter);
4110 if (!wrb) {
4111 status = -EBUSY;
4112 goto err;
4113 }
4114
4115 req = cmd->va;
4116 memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
4117 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4118 OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
4119 cmd->size, wrb, cmd);
4120
4121 status = be_mcc_notify_wait(adapter);
4122err:
4123 spin_unlock_bh(&adapter->mcc_lock);
4124 return status;
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004125}
Parav Pandit6a4ab662012-03-26 14:27:12 +00004126
Vasundhara Volam21252372015-02-06 08:18:42 -05004127int be_cmd_query_port_name(struct be_adapter *adapter)
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004128{
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004129 struct be_cmd_req_get_port_name *req;
Vasundhara Volam21252372015-02-06 08:18:42 -05004130 struct be_mcc_wrb *wrb;
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004131 int status;
4132
Vasundhara Volam21252372015-02-06 08:18:42 -05004133 if (mutex_lock_interruptible(&adapter->mbox_lock))
4134 return -1;
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004135
Vasundhara Volam21252372015-02-06 08:18:42 -05004136 wrb = wrb_from_mbox(adapter);
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004137 req = embedded_payload(wrb);
4138
4139 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4140 OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
4141 NULL);
Vasundhara Volam21252372015-02-06 08:18:42 -05004142 if (!BEx_chip(adapter))
4143 req->hdr.version = 1;
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004144
Vasundhara Volam21252372015-02-06 08:18:42 -05004145 status = be_mbox_notify_wait(adapter);
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004146 if (!status) {
4147 struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05304148
Vasundhara Volam21252372015-02-06 08:18:42 -05004149 adapter->port_name = resp->port_name[adapter->hba_port_num];
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004150 } else {
Vasundhara Volam21252372015-02-06 08:18:42 -05004151 adapter->port_name = adapter->hba_port_num + '0';
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004152 }
Vasundhara Volam21252372015-02-06 08:18:42 -05004153
4154 mutex_unlock(&adapter->mbox_lock);
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004155 return status;
4156}
4157
Suresh Reddy980df242015-12-30 01:29:03 -05004158/* When more than 1 NIC descriptor is present in the descriptor list,
4159 * the caller must specify the pf_num to obtain the NIC descriptor
4160 * corresponding to its PCI function.
4161 * get_vft must be true when the caller wants the VF-template desc of the
4162 * PF-pool.
4163 * The pf_num should be set to PF_NUM_IGNORE when the caller knows
4164 * that only its NIC descriptor is present in the descriptor list.
4165 */
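/* Illustrative note (not upstream text): the buffer walked here is a packed
 * list of variable-length resource descriptors (PCIE, NIC, PORT, ...) as
 * returned by GET_FUNC_CONFIG/GET_PROFILE_CONFIG. Each entry begins with a
 * be_res_desc_hdr whose desc_len gives the offset of the next header; a
 * zero desc_len is treated as RESOURCE_DESC_SIZE_V0 so that a malformed
 * response cannot stall the walk.
 */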
Vasundhara Volam10cccf62014-06-30 13:01:31 +05304166static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
Suresh Reddy980df242015-12-30 01:29:03 -05004167 bool get_vft, u8 pf_num)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004168{
Vasundhara Volam150d58c2013-08-27 16:57:31 +05304169 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
Vasundhara Volam10cccf62014-06-30 13:01:31 +05304170 struct be_nic_res_desc *nic;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004171 int i;
4172
4173 for (i = 0; i < desc_count; i++) {
Vasundhara Volam150d58c2013-08-27 16:57:31 +05304174 if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
Vasundhara Volam10cccf62014-06-30 13:01:31 +05304175 hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) {
4176 nic = (struct be_nic_res_desc *)hdr;
Suresh Reddy980df242015-12-30 01:29:03 -05004177
4178 if ((pf_num == PF_NUM_IGNORE ||
4179 nic->pf_num == pf_num) &&
4180 (!get_vft || nic->flags & BIT(VFT_SHIFT)))
Vasundhara Volam10cccf62014-06-30 13:01:31 +05304181 return nic;
4182 }
Vasundhara Volam150d58c2013-08-27 16:57:31 +05304183 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
4184 hdr = (void *)hdr + hdr->desc_len;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004185 }
Vasundhara Volam150d58c2013-08-27 16:57:31 +05304186 return NULL;
4187}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004188
Suresh Reddy980df242015-12-30 01:29:03 -05004189static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count,
4190 u8 pf_num)
Vasundhara Volam10cccf62014-06-30 13:01:31 +05304191{
Suresh Reddy980df242015-12-30 01:29:03 -05004192 return be_get_nic_desc(buf, desc_count, true, pf_num);
Vasundhara Volam10cccf62014-06-30 13:01:31 +05304193}
4194
Suresh Reddy980df242015-12-30 01:29:03 -05004195static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count,
4196 u8 pf_num)
Vasundhara Volam10cccf62014-06-30 13:01:31 +05304197{
Suresh Reddy980df242015-12-30 01:29:03 -05004198 return be_get_nic_desc(buf, desc_count, false, pf_num);
Vasundhara Volam10cccf62014-06-30 13:01:31 +05304199}
4200
Suresh Reddy980df242015-12-30 01:29:03 -05004201static struct be_pcie_res_desc *be_get_pcie_desc(u8 *buf, u32 desc_count,
4202 u8 pf_num)
Vasundhara Volam150d58c2013-08-27 16:57:31 +05304203{
4204 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
4205 struct be_pcie_res_desc *pcie;
4206 int i;
4207
4208 for (i = 0; i < desc_count; i++) {
Suresh Reddy980df242015-12-30 01:29:03 -05004209 if (hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
4210 hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1) {
4211 pcie = (struct be_pcie_res_desc *)hdr;
4212 if (pcie->pf_num == pf_num)
Vasundhara Volam150d58c2013-08-27 16:57:31 +05304213 return pcie;
4214 }
4215
4216 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
4217 hdr = (void *)hdr + hdr->desc_len;
4218 }
Wei Yang950e2952013-05-22 15:58:22 +00004219 return NULL;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004220}
4221
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304222static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count)
4223{
4224 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
4225 int i;
4226
4227 for (i = 0; i < desc_count; i++) {
4228 if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1)
4229 return (struct be_port_res_desc *)hdr;
4230
4231 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
4232 hdr = (void *)hdr + hdr->desc_len;
4233 }
4234 return NULL;
4235}
4236
Sathya Perla92bf14a2013-08-27 16:57:32 +05304237static void be_copy_nic_desc(struct be_resources *res,
4238 struct be_nic_res_desc *desc)
4239{
4240 res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count);
4241 res->max_vlans = le16_to_cpu(desc->vlan_count);
4242 res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
4243 res->max_tx_qs = le16_to_cpu(desc->txq_count);
4244 res->max_rss_qs = le16_to_cpu(desc->rssq_count);
4245 res->max_rx_qs = le16_to_cpu(desc->rq_count);
4246 res->max_evt_qs = le16_to_cpu(desc->eq_count);
Vasundhara Volamf2858732015-03-04 00:44:33 -05004247 res->max_cq_count = le16_to_cpu(desc->cq_count);
4248 res->max_iface_count = le16_to_cpu(desc->iface_count);
4249 res->max_mcc_count = le16_to_cpu(desc->mcc_count);
Sathya Perla92bf14a2013-08-27 16:57:32 +05304250	/* Clear flags that the driver is not interested in */
4251 res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
4252 BE_IF_CAP_FLAGS_WANT;
Sathya Perla92bf14a2013-08-27 16:57:32 +05304253}
4254
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004255/* Uses Mbox */
Sathya Perla92bf14a2013-08-27 16:57:32 +05304256int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004257{
4258 struct be_mcc_wrb *wrb;
4259 struct be_cmd_req_get_func_config *req;
4260 int status;
4261 struct be_dma_mem cmd;
4262
Suresh Reddyd98ef502013-04-25 00:56:55 +00004263 if (mutex_lock_interruptible(&adapter->mbox_lock))
4264 return -1;
4265
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004266 memset(&cmd, 0, sizeof(struct be_dma_mem));
4267 cmd.size = sizeof(struct be_cmd_resp_get_func_config);
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05304268 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
4269 GFP_ATOMIC);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004270 if (!cmd.va) {
4271 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
Suresh Reddyd98ef502013-04-25 00:56:55 +00004272 status = -ENOMEM;
4273 goto err;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004274 }
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004275
4276 wrb = wrb_from_mbox(adapter);
4277 if (!wrb) {
4278 status = -EBUSY;
4279 goto err;
4280 }
4281
4282 req = cmd.va;
4283
4284 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4285 OPCODE_COMMON_GET_FUNC_CONFIG,
4286 cmd.size, wrb, &cmd);
4287
Kalesh AP28710c52013-04-28 22:21:13 +00004288 if (skyhawk_chip(adapter))
4289 req->hdr.version = 1;
4290
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004291 status = be_mbox_notify_wait(adapter);
4292 if (!status) {
4293 struct be_cmd_resp_get_func_config *resp = cmd.va;
4294 u32 desc_count = le32_to_cpu(resp->desc_count);
Vasundhara Volam150d58c2013-08-27 16:57:31 +05304295 struct be_nic_res_desc *desc;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004296
Suresh Reddy980df242015-12-30 01:29:03 -05004297 /* GET_FUNC_CONFIG returns resource descriptors of the
4298 * current function only. So, pf_num should be set to
4299 * PF_NUM_IGNORE.
4300 */
4301 desc = be_get_func_nic_desc(resp->func_param, desc_count,
4302 PF_NUM_IGNORE);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004303 if (!desc) {
4304 status = -EINVAL;
4305 goto err;
4306 }
Suresh Reddy980df242015-12-30 01:29:03 -05004307
4308 /* Store pf_num & vf_num for later use in GET_PROFILE_CONFIG */
4309 adapter->pf_num = desc->pf_num;
4310 adapter->vf_num = desc->vf_num;
4311
4312 if (res)
4313 be_copy_nic_desc(res, desc);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004314 }
4315err:
4316 mutex_unlock(&adapter->mbox_lock);
Suresh Reddyd98ef502013-04-25 00:56:55 +00004317 if (cmd.va)
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05304318 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
4319 cmd.dma);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004320 return status;
4321}
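
/* Illustrative usage, not upstream code: at probe/reset time the driver
 * would discover the per-function limits before creating queues, roughly:
 *
 *	struct be_resources res = {0};
 *
 *	status = be_cmd_get_func_config(adapter, &res);
 *	if (!status)
 *		... size EQs and RX/TX queues from res.max_evt_qs etc. ...
 */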
4322
Suresh Reddy980df242015-12-30 01:29:03 -05004323/* Will use MBOX only if MCCQ has not been created */
Sathya Perla92bf14a2013-08-27 16:57:32 +05304324int be_cmd_get_profile_config(struct be_adapter *adapter,
Vasundhara Volamf2858732015-03-04 00:44:33 -05004325 struct be_resources *res, u8 query, u8 domain)
Vasundhara Volama05f99d2013-04-21 23:28:17 +00004326{
Vasundhara Volam150d58c2013-08-27 16:57:31 +05304327 struct be_cmd_resp_get_profile_config *resp;
Vasundhara Volamba48c0c2014-06-30 13:01:30 +05304328 struct be_cmd_req_get_profile_config *req;
Vasundhara Volam10cccf62014-06-30 13:01:31 +05304329 struct be_nic_res_desc *vf_res;
Vasundhara Volam150d58c2013-08-27 16:57:31 +05304330 struct be_pcie_res_desc *pcie;
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304331 struct be_port_res_desc *port;
Vasundhara Volam150d58c2013-08-27 16:57:31 +05304332 struct be_nic_res_desc *nic;
Vasundhara Volamba48c0c2014-06-30 13:01:30 +05304333 struct be_mcc_wrb wrb = {0};
Vasundhara Volama05f99d2013-04-21 23:28:17 +00004334 struct be_dma_mem cmd;
Vasundhara Volamf2858732015-03-04 00:44:33 -05004335 u16 desc_count;
Vasundhara Volama05f99d2013-04-21 23:28:17 +00004336 int status;
4337
4338 memset(&cmd, 0, sizeof(struct be_dma_mem));
Vasundhara Volam150d58c2013-08-27 16:57:31 +05304339 cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05304340 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
4341 GFP_ATOMIC);
Vasundhara Volam150d58c2013-08-27 16:57:31 +05304342 if (!cmd.va)
Vasundhara Volama05f99d2013-04-21 23:28:17 +00004343 return -ENOMEM;
Vasundhara Volama05f99d2013-04-21 23:28:17 +00004344
Vasundhara Volamba48c0c2014-06-30 13:01:30 +05304345 req = cmd.va;
4346 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4347 OPCODE_COMMON_GET_PROFILE_CONFIG,
4348 cmd.size, &wrb, &cmd);
4349
Vasundhara Volamba48c0c2014-06-30 13:01:30 +05304350 if (!lancer_chip(adapter))
4351 req->hdr.version = 1;
4352 req->type = ACTIVE_PROFILE_TYPE;
Somnath Kotur72ef3a82015-10-12 03:47:20 -04004353 req->hdr.domain = domain;
Vasundhara Volamba48c0c2014-06-30 13:01:30 +05304354
Vasundhara Volamf2858732015-03-04 00:44:33 -05004355 /* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the
4356 * descriptors with all bits set to "1" for the fields which can be
4357 * modified using SET_PROFILE_CONFIG cmd.
4358 */
4359 if (query == RESOURCE_MODIFIABLE)
4360 req->type |= QUERY_MODIFIABLE_FIELDS_TYPE;
4361
Vasundhara Volamba48c0c2014-06-30 13:01:30 +05304362 status = be_cmd_notify_wait(adapter, &wrb);
Vasundhara Volam150d58c2013-08-27 16:57:31 +05304363 if (status)
4364 goto err;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004365
Vasundhara Volam150d58c2013-08-27 16:57:31 +05304366 resp = cmd.va;
Vasundhara Volamf2858732015-03-04 00:44:33 -05004367 desc_count = le16_to_cpu(resp->desc_count);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004368
Suresh Reddy980df242015-12-30 01:29:03 -05004369 pcie = be_get_pcie_desc(resp->func_param, desc_count,
4370 adapter->pf_num);
Vasundhara Volam150d58c2013-08-27 16:57:31 +05304371 if (pcie)
Sathya Perla92bf14a2013-08-27 16:57:32 +05304372 res->max_vfs = le16_to_cpu(pcie->num_vfs);
Vasundhara Volam150d58c2013-08-27 16:57:31 +05304373
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304374 port = be_get_port_desc(resp->func_param, desc_count);
4375 if (port)
4376 adapter->mc_type = port->mc_type;
4377
Suresh Reddy980df242015-12-30 01:29:03 -05004378 nic = be_get_func_nic_desc(resp->func_param, desc_count,
4379 adapter->pf_num);
Sathya Perla92bf14a2013-08-27 16:57:32 +05304380 if (nic)
4381 be_copy_nic_desc(res, nic);
4382
Suresh Reddy980df242015-12-30 01:29:03 -05004383 vf_res = be_get_vft_desc(resp->func_param, desc_count,
4384 adapter->pf_num);
Vasundhara Volam10cccf62014-06-30 13:01:31 +05304385 if (vf_res)
4386 res->vf_if_cap_flags = vf_res->cap_flags;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004387err:
Vasundhara Volama05f99d2013-04-21 23:28:17 +00004388 if (cmd.va)
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05304389 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
4390 cmd.dma);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004391 return status;
4392}
4393
Vasundhara Volambec84e62014-06-30 13:01:32 +05304394/* Will use MBOX only if MCCQ has not been created */
4395static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
4396 int size, int count, u8 version, u8 domain)
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00004397{
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00004398 struct be_cmd_req_set_profile_config *req;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304399 struct be_mcc_wrb wrb = {0};
4400 struct be_dma_mem cmd;
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00004401 int status;
4402
Vasundhara Volambec84e62014-06-30 13:01:32 +05304403 memset(&cmd, 0, sizeof(struct be_dma_mem));
4404 cmd.size = sizeof(struct be_cmd_req_set_profile_config);
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05304405 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
4406 GFP_ATOMIC);
Vasundhara Volambec84e62014-06-30 13:01:32 +05304407 if (!cmd.va)
4408 return -ENOMEM;
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00004409
Vasundhara Volambec84e62014-06-30 13:01:32 +05304410 req = cmd.va;
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00004411 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
Vasundhara Volambec84e62014-06-30 13:01:32 +05304412 OPCODE_COMMON_SET_PROFILE_CONFIG, cmd.size,
4413 &wrb, &cmd);
Sathya Perlaa4018012014-03-27 10:46:18 +05304414 req->hdr.version = version;
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00004415 req->hdr.domain = domain;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304416 req->desc_count = cpu_to_le32(count);
Sathya Perlaa4018012014-03-27 10:46:18 +05304417 memcpy(req->desc, desc, size);
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00004418
Vasundhara Volambec84e62014-06-30 13:01:32 +05304419 status = be_cmd_notify_wait(adapter, &wrb);
4420
4421 if (cmd.va)
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05304422 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
4423 cmd.dma);
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00004424 return status;
4425}
4426
Sathya Perlaa4018012014-03-27 10:46:18 +05304427/* Mark all fields invalid */
Vasundhara Volambec84e62014-06-30 13:01:32 +05304428static void be_reset_nic_desc(struct be_nic_res_desc *nic)
Sathya Perlaa4018012014-03-27 10:46:18 +05304429{
4430 memset(nic, 0, sizeof(*nic));
4431 nic->unicast_mac_count = 0xFFFF;
4432 nic->mcc_count = 0xFFFF;
4433 nic->vlan_count = 0xFFFF;
4434 nic->mcast_mac_count = 0xFFFF;
4435 nic->txq_count = 0xFFFF;
4436 nic->rq_count = 0xFFFF;
4437 nic->rssq_count = 0xFFFF;
4438 nic->lro_count = 0xFFFF;
4439 nic->cq_count = 0xFFFF;
4440 nic->toe_conn_count = 0xFFFF;
4441 nic->eq_count = 0xFFFF;
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05304442 nic->iface_count = 0xFFFF;
Sathya Perlaa4018012014-03-27 10:46:18 +05304443 nic->link_param = 0xFF;
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05304444 nic->channel_id_param = cpu_to_le16(0xF000);
Sathya Perlaa4018012014-03-27 10:46:18 +05304445 nic->acpi_params = 0xFF;
4446 nic->wol_param = 0x0F;
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05304447 nic->tunnel_iface_count = 0xFFFF;
4448 nic->direct_tenant_iface_count = 0xFFFF;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304449 nic->bw_min = 0xFFFFFFFF;
Sathya Perlaa4018012014-03-27 10:46:18 +05304450 nic->bw_max = 0xFFFFFFFF;
4451}
4452
Vasundhara Volambec84e62014-06-30 13:01:32 +05304453/* Mark all fields invalid */
4454static void be_reset_pcie_desc(struct be_pcie_res_desc *pcie)
4455{
4456 memset(pcie, 0, sizeof(*pcie));
4457 pcie->sriov_state = 0xFF;
4458 pcie->pf_state = 0xFF;
4459 pcie->pf_type = 0xFF;
4460 pcie->num_vfs = 0xFFFF;
4461}
4462
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05304463int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
4464 u8 domain)
Sathya Perlaa4018012014-03-27 10:46:18 +05304465{
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05304466 struct be_nic_res_desc nic_desc;
4467 u32 bw_percent;
4468 u16 version = 0;
Sathya Perlaa4018012014-03-27 10:46:18 +05304469
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05304470 if (BE3_chip(adapter))
4471 return be_cmd_set_qos(adapter, max_rate / 10, domain);
4472
4473 be_reset_nic_desc(&nic_desc);
Suresh Reddy980df242015-12-30 01:29:03 -05004474 nic_desc.pf_num = adapter->pf_num;
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05304475 nic_desc.vf_num = domain;
Kalesh AP58bdeaa2015-01-20 03:51:49 -05004476 nic_desc.bw_min = 0;
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05304477 if (lancer_chip(adapter)) {
Sathya Perlaa4018012014-03-27 10:46:18 +05304478 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
4479 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
4480 nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
4481 (1 << NOSV_SHIFT);
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05304482 nic_desc.bw_max = cpu_to_le32(max_rate / 10);
Sathya Perlaa4018012014-03-27 10:46:18 +05304483 } else {
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05304484 version = 1;
4485 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
4486 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
4487 nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
4488 bw_percent = max_rate ? (max_rate * 100) / link_speed : 100;
4489 nic_desc.bw_max = cpu_to_le32(bw_percent);
Sathya Perlaa4018012014-03-27 10:46:18 +05304490 }
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05304491
4492 return be_cmd_set_profile_config(adapter, &nic_desc,
4493 nic_desc.hdr.desc_len,
Vasundhara Volambec84e62014-06-30 13:01:32 +05304494 1, version, domain);
4495}
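
/* Worked example (illustrative): capping a VF at max_rate = 2500 Mb/s on a
 * link_speed = 10000 Mb/s link programs, on the version-1 (non-Lancer)
 * path, bw_max = (2500 * 100) / 10000 = 25 (a link percentage); on Lancer,
 * bw_max = 2500 / 10 = 250, i.e. the rate in units of 10 Mb/s.
 */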
4496
Vasundhara Volamf2858732015-03-04 00:44:33 -05004497static void be_fill_vf_res_template(struct be_adapter *adapter,
4498 struct be_resources pool_res,
4499 u16 num_vfs, u16 num_vf_qs,
4500 struct be_nic_res_desc *nic_vft)
4501{
4502 u32 vf_if_cap_flags = pool_res.vf_if_cap_flags;
4503 struct be_resources res_mod = {0};
4504
4505 /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
4506 * which are modifiable using SET_PROFILE_CONFIG cmd.
4507 */
4508 be_cmd_get_profile_config(adapter, &res_mod, RESOURCE_MODIFIABLE, 0);
4509
4510 /* If RSS IFACE capability flags are modifiable for a VF, set the
4511 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
4512 * more than 1 RSSQ is available for a VF.
4513	 * Otherwise, provision only 1 queue pair for the VF.
4514 */
4515 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
4516 nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
4517 if (num_vf_qs > 1) {
4518 vf_if_cap_flags |= BE_IF_FLAGS_RSS;
4519 if (pool_res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
4520 vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
4521 } else {
4522 vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
4523 BE_IF_FLAGS_DEFQ_RSS);
4524 }
Vasundhara Volamf2858732015-03-04 00:44:33 -05004525 } else {
4526 num_vf_qs = 1;
4527 }
4528
Kalesh AP196e3732015-10-12 03:47:21 -04004529 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
4530 nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
4531 vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
4532 }
4533
4534 nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
Vasundhara Volamf2858732015-03-04 00:44:33 -05004535 nic_vft->rq_count = cpu_to_le16(num_vf_qs);
4536 nic_vft->txq_count = cpu_to_le16(num_vf_qs);
4537 nic_vft->rssq_count = cpu_to_le16(num_vf_qs);
4538 nic_vft->cq_count = cpu_to_le16(pool_res.max_cq_count /
4539 (num_vfs + 1));
4540
4541 /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
4542	 * among the PF and its VFs, if the fields are changeable
4543 */
4544 if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
4545 nic_vft->unicast_mac_count = cpu_to_le16(pool_res.max_uc_mac /
4546 (num_vfs + 1));
4547
4548 if (res_mod.max_vlans == FIELD_MODIFIABLE)
4549 nic_vft->vlan_count = cpu_to_le16(pool_res.max_vlans /
4550 (num_vfs + 1));
4551
4552 if (res_mod.max_iface_count == FIELD_MODIFIABLE)
4553 nic_vft->iface_count = cpu_to_le16(pool_res.max_iface_count /
4554 (num_vfs + 1));
4555
4556 if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
4557 nic_vft->mcc_count = cpu_to_le16(pool_res.max_mcc_count /
4558 (num_vfs + 1));
4559}
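
/* Worked example (illustrative): with pool_res.max_uc_mac = 32,
 * pool_res.max_vlans = 64 and num_vfs = 7, each of the 8 functions
 * (PF + 7 VFs) is provisioned 32 / 8 = 4 unicast MACs and 64 / 8 = 8 VLAN
 * filters, provided those fields were reported as FIELD_MODIFIABLE by the
 * RESOURCE_MODIFIABLE query above.
 */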
4560
Vasundhara Volambec84e62014-06-30 13:01:32 +05304561int be_cmd_set_sriov_config(struct be_adapter *adapter,
Vasundhara Volamf2858732015-03-04 00:44:33 -05004562 struct be_resources pool_res, u16 num_vfs,
4563 u16 num_vf_qs)
Vasundhara Volambec84e62014-06-30 13:01:32 +05304564{
4565 struct {
4566 struct be_pcie_res_desc pcie;
4567 struct be_nic_res_desc nic_vft;
4568 } __packed desc;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304569
Vasundhara Volambec84e62014-06-30 13:01:32 +05304570 /* PF PCIE descriptor */
4571 be_reset_pcie_desc(&desc.pcie);
4572 desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1;
4573 desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
Vasundhara Volamf2858732015-03-04 00:44:33 -05004574 desc.pcie.flags = BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
Vasundhara Volambec84e62014-06-30 13:01:32 +05304575 desc.pcie.pf_num = adapter->pdev->devfn;
4576 desc.pcie.sriov_state = num_vfs ? 1 : 0;
4577 desc.pcie.num_vfs = cpu_to_le16(num_vfs);
4578
4579 /* VF NIC Template descriptor */
4580 be_reset_nic_desc(&desc.nic_vft);
4581 desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
4582 desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
Vasundhara Volamf2858732015-03-04 00:44:33 -05004583 desc.nic_vft.flags = BIT(VFT_SHIFT) | BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
Vasundhara Volambec84e62014-06-30 13:01:32 +05304584 desc.nic_vft.pf_num = adapter->pdev->devfn;
4585 desc.nic_vft.vf_num = 0;
4586
Vasundhara Volamf2858732015-03-04 00:44:33 -05004587 be_fill_vf_res_template(adapter, pool_res, num_vfs, num_vf_qs,
4588 &desc.nic_vft);
Vasundhara Volambec84e62014-06-30 13:01:32 +05304589
4590 return be_cmd_set_profile_config(adapter, &desc,
4591 2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0);
Sathya Perlaa4018012014-03-27 10:46:18 +05304592}
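
/* Illustrative usage, not upstream code: when enabling SR-IOV, the PF
 * carves the pool-level resources (obtained earlier via
 * be_cmd_get_profile_config()) into the per-VF template, roughly:
 *
 *	status = be_cmd_set_sriov_config(adapter, pool_res, num_vfs,
 *					 num_vf_qs);
 *
 * where pool_res, num_vfs and num_vf_qs are assumed to have been computed
 * by the caller.
 */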
4593
4594int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
4595{
4596 struct be_mcc_wrb *wrb;
4597 struct be_cmd_req_manage_iface_filters *req;
4598 int status;
4599
4600 if (iface == 0xFFFFFFFF)
4601 return -1;
4602
4603 spin_lock_bh(&adapter->mcc_lock);
4604
4605 wrb = wrb_from_mccq(adapter);
4606 if (!wrb) {
4607 status = -EBUSY;
4608 goto err;
4609 }
4610 req = embedded_payload(wrb);
4611
4612 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4613 OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req),
4614 wrb, NULL);
4615 req->op = op;
4616 req->target_iface_id = cpu_to_le32(iface);
4617
4618 status = be_mcc_notify_wait(adapter);
4619err:
4620 spin_unlock_bh(&adapter->mcc_lock);
4621 return status;
4622}
4623
4624int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port)
4625{
4626 struct be_port_res_desc port_desc;
4627
4628 memset(&port_desc, 0, sizeof(port_desc));
4629 port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1;
4630 port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
4631 port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
4632 port_desc.link_num = adapter->hba_port_num;
4633 if (port) {
4634 port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) |
4635 (1 << RCVID_SHIFT);
4636 port_desc.nv_port = swab16(port);
4637 } else {
4638 port_desc.nv_flags = NV_TYPE_DISABLED;
4639 port_desc.nv_port = 0;
4640 }
4641
4642 return be_cmd_set_profile_config(adapter, &port_desc,
Vasundhara Volambec84e62014-06-30 13:01:32 +05304643 RESOURCE_DESC_SIZE_V1, 1, 1, 0);
Sathya Perlaa4018012014-03-27 10:46:18 +05304644}
4645
Sathya Perla4c876612013-02-03 20:30:11 +00004646int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
4647 int vf_num)
4648{
4649 struct be_mcc_wrb *wrb;
4650 struct be_cmd_req_get_iface_list *req;
4651 struct be_cmd_resp_get_iface_list *resp;
4652 int status;
4653
4654 spin_lock_bh(&adapter->mcc_lock);
4655
4656 wrb = wrb_from_mccq(adapter);
4657 if (!wrb) {
4658 status = -EBUSY;
4659 goto err;
4660 }
4661 req = embedded_payload(wrb);
4662
4663 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4664 OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
4665 wrb, NULL);
4666 req->hdr.domain = vf_num + 1;
4667
4668 status = be_mcc_notify_wait(adapter);
4669 if (!status) {
4670 resp = (struct be_cmd_resp_get_iface_list *)req;
4671 vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
4672 }
4673
4674err:
4675 spin_unlock_bh(&adapter->mcc_lock);
4676 return status;
4677}
4678
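/* Descriptive note (not upstream text): the Lancer-only helpers below poll
 * the PHYSDEV control register until its "in progress" bit clears (for up
 * to 30 seconds) and then write the requested reset/dump mask, which is how
 * the FW reset and diagnostic-dump operations are triggered.
 */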
Somnath Kotur5c510812013-05-30 02:52:23 +00004679static int lancer_wait_idle(struct be_adapter *adapter)
4680{
4681#define SLIPORT_IDLE_TIMEOUT 30
4682 u32 reg_val;
4683 int status = 0, i;
4684
4685 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
4686 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
4687 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
4688 break;
4689
4690 ssleep(1);
4691 }
4692
4693 if (i == SLIPORT_IDLE_TIMEOUT)
4694 status = -1;
4695
4696 return status;
4697}
4698
4699int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask)
4700{
4701 int status = 0;
4702
4703 status = lancer_wait_idle(adapter);
4704 if (status)
4705 return status;
4706
4707 iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET);
4708
4709 return status;
4710}
4711
4712/* Routine to check whether dump image is present or not */
4713bool dump_present(struct be_adapter *adapter)
4714{
4715 u32 sliport_status = 0;
4716
4717 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
4718 return !!(sliport_status & SLIPORT_STATUS_DIP_MASK);
4719}
4720
4721int lancer_initiate_dump(struct be_adapter *adapter)
4722{
Kalesh APf0613382014-08-01 17:47:32 +05304723 struct device *dev = &adapter->pdev->dev;
Somnath Kotur5c510812013-05-30 02:52:23 +00004724 int status;
4725
Kalesh APf0613382014-08-01 17:47:32 +05304726 if (dump_present(adapter)) {
4727 dev_info(dev, "Previous dump not cleared, not forcing dump\n");
4728 return -EEXIST;
4729 }
4730
Somnath Kotur5c510812013-05-30 02:52:23 +00004731 /* give firmware reset and diagnostic dump */
4732 status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK |
4733 PHYSDEV_CONTROL_DD_MASK);
4734 if (status < 0) {
Kalesh APf0613382014-08-01 17:47:32 +05304735 dev_err(dev, "FW reset failed\n");
Somnath Kotur5c510812013-05-30 02:52:23 +00004736 return status;
4737 }
4738
4739 status = lancer_wait_idle(adapter);
4740 if (status)
4741 return status;
4742
4743 if (!dump_present(adapter)) {
Kalesh APf0613382014-08-01 17:47:32 +05304744 dev_err(dev, "FW dump not generated\n");
4745 return -EIO;
Somnath Kotur5c510812013-05-30 02:52:23 +00004746 }
4747
4748 return 0;
4749}
4750
Kalesh APf0613382014-08-01 17:47:32 +05304751int lancer_delete_dump(struct be_adapter *adapter)
4752{
4753 int status;
4754
4755 status = lancer_cmd_delete_object(adapter, LANCER_FW_DUMP_FILE);
4756 return be_cmd_status(status);
4757}
4758
Padmanabh Ratnakardcf7ebb2012-10-20 06:03:49 +00004759/* Uses sync mcc */
4760int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
4761{
4762 struct be_mcc_wrb *wrb;
4763 struct be_cmd_enable_disable_vf *req;
4764 int status;
4765
Vasundhara Volam05998632013-10-01 15:59:59 +05304766 if (BEx_chip(adapter))
Padmanabh Ratnakardcf7ebb2012-10-20 06:03:49 +00004767 return 0;
4768
4769 spin_lock_bh(&adapter->mcc_lock);
4770
4771 wrb = wrb_from_mccq(adapter);
4772 if (!wrb) {
4773 status = -EBUSY;
4774 goto err;
4775 }
4776
4777 req = embedded_payload(wrb);
4778
4779 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4780 OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
4781 wrb, NULL);
4782
4783 req->hdr.domain = domain;
4784 req->enable = 1;
4785 status = be_mcc_notify_wait(adapter);
4786err:
4787 spin_unlock_bh(&adapter->mcc_lock);
4788 return status;
4789}
4790
Somnath Kotur68c45a22013-03-14 02:42:07 +00004791int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
4792{
4793 struct be_mcc_wrb *wrb;
4794 struct be_cmd_req_intr_set *req;
4795 int status;
4796
4797 if (mutex_lock_interruptible(&adapter->mbox_lock))
4798 return -1;
4799
4800 wrb = wrb_from_mbox(adapter);
4801
4802 req = embedded_payload(wrb);
4803
4804 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4805 OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
4806 wrb, NULL);
4807
4808 req->intr_enabled = intr_enable;
4809
4810 status = be_mbox_notify_wait(adapter);
4811
4812 mutex_unlock(&adapter->mbox_lock);
4813 return status;
4814}
4815
Vasundhara Volam542963b2014-01-15 13:23:33 +05304816/* Uses MBOX */
4817int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id)
4818{
4819 struct be_cmd_req_get_active_profile *req;
4820 struct be_mcc_wrb *wrb;
4821 int status;
4822
4823 if (mutex_lock_interruptible(&adapter->mbox_lock))
4824 return -1;
4825
4826 wrb = wrb_from_mbox(adapter);
4827 if (!wrb) {
4828 status = -EBUSY;
4829 goto err;
4830 }
4831
4832 req = embedded_payload(wrb);
4833
4834 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4835 OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req),
4836 wrb, NULL);
4837
4838 status = be_mbox_notify_wait(adapter);
4839 if (!status) {
4840 struct be_cmd_resp_get_active_profile *resp =
4841 embedded_payload(wrb);
Kalesh AP03d28ff2014-09-19 15:46:56 +05304842
Vasundhara Volam542963b2014-01-15 13:23:33 +05304843 *profile_id = le16_to_cpu(resp->active_profile_id);
4844 }
4845
4846err:
4847 mutex_unlock(&adapter->mbox_lock);
4848 return status;
4849}
4850
Suresh Reddyd9d426a2015-12-30 01:28:56 -05004851int __be_cmd_set_logical_link_config(struct be_adapter *adapter,
4852 int link_state, int version, u8 domain)
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304853{
4854 struct be_mcc_wrb *wrb;
4855 struct be_cmd_req_set_ll_link *req;
4856 int status;
4857
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304858 spin_lock_bh(&adapter->mcc_lock);
4859
4860 wrb = wrb_from_mccq(adapter);
4861 if (!wrb) {
4862 status = -EBUSY;
4863 goto err;
4864 }
4865
4866 req = embedded_payload(wrb);
4867
4868 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4869 OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG,
4870 sizeof(*req), wrb, NULL);
4871
Suresh Reddyd9d426a2015-12-30 01:28:56 -05004872 req->hdr.version = version;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304873 req->hdr.domain = domain;
4874
Suresh Reddyd9d426a2015-12-30 01:28:56 -05004875 if (link_state == IFLA_VF_LINK_STATE_ENABLE ||
4876 link_state == IFLA_VF_LINK_STATE_AUTO)
4877 req->link_config |= PLINK_ENABLE;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304878
4879 if (link_state == IFLA_VF_LINK_STATE_AUTO)
Suresh Reddyd9d426a2015-12-30 01:28:56 -05004880 req->link_config |= PLINK_TRACK;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304881
4882 status = be_mcc_notify_wait(adapter);
4883err:
4884 spin_unlock_bh(&adapter->mcc_lock);
4885 return status;
4886}
4887
Suresh Reddyd9d426a2015-12-30 01:28:56 -05004888int be_cmd_set_logical_link_config(struct be_adapter *adapter,
4889 int link_state, u8 domain)
4890{
4891 int status;
4892
4893 if (BEx_chip(adapter))
4894 return -EOPNOTSUPP;
4895
4896 status = __be_cmd_set_logical_link_config(adapter, link_state,
4897 2, domain);
4898
4899 /* Version 2 of the command will not be recognized by older FW.
4900	 * On such a failure, issue version 1 of the command.
4901 */
4902 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST)
4903 status = __be_cmd_set_logical_link_config(adapter, link_state,
4904 1, domain);
4905 return status;
4906}
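
/* Descriptive note (not upstream text): pass-through used by the RoCE
 * driver (the symbol is exported below). The caller hands in a fully
 * formed MCC request payload; it is issued on the NIC driver's MCC queue
 * and the response is copied back into the same buffer.
 */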
Parav Pandit6a4ab662012-03-26 14:27:12 +00004907int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
Sathya Perlaa2cc4e02014-05-09 13:29:14 +05304908 int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
Parav Pandit6a4ab662012-03-26 14:27:12 +00004909{
4910 struct be_adapter *adapter = netdev_priv(netdev_handle);
4911 struct be_mcc_wrb *wrb;
Kalesh AP504fbf12014-09-19 15:47:00 +05304912 struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *)wrb_payload;
Parav Pandit6a4ab662012-03-26 14:27:12 +00004913 struct be_cmd_req_hdr *req;
4914 struct be_cmd_resp_hdr *resp;
4915 int status;
4916
4917 spin_lock_bh(&adapter->mcc_lock);
4918
4919 wrb = wrb_from_mccq(adapter);
4920 if (!wrb) {
4921 status = -EBUSY;
4922 goto err;
4923 }
4924 req = embedded_payload(wrb);
4925 resp = embedded_payload(wrb);
4926
4927 be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
4928 hdr->opcode, wrb_payload_size, wrb, NULL);
4929 memcpy(req, wrb_payload, wrb_payload_size);
4930 be_dws_cpu_to_le(req, wrb_payload_size);
4931
4932 status = be_mcc_notify_wait(adapter);
4933 if (cmd_status)
4934 *cmd_status = (status & 0xffff);
4935 if (ext_status)
4936 *ext_status = 0;
4937 memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
4938 be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
4939err:
4940 spin_unlock_bh(&adapter->mcc_lock);
4941 return status;
4942}
4943EXPORT_SYMBOL(be_roce_mcc_cmd);