/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/etherdevice.h>
#include "qed.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

#define CHIP_MCP_RESP_ITER_US	10

#define QED_DRV_MB_MAX_RETRIES	(500 * 1000)	/* Account for 5 sec */
#define QED_MCP_RESET_RETRIES	(50 * 1000)	/* Account for 500 msec */

#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val)	     \
	qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
	       _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val)	  \
	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr,	  \
		     offsetof(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field)	   \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field))

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_SHIFT)

#define MCP_BYTES_PER_MBIT_SHIFT 17

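/* Returns true only after qed_load_mcp_offsets() has found a valid
 * shared-memory base, i.e. once the MFW mailbox is usable.
 */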
bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
		return false;
	return true;
}

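/* Resolve this PF's port section inside the MFW's public shared memory
 * and cache its address in mcp_info->port_addr for later port reads.
 */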
void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}

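/* Snapshot the MFW message mailbox into mfw_mb_cur. The data is stored
 * big-endian in shared memory, so each dword is swapped to CPU order.
 */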
void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	u32 tmp, i;

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		tmp = qed_rd(p_hwfn, p_ptt,
			     p_hwfn->mcp_info->mfw_mb_addr +
			     (i << 2) + sizeof(u32));

		/* The MB data is actually BE; Need to force it to cpu */
		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
			be32_to_cpu((__force __be32)tmp);
	}
}

int qed_mcp_free(struct qed_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		kfree(p_hwfn->mcp_info->mfw_mb_cur);
		kfree(p_hwfn->mcp_info->mfw_mb_shadow);
	}
	kfree(p_hwfn->mcp_info);

	return 0;
}

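/* Read the shared-memory base from the GRC and derive the driver/MFW
 * mailbox addresses, initial sequence numbers and the MCP history counter.
 * A public_base of zero means the MCP never initialized.
 */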
static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info = p_hwfn->mcp_info;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

	p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base)
		return 0;

	p_info->public_base |= GRCBASE_MCP;

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Set the MFW MB address */
	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	return 0;
}

int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
	if (!p_hwfn->mcp_info)
		goto err;
	p_info = p_hwfn->mcp_info;

	if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
		DP_NOTICE(p_hwfn, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicates that
		 * the MCP is not initialized
		 */
		return 0;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
	p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
		goto err;

	/* Initialize the MFW spinlocks */
	spin_lock_init(&p_info->lock);
	spin_lock_init(&p_info->link_lock);

	return 0;

err:
	qed_mcp_free(p_hwfn);
	return -ENOMEM;
}

/* Locks the MFW mailbox of a PF to ensure a single access.
 * The lock is achieved in most cases by holding a spinlock, causing other
 * threads to wait till a previous access is done.
 * In some cases (currently when a [UN]LOAD_REQ command is sent), the single
 * access is achieved by setting a blocking flag, which causes the mailbox
 * commands of competing contexts to fail.
 */
static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn, u32 cmd)
{
	spin_lock_bh(&p_hwfn->mcp_info->lock);

	/* The spinlock shouldn't be acquired when the mailbox command is
	 * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
	 * pending [UN]LOAD_REQ command of another PF together with a spinlock
	 * (i.e. interrupts are disabled) - can lead to a deadlock.
	 * It is assumed that for a single PF, no other mailbox commands can be
	 * sent from another context while sending LOAD_REQ, and that any
	 * parallel commands to UNLOAD_REQ can be cancelled.
	 */
	if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
		p_hwfn->mcp_info->block_mb_sending = false;

	if (p_hwfn->mcp_info->block_mb_sending) {
		DP_NOTICE(p_hwfn,
			  "Trying to send an MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. Aborting.\n",
			  cmd);
		spin_unlock_bh(&p_hwfn->mcp_info->lock);
		return -EBUSY;
	}

	if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
		p_hwfn->mcp_info->block_mb_sending = true;
		spin_unlock_bh(&p_hwfn->mcp_info->lock);
	}

	return 0;
}

static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn, u32 cmd)
{
	if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
		spin_unlock_bh(&p_hwfn->mcp_info->lock);
}

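/* Ask the MFW to reset itself, then poll the MCP history counter
 * (MISCS_REG_GENERIC_POR_0) - a change indicates the reset took place.
 */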
int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
	u8 delay = CHIP_MCP_RESP_ITER_US;
	u32 org_mcp_reset_seq, cnt = 0;
	int rc = 0;

	/* Ensure that only a single thread is accessing the mailbox at a
	 * certain time.
	 */
	rc = qed_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
	if (rc != 0)
		return rc;

	/* Set drv command along with the updated sequence */
	org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header,
		  (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		udelay(delay);
		/* Give the FW up to 500 msec (50*1000*10usec) */
	} while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
					      MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < QED_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = -EAGAIN;
	}

	qed_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);

	return rc;
}

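/* Low-level mailbox transaction: write the param and (cmd | seq) into the
 * driver mailbox, then poll the FW mailbox header until the sequence number
 * is echoed back or QED_DRV_MB_MAX_RETRIES expires.
 */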
static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  u32 cmd,
			  u32 param,
			  u32 *o_mcp_resp,
			  u32 *o_mcp_param)
{
	u8 delay = CHIP_MCP_RESP_ITER_US;
	u32 seq, cnt = 1, actual_mb_seq;
	int rc = 0;

	/* Get actual driver mailbox sequence */
	actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			DRV_MSG_SEQ_NUMBER_MASK;

	/* Use MCP history register to check if MCP reset occurred between
	 * init time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist !=
	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n");
		qed_load_mcp_offsets(p_hwfn, p_ptt);
		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
	seq = ++p_hwfn->mcp_info->drv_mb_seq;

	/* Set drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);

	/* Set drv command along with the updated sequence */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "wrote command (%x) to MFW MB param 0x%08x\n",
		   (cmd | seq), param);

	do {
		/* Wait for MFW response */
		udelay(delay);
		*o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);

		/* Give the FW up to 5 seconds (500*1000*10usec) */
	} while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
		 (cnt++ < QED_DRV_MB_MAX_RETRIES));

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "[after %d usec] read (%x) seq is (%x) from FW MB\n",
		   cnt * delay, *o_mcp_resp, seq);

	/* Is this a reply to our command? */
	if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
		*o_mcp_resp &= FW_MSG_CODE_MASK;
		/* Get the MCP param */
		*o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
	} else {
		/* FW BUG! */
		DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
		       cmd, param);
		*o_mcp_resp = 0;
		rc = -EAGAIN;
	}
	return rc;
}

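/* Send a mailbox command under the mailbox lock, optionally copying a
 * union drv_union_data payload into shared memory before the command and
 * reading one back once it completes.
 */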
static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_mcp_mb_params *p_mb_params)
{
	u32 union_data_addr;
	int rc;

	/* MCP not initialized */
	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  offsetof(struct public_drv_mb, union_data);

	/* Ensure that only a single thread is accessing the mailbox at a
	 * certain time.
	 */
	rc = qed_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
	if (rc)
		return rc;

	if (p_mb_params->p_data_src != NULL)
		qed_memcpy_to(p_hwfn, p_ptt, union_data_addr,
			      p_mb_params->p_data_src,
			      sizeof(*p_mb_params->p_data_src));

	rc = qed_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
			    p_mb_params->param, &p_mb_params->mcp_resp,
			    &p_mb_params->mcp_param);

	if (p_mb_params->p_data_dst != NULL)
		qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				union_data_addr,
				sizeof(*p_mb_params->p_data_dst));

	qed_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);

	return rc;
}

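/* Simple wrapper for mailbox commands that carry no extended payload.
 * Typical use (a sketch - the command and param values are illustrative):
 *
 *	u32 resp, param;
 *	int rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0,
 *			     &resp, &param);
 *
 * As a side effect, UNLOAD_DONE also carries the WoL MAC to the MFW when
 * WoL is enabled.
 */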
int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
		struct qed_ptt *p_ptt,
		u32 cmd,
		u32 param,
		u32 *o_mcp_resp,
		u32 *o_mcp_param)
{
	struct qed_mcp_mb_params mb_params;
	union drv_union_data data_src;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	memset(&data_src, 0, sizeof(data_src));
	mb_params.cmd = cmd;
	mb_params.param = param;

	/* In case of UNLOAD_DONE, set the primary MAC */
	if ((cmd == DRV_MSG_CODE_UNLOAD_DONE) &&
	    (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED)) {
		u8 *p_mac = p_hwfn->cdev->wol_mac;

		data_src.wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
		data_src.wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
					     p_mac[4] << 8 | p_mac[5];

		DP_VERBOSE(p_hwfn,
			   (QED_MSG_SP | NETIF_MSG_IFDOWN),
			   "Setting WoL MAC: %pM --> [%08x,%08x]\n",
			   p_mac, data_src.wol_mac.mac_upper,
			   data_src.wol_mac.mac_lower);

		mb_params.p_data_src = &data_src;
	}

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return 0;
}

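/* Mailbox command whose reply carries a data buffer: the MFW returns the
 * transaction size in mcp_param and the payload in union_data.raw_data.
 */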
int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 cmd,
		       u32 param,
		       u32 *o_mcp_resp,
		       u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
{
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_dst = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	*o_txn_size = *o_mcp_param;
	memcpy(o_buf, &union_data.raw_data, *o_txn_size);

	return 0;
}

int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt, u32 *p_load_code)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	/* Load Request */
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
			  cdev->drv_type;
	memcpy(&union_data.ver_str, cdev->ver_str, MCP_DRV_VER_STR_SIZE);
	mb_params.p_data_src = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	*p_load_code = mb_params.mcp_resp;

	/* If the MFW refused the load request, we must abort. This can happen
	 * in the following cases:
	 * - The other port is in diagnostic mode.
	 * - A previously loaded function on the engine is not compliant with
	 *   the requester.
	 * - The MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
	 */
	if (!(*p_load_code) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
		DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
		return -EBUSY;
	}

	return 0;
}

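/* Attention handler: read the per-path bitmap of FLR-ed VFs from shared
 * memory and schedule the IOV workqueue to process the FLR.
 */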
static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     QED_PATH_ID(p_hwfn));
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
					 path_addr +
					 offsetof(struct public_path,
						  mcp_vf_disabled) +
					 sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
}

int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	int rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	memcpy(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
	mb_params.p_data_src = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
		return -EBUSY;
	}

	/* Clear the ACK bits */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		qed_wr(p_hwfn, p_ptt,
		       func_addr +
		       offsetof(struct public_func, drv_ack_vf_disabled) +
		       i * sizeof(u32), 0);

	return rc;
}

static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
					      struct qed_ptt *p_ptt)
{
	u32 transceiver_state;

	transceiver_state = qed_rd(p_hwfn, p_ptt,
				   p_hwfn->mcp_info->port_addr +
				   offsetof(struct public_port,
					    transceiver_data));

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_HW | QED_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
		   transceiver_state,
		   (u32)(p_hwfn->mcp_info->port_addr +
			 offsetof(struct public_port, transceiver_data)));

	transceiver_state = GET_FIELD(transceiver_state,
				      ETH_TRANSCEIVER_STATE);

	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
}

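/* Parse the port section's link_status dword into the SW link state (speed,
 * duplex, partner abilities, flow control), re-apply the bandwidth limits
 * and notify the rest of the driver. Runs under link_lock so the attention
 * path and qed_mcp_set_link() cannot race.
 */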
static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, bool b_reset)
{
	struct qed_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	/* Prevent SW/attentions from doing this at the same time */
	spin_lock_bh(&p_hwfn->mcp_info->link_lock);

	p_link = &p_hwfn->mcp_info->link_output;
	memset(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = qed_rd(p_hwfn, p_ptt,
				p_hwfn->mcp_info->port_addr +
				offsetof(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
			   status,
			   (u32)(p_hwfn->mcp_info->port_addr +
				 offsetof(struct public_port, link_status)));
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link indications\n");
		goto out;
	}

	if (p_hwfn->b_drv_link_init)
		p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
	else
		p_link->link_up = false;

	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
	}

	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);

	/* Min bandwidth configuration */
	__qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
	qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_link->min_pf_rate);

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status &
				 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	qed_link_update(p_hwfn);
out:
	spin_unlock_bh(&p_hwfn->mcp_info->link_lock);
}

int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
{
	struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	struct eth_phy_cfg *phy_cfg;
	int rc = 0;
	u32 cmd;

	/* Set the shmem configuration according to params */
	phy_cfg = &union_data.drv_phy_cfg;
	memset(phy_cfg, 0, sizeof(*phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		phy_cfg->speed = params->speed.forced_speed;
	phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	phy_cfg->adv_speed = params->speed.advertised_speeds;
	phy_cfg->loopback_mode = params->loopback_mode;

	p_hwfn->b_drv_link_init = b_up;

	if (b_up) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
			   phy_cfg->speed,
			   phy_cfg->pause,
			   phy_cfg->adv_speed,
			   phy_cfg->loopback_mode,
			   phy_cfg->feature_config_flags);
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link\n");
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Mimic link-change attention, done for several reasons:
	 * - On reset, there's no guarantee MFW would trigger
	 *   an attention.
	 * - On initialization, older MFWs might not indicate link change
	 *   during LFA, so we'll never get an UP indication.
	 */
	qed_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);

	return 0;
}

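/* MFW pull-request for protocol statistics: gather the requested stats from
 * the driver and push them back through the GET_STATS mailbox command.
 */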
static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					enum MFW_DRV_MSG_TYPE type)
{
	enum qed_mcp_protocol_type stats_type;
	union qed_mcp_protocol_stats stats;
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	u32 hsi_param;

	switch (type) {
	case MFW_DRV_MSG_GET_LAN_STATS:
		stats_type = QED_MCP_LAN_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
		break;
	case MFW_DRV_MSG_GET_FCOE_STATS:
		stats_type = QED_MCP_FCOE_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
		break;
	case MFW_DRV_MSG_GET_ISCSI_STATS:
		stats_type = QED_MCP_ISCSI_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
		break;
	case MFW_DRV_MSG_GET_RDMA_STATS:
		stats_type = QED_MCP_RDMA_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
		return;
	}

	qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
	mb_params.param = hsi_param;
	memcpy(&union_data, &stats, sizeof(stats));
	mb_params.p_data_src = &union_data;
	qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
				  struct public_func *p_shmem_info)
{
	struct qed_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	p_info->bandwidth_min = (p_shmem_info->config &
				 FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT;
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}

	p_info->bandwidth_max = (p_shmem_info->config &
				 FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT;
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}

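/* Copy this PF's public_func section out of shared memory, dword by dword;
 * returns the number of bytes actually read.
 */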
static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct public_func *p_data, int pfid)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
	u32 i, size;

	memset(p_data, 0, sizeof(*p_data));

	size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
	for (i = 0; i < size / sizeof(u32); i++)
		((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
					    func_addr + (i << 2));
	return size;
}

static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *p_info;
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	qed_read_pf_bandwidth(p_hwfn, &shmem_info);

	p_info = &p_hwfn->mcp_info->func_info;

	qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
	qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);

	/* Acknowledge the MFW */
	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
		    &param);
}

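/* Main MFW notification handler: diff the fresh mailbox snapshot against
 * the shadow copy, dispatch every changed message to its handler, then ACK
 * all messages back to the MFW (in BE, as the MFW expects).
 */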
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *info = p_hwfn->mcp_info;
	int rc = 0;
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	qed_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		case MFW_DRV_MSG_VF_DISABLED:
			qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_LLDP_MIB);
			break;
		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_MIB);
			break;
		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_OPERATIONAL_MIB);
			break;
		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
			qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_LAN_STATS:
		case MFW_DRV_MSG_GET_FCOE_STATS:
		case MFW_DRV_MSG_GET_ISCSI_STATS:
		case MFW_DRV_MSG_GET_RDMA_STATS:
			qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
			break;
		case MFW_DRV_MSG_BW_UPDATE:
			qed_mcp_update_bw(p_hwfn, p_ptt);
			break;
		default:
			DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
			rc = -EINVAL;
		}
	}

	/* ACK everything */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		__be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);

		/* MFW expects the answer in BE, so force write in that format */
		qed_wr(p_hwfn, p_ptt,
		       info->mfw_mb_addr + sizeof(u32) +
		       MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
		       sizeof(u32) + i * sizeof(u32),
		       (__force u32)val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Received an MFW message indication but no new message!\n");
		rc = -EINVAL;
	}

	/* Copy the new mfw messages into the shadow */
	memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}

int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			u32 *p_mfw_ver, u32 *p_running_bundle_id)
{
	u32 global_offsize;

	if (IS_VF(p_hwfn->cdev)) {
		if (p_hwfn->vf_iov_info) {
			struct pfvf_acquire_resp_tlv *p_resp;

			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
			return 0;
		} else {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF requested MFW version prior to ACQUIRE\n");
			return -EINVAL;
		}
	}

	global_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_hwfn->
						     mcp_info->public_base,
						     PUBLIC_GLOBAL));
	*p_mfw_ver =
	    qed_rd(p_hwfn, p_ptt,
		   SECTION_ADDR(global_offsize,
				0) + offsetof(struct public_global, mfw_ver));

	if (p_running_bundle_id != NULL) {
		*p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
					      SECTION_ADDR(global_offsize, 0) +
					      offsetof(struct public_global,
						       running_bundle_id));
	}

	return 0;
}

int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type)
{
	struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
	struct qed_ptt *p_ptt;

	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	*p_media_type = MEDIA_UNSPECIFIED;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	*p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			       offsetof(struct public_port, media_type));

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

/* Old MFW has a global configuration for all PFs regarding RDMA support */
static void
qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn,
			       enum qed_pci_personality *p_proto)
{
	/* There wasn't ever a legacy MFW that published iwarp.
	 * So at this point, this is either plain L2 or RoCE.
	 */
	if (test_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities))
		*p_proto = QED_PCI_ETH_ROCE;
	else
		*p_proto = QED_PCI_ETH;

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "According to Legacy capabilities, L2 personality is %08x\n",
		   (u32)*p_proto);
}

static int
qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    enum qed_pci_personality *p_proto)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt,
			 DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
	if (rc)
		return rc;
	if (resp != FW_MSG_CODE_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
			   "MFW lacks support for command; Returns %08x\n",
			   resp);
		return -EINVAL;
	}

	switch (param) {
	case FW_MB_PARAM_GET_PF_RDMA_NONE:
		*p_proto = QED_PCI_ETH;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_ROCE:
		*p_proto = QED_PCI_ETH_ROCE;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_BOTH:
		DP_NOTICE(p_hwfn,
			  "Current day drivers don't support RoCE & iWARP. Default to RoCE-only\n");
		*p_proto = QED_PCI_ETH_ROCE;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_IWARP:
	default:
		DP_NOTICE(p_hwfn,
			  "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
			  param);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_IFUP,
		   "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
		   (u32)*p_proto, resp, param);
	return 0;
}

static int
qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
			struct public_func *p_info,
			struct qed_ptt *p_ptt,
			enum qed_pci_personality *p_proto)
{
	int rc = 0;

	switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
	case FUNC_MF_CFG_PROTOCOL_ETHERNET:
		if (!IS_ENABLED(CONFIG_QED_RDMA))
			*p_proto = QED_PCI_ETH;
		else if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto))
			qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
		break;
	case FUNC_MF_CFG_PROTOCOL_ISCSI:
		*p_proto = QED_PCI_ISCSI;
		break;
	case FUNC_MF_CFG_PROTOCOL_FCOE:
		*p_proto = QED_PCI_FCOE;
		break;
	case FUNC_MF_CFG_PROTOCOL_ROCE:
		DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
		/* Fallthrough */
	default:
		rc = -EINVAL;
	}

	return rc;
}

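/* Populate mcp_info->func_info (personality, bandwidth limits, MAC, WWNs,
 * ovlan, MTU) from the PF's shared-memory section, and query the MFW for
 * WoL support.
 */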
int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *info;
	struct public_func shmem_info;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
	info = &p_hwfn->mcp_info->func_info;

	info->pause_on_host = (shmem_info.config &
			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;

	if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
				    &info->protocol)) {
		DP_ERR(p_hwfn, "Unknown personality %08x\n",
		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
		return -EINVAL;
	}

	qed_read_pf_bandwidth(p_hwfn, &shmem_info);

	if (shmem_info.mac_upper || shmem_info.mac_lower) {
		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
		info->mac[1] = (u8)(shmem_info.mac_upper);
		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
		info->mac[5] = (u8)(shmem_info.mac_lower);

		/* Store primary MAC for later possible WoL */
		memcpy(&p_hwfn->cdev->wol_mac, info->mac, ETH_ALEN);
	} else {
		DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
	}

	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
			 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
			 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);

	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);

	info->mtu = (u16)shmem_info.mtu_size;

	p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_NONE;
	p_hwfn->cdev->wol_config = (u8)QED_OV_WOL_DEFAULT;
	if (qed_mcp_is_init(p_hwfn)) {
		u32 resp = 0, param = 0;
		int rc;

		rc = qed_mcp_cmd(p_hwfn, p_ptt,
				 DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
		if (rc)
			return rc;
		if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
			p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_PME;
	}

	DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
		   "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n",
		   info->pause_on_host, info->protocol,
		   info->bandwidth_min, info->bandwidth_max,
		   info->mac[0], info->mac[1], info->mac[2],
		   info->mac[3], info->mac[4], info->mac[5],
		   info->wwn_port, info->wwn_node,
		   info->ovlan, (u8)p_hwfn->hw_info.b_wol_support);

	return 0;
}

struct qed_mcp_link_params
*qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_input;
}

struct qed_mcp_link_state
*qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_output;
}

struct qed_mcp_link_capabilities
*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_capabilities;
}

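/* Request a NIG drain from the MFW; the sleep gives the requested drain
 * time to complete before the caller proceeds.
 */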
int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt,
			 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);

	/* Wait for the drain to complete before returning */
	msleep(1020);

	return rc;
}

int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u32 *p_flash_size)
{
	u32 flash_size;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
		      MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));

	*p_flash_size = flash_size;

	return 0;
}

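/* Request MSI-X vectors for a VF from the MFW. Only the leading hwfn issues
 * the request, and the vector count is scaled by the number of engines (CMT).
 */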
int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u8 vf_id, u8 num)
{
	u32 resp = 0, param = 0, rc_param = 0;
	int rc;

	/* Only Leader can configure MSIX, and need to take CMT into account */
	if (!IS_LEAD_HWFN(p_hwfn))
		return 0;
	num *= p_hwfn->cdev->num_hwfns;

	param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
		 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
	param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
		 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
			 &resp, &rc_param);

	if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
		DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
		rc = -EINVAL;
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
			   num, vf_id);
	}

	return rc;
}

int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_mcp_drv_version *p_ver)
{
	struct drv_version_stc *p_drv_version;
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	__be32 val;
	u32 i;
	int rc;

	p_drv_version = &union_data.drv_version;
	p_drv_version->version = p_ver->version;

	for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
		val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
		*(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
	mb_params.p_data_src = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
			 &param);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

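/* Clear the soft-halt condition in the MCP CPU-mode register and verify
 * that the MCP actually resumed.
 */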
int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 value, cpu_mode;

	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);

	value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);

	return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0;
}

int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     enum qed_ov_client client)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (client) {
	case QED_OV_CLIENT_DRV:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
		break;
	case QED_OV_CLIENT_USER:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
		break;
	case QED_OV_CLIENT_VENDOR_SPEC:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid client type %d\n", client);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   enum qed_ov_driver_state drv_state)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (drv_state) {
	case QED_OV_DRIVER_STATE_NOT_LOADED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
		break;
	case QED_OV_DRIVER_STATE_DISABLED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
		break;
	case QED_OV_DRIVER_STATE_ACTIVE:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid driver state %d\n", drv_state);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send driver state\n");

	return rc;
}

int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u16 mtu)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT;
	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);

	return rc;
}

int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 *mac)
{
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
	mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
			  DRV_MSG_CODE_VMAC_TYPE_SHIFT;
	mb_params.param |= MCP_PF_ID(p_hwfn);
	ether_addr_copy(&union_data.raw_data[0], mac);
	mb_params.p_data_src = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);

	/* Store primary MAC for later possible WoL */
	memcpy(p_hwfn->cdev->wol_mac, mac, ETH_ALEN);

	return rc;
}

int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, enum qed_ov_wol wol)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	if (p_hwfn->hw_info.b_wol_support == QED_WOL_SUPPORT_NONE) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Can't change WoL configuration when WoL isn't supported\n");
		return -EINVAL;
	}

	switch (wol) {
	case QED_OV_WOL_DEFAULT:
		drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
		break;
	case QED_OV_WOL_DISABLED:
		drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
		break;
	case QED_OV_WOL_ENABLED:
		drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);

	/* Store the WoL update for a future unload */
	p_hwfn->cdev->wol_config = (u8)wol;

	return rc;
}

int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      enum qed_ov_eswitch eswitch)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (eswitch) {
	case QED_OV_ESWITCH_NONE:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
		break;
	case QED_OV_ESWITCH_VEB:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
		break;
	case QED_OV_ESWITCH_VEPA:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);

	return rc;
}

int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, enum qed_led_mode mode)
{
	u32 resp = 0, param = 0, drv_mb_param;
	int rc;

	switch (mode) {
	case QED_LED_MODE_ON:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
		break;
	case QED_LED_MODE_OFF:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
		break;
	case QED_LED_MODE_RESTORE:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
			 drv_mb_param, &resp, &param);

	return rc;
}

int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u32 mask_parities)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
			 mask_parities, &resp, &param);

	if (rc) {
		DP_ERR(p_hwfn,
		       "MCP response failure for mask parities, aborting\n");
	} else if (resp != FW_MSG_CODE_OK) {
		DP_ERR(p_hwfn,
		       "MCP did not acknowledge mask parity request. Old MFW?\n");
		rc = -EINVAL;
	}

	return rc;
}
1589
int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
{
	u32 bytes_left = len, offset = 0, bytes_to_copy, read_len = 0;
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	u32 resp = 0, resp_param = 0;
	struct qed_ptt *p_ptt;
	int rc = 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	while (bytes_left > 0) {
		bytes_to_copy = min_t(u32, bytes_left, MCP_DRV_NVM_BUF_LEN);

		rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
					DRV_MSG_CODE_NVM_READ_NVRAM,
					addr + offset +
					(bytes_to_copy <<
					 DRV_MB_PARAM_NVM_LEN_SHIFT),
					&resp, &resp_param,
					&read_len,
					(u32 *)(p_buf + offset));

		if (rc || (resp != FW_MSG_CODE_NVM_OK)) {
			DP_NOTICE(cdev, "MCP command rc = %d\n", rc);
			break;
		}

		/* This can be a lengthy process, and the scheduler might not
		 * be preemptible. Sleep briefly whenever the read crosses a
		 * 4KB boundary to prevent CPU hogging.
		 */
		if (bytes_left % 0x1000 <
		    (bytes_left - read_len) % 0x1000)
			usleep_range(1000, 2000);

		offset += read_len;
		bytes_left -= read_len;
	}

	cdev->mcp_nvm_resp = resp;
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}
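
/* Illustrative sketch (not part of the upstream file): reading an NVM
 * region into a caller-allocated buffer. qed_mcp_nvm_read() acquires
 * and releases its own PTT, so the caller does not need one.
 */
#if 0
static int example_nvm_dump(struct qed_dev *cdev, u32 addr, u32 len)
{
	u8 *buf = kzalloc(len, GFP_KERNEL);
	int rc;

	if (!buf)
		return -ENOMEM;

	rc = qed_mcp_nvm_read(cdev, addr, buf, len);
	if (!rc)
		print_hex_dump_debug("nvm: ", DUMP_PREFIX_OFFSET, 16, 1,
				     buf, len, false);
	kfree(buf);

	return rc;
}
#endif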
1635
int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 drv_mb_param = 0, rsp, param;
	int rc = 0;

	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, &param);

	if (rc)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = -EAGAIN;

	return rc;
}
1656
int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 drv_mb_param, rsp, param;
	int rc = 0;

	drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, &param);

	if (rc)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = -EAGAIN;

	return rc;
}

int qed_mcp_bist_nvm_test_get_num_images(struct qed_hwfn *p_hwfn,
					 struct qed_ptt *p_ptt,
					 u32 *num_images)
{
	u32 drv_mb_param = 0, rsp;
	int rc = 0;

	drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, num_images);
	if (rc)
		return rc;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)
		rc = -EINVAL;

	return rc;
}
1698
int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					struct bist_nvm_image_att *p_image_att,
					u32 image_index)
{
	u32 buf_size = 0, param, resp = 0, resp_param = 0;
	int rc;

	param = DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
		DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT;
	param |= image_index << DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT;

	rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
				DRV_MSG_CODE_BIST_TEST, param,
				&resp, &resp_param,
				&buf_size,
				(u32 *)p_image_att);
	if (rc)
		return rc;

	if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (p_image_att->return_code != 1))
		rc = -EINVAL;

	return rc;
}
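
/* Illustrative sketch (not part of the upstream file): the two BIST NVM
 * helpers above combine naturally - first query the image count, then
 * fetch each image's attributes by index:
 */
#if 0
static int example_bist_nvm_walk(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct bist_nvm_image_att image_att;
	u32 i, num_images = 0;
	int rc;

	rc = qed_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt, &num_images);
	if (rc)
		return rc;

	for (i = 0; i < num_images; i++) {
		rc = qed_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt,
							 &image_att, i);
		if (rc)
			return rc;
	}

	return 0;
}
#endif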

#define QED_RESC_ALLOC_VERSION_MAJOR	1
#define QED_RESC_ALLOC_VERSION_MINOR	0
#define QED_RESC_ALLOC_VERSION				     \
	((QED_RESC_ALLOC_VERSION_MAJOR <<		     \
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
	 (QED_RESC_ALLOC_VERSION_MINOR <<		     \
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))

int qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  struct resource_info *p_resc_info,
			  u32 *p_mcp_resp, u32 *p_mcp_param)
{
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	memset(&union_data, 0, sizeof(union_data));
	mb_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
	mb_params.param = QED_RESC_ALLOC_VERSION;

	/* Need a sufficiently large struct, as cmd_and_union()
	 * is going to memcpy() both from and to it.
	 */
	memcpy(&union_data.resource, p_resc_info, sizeof(*p_resc_info));

	mb_params.p_data_src = &union_data;
	mb_params.p_data_dst = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	/* Copy the data back */
	memcpy(p_resc_info, &union_data.resource, sizeof(*p_resc_info));
	*p_mcp_resp = mb_params.mcp_resp;
	*p_mcp_param = mb_params.mcp_param;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "MFW resource_info: version 0x%x, res_id 0x%x, size 0x%x, offset 0x%x, vf_size 0x%x, vf_offset 0x%x, flags 0x%x\n",
		   *p_mcp_param,
		   p_resc_info->res_id,
		   p_resc_info->size,
		   p_resc_info->offset,
		   p_resc_info->vf_size,
		   p_resc_info->vf_offset, p_resc_info->flags);

	return 0;
}
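
/* Illustrative sketch (not part of the upstream file): querying the MFW
 * for a single resource. The res_id value below is a placeholder; real
 * callers fill it in from the HSI resource enumeration.
 */
#if 0
static int example_query_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct resource_info resc_info;
	u32 mcp_resp = 0, mcp_param = 0;

	memset(&resc_info, 0, sizeof(resc_info));
	resc_info.res_id = 0;	/* placeholder resource id */

	return qed_mcp_get_resc_info(p_hwfn, p_ptt, &resc_info,
				     &mcp_resp, &mcp_param);
}
#endif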