/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/etherdevice.h>
#include "qed.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

#define CHIP_MCP_RESP_ITER_US 10

#define QED_DRV_MB_MAX_RETRIES  (500 * 1000)    /* Account for 5 sec */
#define QED_MCP_RESET_RETRIES   (50 * 1000)     /* Account for 500 msec */

#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val)           \
        qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
               _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
        qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val)  \
        DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
                     offsetof(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field)         \
        DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
                     offsetof(struct public_drv_mb, _field))

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
                  DRV_ID_PDA_COMP_VER_SHIFT)

#define MCP_BYTES_PER_MBIT_SHIFT 17

bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
{
        if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
                return false;
        return true;
}

void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
                                        PUBLIC_PORT);
        u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);

        p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
                                                   MFW_PORT(p_hwfn));
        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "port_addr = 0x%x, port_id 0x%02x\n",
                   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}

void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
        u32 tmp, i;

        if (!p_hwfn->mcp_info->public_base)
                return;

        for (i = 0; i < length; i++) {
                tmp = qed_rd(p_hwfn, p_ptt,
                             p_hwfn->mcp_info->mfw_mb_addr +
                             (i << 2) + sizeof(u32));

                /* The MB data is actually BE; need to force it to CPU order */
                ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
                        be32_to_cpu((__force __be32)tmp);
        }
}

int qed_mcp_free(struct qed_hwfn *p_hwfn)
{
        if (p_hwfn->mcp_info) {
                kfree(p_hwfn->mcp_info->mfw_mb_cur);
                kfree(p_hwfn->mcp_info->mfw_mb_shadow);
        }
        kfree(p_hwfn->mcp_info);

        return 0;
}

static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        struct qed_mcp_info *p_info = p_hwfn->mcp_info;
        u32 drv_mb_offsize, mfw_mb_offsize;
        u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

        p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
        if (!p_info->public_base)
                return 0;

        p_info->public_base |= GRCBASE_MCP;

        /* Calculate the driver and MFW mailbox address */
        drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
                                SECTION_OFFSIZE_ADDR(p_info->public_base,
                                                     PUBLIC_DRV_MB));
        p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
                   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

        /* Set the MFW MB address */
        mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
                                SECTION_OFFSIZE_ADDR(p_info->public_base,
                                                     PUBLIC_MFW_MB));
        p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
        p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);

        /* Get the current driver mailbox sequence before sending
         * the first command
         */
        p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
                             DRV_MSG_SEQ_NUMBER_MASK;

        /* Get current FW pulse sequence */
        p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
                                DRV_PULSE_SEQ_MASK;

        p_info->mcp_hist = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

        return 0;
}

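/* Note on the shmem layout (a sketch, inferred from the SECTION_* helpers in
 * the qed headers rather than restated from documentation): each "offsize"
 * word read above packs a section's offset inside the MCP scratchpad together
 * with its per-instance size, and SECTION_ADDR() expands it into the absolute
 * GRC address of the element belonging to a given PF/port index.
 */
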
int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        struct qed_mcp_info *p_info;
        u32 size;

        /* Allocate mcp_info structure */
        p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
        if (!p_hwfn->mcp_info)
                goto err;
        p_info = p_hwfn->mcp_info;

        if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
                DP_NOTICE(p_hwfn, "MCP is not initialized\n");
                /* Do not free mcp_info here, since public_base indicates
                 * that the MCP is not initialized
                 */
                return 0;
        }

        size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
        p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
        p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
        if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
                goto err;

        /* Initialize the MFW spinlocks */
        spin_lock_init(&p_info->lock);
        spin_lock_init(&p_info->link_lock);

        return 0;

err:
        qed_mcp_free(p_hwfn);
        return -ENOMEM;
}

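/* Typical bring-up order (illustrative; the actual call sites live in
 * qed_dev.c): qed_mcp_cmd_init() runs during HW-prepare so the mailbox is
 * usable early, qed_mcp_cmd_port_init() follows once a PTT window is
 * available for each port, and qed_mcp_free() undoes the allocations on
 * teardown.
 */
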
/* Locks the MFW mailbox of a PF to ensure a single access.
 * The lock is achieved in most cases by holding a spinlock, causing other
 * threads to wait till a previous access is done.
 * In some cases (currently when the [UN]LOAD_REQ commands are sent), the
 * single access is achieved by setting a blocking flag, which causes the
 * mailbox commands of other competing contexts to fail.
 */
static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn, u32 cmd)
{
        spin_lock_bh(&p_hwfn->mcp_info->lock);

        /* The spinlock shouldn't be acquired when the mailbox command is
         * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
         * pending [UN]LOAD_REQ command of another PF together with a held
         * spinlock (i.e. BHs disabled) can lead to a deadlock.
         * It is assumed that for a single PF, no other mailbox commands can be
         * sent from another context while sending LOAD_REQ, and that any
         * parallel commands to UNLOAD_REQ can be cancelled.
         */
        if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
                p_hwfn->mcp_info->block_mb_sending = false;

        if (p_hwfn->mcp_info->block_mb_sending) {
                DP_NOTICE(p_hwfn,
                          "Trying to send a MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. Aborting.\n",
                          cmd);
                spin_unlock_bh(&p_hwfn->mcp_info->lock);
                return -EBUSY;
        }

        if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
                p_hwfn->mcp_info->block_mb_sending = true;
                spin_unlock_bh(&p_hwfn->mcp_info->lock);
        }

        return 0;
}

static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn, u32 cmd)
{
        if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
                spin_unlock_bh(&p_hwfn->mcp_info->lock);
}

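/* The lock/unlock pair above is deliberately asymmetric: for [UN]LOAD_REQ the
 * spinlock is dropped inside qed_mcp_mb_lock() and exclusion is kept via
 * block_mb_sending until the matching [UN]LOAD_DONE clears it, so the
 * long-running load handshake never busy-waits with the spinlock held.
 */
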
int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
        u8 delay = CHIP_MCP_RESP_ITER_US;
        u32 org_mcp_reset_seq, cnt = 0;
        int rc = 0;

        /* Ensure that only a single thread is accessing the mailbox at any
         * given time.
         */
        rc = qed_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
        if (rc != 0)
                return rc;

        /* Set drv command along with the updated sequence */
        org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header,
                  (DRV_MSG_CODE_MCP_RESET | seq));

        do {
                /* Wait for MFW response */
                udelay(delay);
                /* Give the FW up to 500 msec (50 * 1000 * 10usec) */
        } while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
                                              MISCS_REG_GENERIC_POR_0)) &&
                 (cnt++ < QED_MCP_RESET_RETRIES));

        if (org_mcp_reset_seq !=
            qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
                DP_VERBOSE(p_hwfn, QED_MSG_SP,
                           "MCP was reset after %d usec\n", cnt * delay);
        } else {
                DP_ERR(p_hwfn, "Failed to reset MCP\n");
                rc = -EAGAIN;
        }

        qed_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);

        return rc;
}

static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt,
                          u32 cmd,
                          u32 param,
                          u32 *o_mcp_resp,
                          u32 *o_mcp_param)
{
        u8 delay = CHIP_MCP_RESP_ITER_US;
        u32 seq, cnt = 1, actual_mb_seq;
        int rc = 0;

        /* Get actual driver mailbox sequence */
        actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
                        DRV_MSG_SEQ_NUMBER_MASK;

        /* Use MCP history register to check if MCP reset occurred between
         * init time and now.
         */
        if (p_hwfn->mcp_info->mcp_hist !=
            qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
                DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n");
                qed_load_mcp_offsets(p_hwfn, p_ptt);
                qed_mcp_cmd_port_init(p_hwfn, p_ptt);
        }
        seq = ++p_hwfn->mcp_info->drv_mb_seq;

        /* Set drv param */
        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);

        /* Set drv command along with the updated sequence */
        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));

        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "wrote command (%x) to MFW MB param 0x%08x\n",
                   (cmd | seq), param);

        do {
                /* Wait for MFW response */
                udelay(delay);
                *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);

                /* Give the FW up to 5 seconds (500 * 1000 * 10usec) */
        } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
                 (cnt++ < QED_DRV_MB_MAX_RETRIES));

        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "[after %d usec] read (%x) seq is (%x) from FW MB\n",
                   cnt * delay, *o_mcp_resp, seq);

        /* Is this a reply to our command? */
        if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
                *o_mcp_resp &= FW_MSG_CODE_MASK;
                /* Get the MCP param */
                *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
        } else {
                /* FW BUG! */
                DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
                       cmd, param);
                *o_mcp_resp = 0;
                rc = -EAGAIN;
        }
        return rc;
}

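/* Mailbox handshake in a nutshell (derived from the code above): the driver
 * writes cmd|seq into drv_mb_header, where seq is a small rolling counter in
 * DRV_MSG_SEQ_NUMBER_MASK, and then polls fw_mb_header until the MFW echoes
 * the same seq back; the response code and auxiliary value are then taken
 * from fw_mb_header (masked by FW_MSG_CODE_MASK) and fw_mb_param.
 */
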
static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt,
                                 struct qed_mcp_mb_params *p_mb_params)
{
        u32 union_data_addr;
        int rc;

        /* MCP not initialized */
        if (!qed_mcp_is_init(p_hwfn)) {
                DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
                return -EBUSY;
        }

        union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
                          offsetof(struct public_drv_mb, union_data);

        /* Ensure that only a single thread is accessing the mailbox at any
         * given time.
         */
        rc = qed_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
        if (rc)
                return rc;

        if (p_mb_params->p_data_src != NULL)
                qed_memcpy_to(p_hwfn, p_ptt, union_data_addr,
                              p_mb_params->p_data_src,
                              sizeof(*p_mb_params->p_data_src));

        rc = qed_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
                            p_mb_params->param, &p_mb_params->mcp_resp,
                            &p_mb_params->mcp_param);

        if (p_mb_params->p_data_dst != NULL)
                qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
                                union_data_addr,
                                sizeof(*p_mb_params->p_data_dst));

        qed_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);

        return rc;
}

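/* All mailbox variants funnel through qed_mcp_cmd_and_union(): callers fill
 * cmd/param and may attach p_data_src and/or p_data_dst, each of which is
 * copied to/from the shmem union_data area as a full union drv_union_data,
 * regardless of which union member is actually meaningful for the command.
 */
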
int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
                struct qed_ptt *p_ptt,
                u32 cmd,
                u32 param,
                u32 *o_mcp_resp,
                u32 *o_mcp_param)
{
        struct qed_mcp_mb_params mb_params;
        union drv_union_data data_src;
        int rc;

        memset(&mb_params, 0, sizeof(mb_params));
        memset(&data_src, 0, sizeof(data_src));
        mb_params.cmd = cmd;
        mb_params.param = param;

        /* In case of UNLOAD_DONE, set the primary MAC */
        if ((cmd == DRV_MSG_CODE_UNLOAD_DONE) &&
            (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED)) {
                u8 *p_mac = p_hwfn->cdev->wol_mac;

                data_src.wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
                data_src.wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
                                             p_mac[4] << 8 | p_mac[5];

                DP_VERBOSE(p_hwfn,
                           (QED_MSG_SP | NETIF_MSG_IFDOWN),
                           "Setting WoL MAC: %pM --> [%08x,%08x]\n",
                           p_mac, data_src.wol_mac.mac_upper,
                           data_src.wol_mac.mac_lower);

                mb_params.p_data_src = &data_src;
        }

        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc)
                return rc;

        *o_mcp_resp = mb_params.mcp_resp;
        *o_mcp_param = mb_params.mcp_param;

        return 0;
}

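/* Illustrative caller sketch (not part of this file; the command shown is
 * just an example of the pattern):
 *
 *      u32 resp = 0, param = 0;
 *      int rc;
 *
 *      rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0,
 *                       &resp, &param);
 *      if (rc)                         // mailbox failure, e.g. MFW timeout
 *              return rc;
 *      if (resp != FW_MSG_CODE_OK)     // MFW-level refusal
 *              return -EINVAL;
 */
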
int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
                       struct qed_ptt *p_ptt,
                       u32 cmd,
                       u32 param,
                       u32 *o_mcp_resp,
                       u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
{
        struct qed_mcp_mb_params mb_params;
        union drv_union_data union_data;
        int rc;

        memset(&mb_params, 0, sizeof(mb_params));
        mb_params.cmd = cmd;
        mb_params.param = param;
        mb_params.p_data_dst = &union_data;
        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc)
                return rc;

        *o_mcp_resp = mb_params.mcp_resp;
        *o_mcp_param = mb_params.mcp_param;

        *o_txn_size = *o_mcp_param;
        memcpy(o_buf, &union_data.raw_data, *o_txn_size);

        return 0;
}

int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
                     struct qed_ptt *p_ptt, u32 *p_load_code)
{
        struct qed_dev *cdev = p_hwfn->cdev;
        struct qed_mcp_mb_params mb_params;
        union drv_union_data union_data;
        int rc;

        memset(&mb_params, 0, sizeof(mb_params));
        /* Load Request */
        mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
        mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
                          cdev->drv_type;
        memcpy(&union_data.ver_str, cdev->ver_str, MCP_DRV_VER_STR_SIZE);
        mb_params.p_data_src = &union_data;
        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

        /* if mcp fails to respond we must abort */
        if (rc) {
                DP_ERR(p_hwfn, "MCP response failure, aborting\n");
                return rc;
        }

        *p_load_code = mb_params.mcp_resp;

        /* If the MFW refused the load request we must abort. This can happen
         * in the following cases:
         * - Other port is in diagnostic mode
         * - Previously loaded function on the engine is not compliant with
         *   the requester.
         * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
         */
        if (!(*p_load_code) ||
            ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
            ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
            ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
                DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
                return -EBUSY;
        }

        return 0;
}

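/* On success, the returned load code encodes how much initialization this PF
 * owns (e.g. FW_MSG_CODE_DRV_LOAD_ENGINE/_PORT/_FUNCTION, i.e. first driver
 * on the engine, first on the port, or neither); the caller in qed_dev.c uses
 * it to decide which of the common/port/function init stages to run.
 */
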
static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt)
{
        u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
                                        PUBLIC_PATH);
        u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
        u32 path_addr = SECTION_ADDR(mfw_path_offsize,
                                     QED_PATH_ID(p_hwfn));
        u32 disabled_vfs[VF_MAX_STATIC / 32];
        int i;

        DP_VERBOSE(p_hwfn,
                   QED_MSG_SP,
                   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
                   mfw_path_offsize, path_addr);

        for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
                disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
                                         path_addr +
                                         offsetof(struct public_path,
                                                  mcp_vf_disabled) +
                                         sizeof(u32) * i);
                DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
                           "FLR-ed VFs [%08x,...,%08x] - %08x\n",
                           i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
        }

        if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
                qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
}

int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
                       struct qed_ptt *p_ptt, u32 *vfs_to_ack)
{
        u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
                                        PUBLIC_FUNC);
        u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
        u32 func_addr = SECTION_ADDR(mfw_func_offsize,
                                     MCP_PF_ID(p_hwfn));
        struct qed_mcp_mb_params mb_params;
        union drv_union_data union_data;
        int rc;
        int i;

        for (i = 0; i < (VF_MAX_STATIC / 32); i++)
                DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
                           "Acking VFs [%08x,...,%08x] - %08x\n",
                           i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

        memset(&mb_params, 0, sizeof(mb_params));
        mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
        memcpy(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
        mb_params.p_data_src = &union_data;
        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
                return -EBUSY;
        }

        /* Clear the ACK bits */
        for (i = 0; i < (VF_MAX_STATIC / 32); i++)
                qed_wr(p_hwfn, p_ptt,
                       func_addr +
                       offsetof(struct public_func, drv_ack_vf_disabled) +
                       i * sizeof(u32), 0);

        return rc;
}

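/* VF FLR flow (sketch, derived from the two functions above): the MFW sets
 * bits in public_path.mcp_vf_disabled and raises MFW_DRV_MSG_VF_DISABLED;
 * qed_mcp_handle_vf_flr() reads the bitmap and schedules cleanup, and once
 * that completes the driver acks via DRV_MSG_CODE_VF_DISABLED_DONE and zeroes
 * drv_ack_vf_disabled here. Each u32 in the bitmap covers 32 VFs.
 */
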
static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
                                              struct qed_ptt *p_ptt)
{
        u32 transceiver_state;

        transceiver_state = qed_rd(p_hwfn, p_ptt,
                                   p_hwfn->mcp_info->port_addr +
                                   offsetof(struct public_port,
                                            transceiver_data));

        DP_VERBOSE(p_hwfn,
                   (NETIF_MSG_HW | QED_MSG_SP),
                   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
                   transceiver_state,
                   (u32)(p_hwfn->mcp_info->port_addr +
                         offsetof(struct public_port, transceiver_data)));

        transceiver_state = GET_FIELD(transceiver_state,
                                      ETH_TRANSCEIVER_STATE);

        if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
                DP_NOTICE(p_hwfn, "Transceiver is present.\n");
        else
                DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
}

static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
                                       struct qed_ptt *p_ptt, bool b_reset)
{
        struct qed_mcp_link_state *p_link;
        u8 max_bw, min_bw;
        u32 status = 0;

        /* Prevent SW/attentions from doing this at the same time */
        spin_lock_bh(&p_hwfn->mcp_info->link_lock);

        p_link = &p_hwfn->mcp_info->link_output;
        memset(p_link, 0, sizeof(*p_link));
        if (!b_reset) {
                status = qed_rd(p_hwfn, p_ptt,
                                p_hwfn->mcp_info->port_addr +
                                offsetof(struct public_port, link_status));
                DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
                           "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
                           status,
                           (u32)(p_hwfn->mcp_info->port_addr +
                                 offsetof(struct public_port, link_status)));
        } else {
                DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
                           "Resetting link indications\n");
                goto out;
        }

        if (p_hwfn->b_drv_link_init)
                p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
        else
                p_link->link_up = false;

        p_link->full_duplex = true;
        switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
        case LINK_STATUS_SPEED_AND_DUPLEX_100G:
                p_link->speed = 100000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_50G:
                p_link->speed = 50000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_40G:
                p_link->speed = 40000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_25G:
                p_link->speed = 25000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_20G:
                p_link->speed = 20000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_10G:
                p_link->speed = 10000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
                p_link->full_duplex = false;
        /* Fall-through */
        case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
                p_link->speed = 1000;
                break;
        default:
                p_link->speed = 0;
        }

        if (p_link->link_up && p_link->speed)
                p_link->line_speed = p_link->speed;
        else
                p_link->line_speed = 0;

        max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
        min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

        /* Max bandwidth configuration */
        __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);

        /* Min bandwidth configuration */
        __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
        qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_ptt,
                                            p_link->min_pf_rate);

        p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
        p_link->an_complete = !!(status &
                                 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
        p_link->parallel_detection = !!(status &
                                        LINK_STATUS_PARALLEL_DETECTION_USED);
        p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
                QED_LINK_PARTNER_SPEED_1G_FD : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
                QED_LINK_PARTNER_SPEED_1G_HD : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
                QED_LINK_PARTNER_SPEED_10G : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
                QED_LINK_PARTNER_SPEED_20G : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
                QED_LINK_PARTNER_SPEED_25G : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
                QED_LINK_PARTNER_SPEED_40G : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
                QED_LINK_PARTNER_SPEED_50G : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
                QED_LINK_PARTNER_SPEED_100G : 0;

        p_link->partner_tx_flow_ctrl_en =
                !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
        p_link->partner_rx_flow_ctrl_en =
                !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

        switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
        case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
                p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
                break;
        case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
                p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
                break;
        case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
                p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
                break;
        default:
                p_link->partner_adv_pause = 0;
        }

        p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

        qed_link_update(p_hwfn);
out:
        spin_unlock_bh(&p_hwfn->mcp_info->link_lock);
}

int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
{
        struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
        struct qed_mcp_mb_params mb_params;
        union drv_union_data union_data;
        struct eth_phy_cfg *phy_cfg;
        int rc = 0;
        u32 cmd;

        /* Set the shmem configuration according to params */
        phy_cfg = &union_data.drv_phy_cfg;
        memset(phy_cfg, 0, sizeof(*phy_cfg));
        cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
        if (!params->speed.autoneg)
                phy_cfg->speed = params->speed.forced_speed;
        phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
        phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
        phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
        phy_cfg->adv_speed = params->speed.advertised_speeds;
        phy_cfg->loopback_mode = params->loopback_mode;

        p_hwfn->b_drv_link_init = b_up;

        if (b_up) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
                           "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
                           phy_cfg->speed,
                           phy_cfg->pause,
                           phy_cfg->adv_speed,
                           phy_cfg->loopback_mode,
                           phy_cfg->feature_config_flags);
        } else {
                DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
                           "Resetting link\n");
        }

        memset(&mb_params, 0, sizeof(mb_params));
        mb_params.cmd = cmd;
        mb_params.p_data_src = &union_data;
        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

        /* if mcp fails to respond we must abort */
        if (rc) {
                DP_ERR(p_hwfn, "MCP response failure, aborting\n");
                return rc;
        }

        /* Mimic link-change attention, done for several reasons:
         * - On reset, there's no guarantee MFW would trigger
         *   an attention.
         * - On initialization, older MFWs might not indicate link change
         *   during LFA, so we'll never get an UP indication.
         */
        qed_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);

        return 0;
}

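/* Illustrative usage (a simplified sketch of the ethtool/main-driver path;
 * the advertised-speeds value is a placeholder):
 *
 *      struct qed_mcp_link_params *params;
 *
 *      params = qed_mcp_get_link_params(p_hwfn);
 *      params->speed.autoneg = true;
 *      params->speed.advertised_speeds = nvm_capable_speeds;
 *      rc = qed_mcp_set_link(p_hwfn, p_ptt, true);     // bring link up
 */
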
static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
                                        struct qed_ptt *p_ptt,
                                        enum MFW_DRV_MSG_TYPE type)
{
        enum qed_mcp_protocol_type stats_type;
        union qed_mcp_protocol_stats stats;
        struct qed_mcp_mb_params mb_params;
        union drv_union_data union_data;
        u32 hsi_param;

        switch (type) {
        case MFW_DRV_MSG_GET_LAN_STATS:
                stats_type = QED_MCP_LAN_STATS;
                hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
                break;
        case MFW_DRV_MSG_GET_FCOE_STATS:
                stats_type = QED_MCP_FCOE_STATS;
                hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
                break;
        case MFW_DRV_MSG_GET_ISCSI_STATS:
                stats_type = QED_MCP_ISCSI_STATS;
                hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
                break;
        case MFW_DRV_MSG_GET_RDMA_STATS:
                stats_type = QED_MCP_RDMA_STATS;
                hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
                break;
        default:
                DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
                return;
        }

        qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats);

        memset(&mb_params, 0, sizeof(mb_params));
        mb_params.cmd = DRV_MSG_CODE_GET_STATS;
        mb_params.param = hsi_param;
        memcpy(&union_data, &stats, sizeof(stats));
        mb_params.p_data_src = &union_data;
        qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
                                  struct public_func *p_shmem_info)
{
        struct qed_mcp_function_info *p_info;

        p_info = &p_hwfn->mcp_info->func_info;

        p_info->bandwidth_min = (p_shmem_info->config &
                                 FUNC_MF_CFG_MIN_BW_MASK) >>
                                        FUNC_MF_CFG_MIN_BW_SHIFT;
        if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
                DP_INFO(p_hwfn,
                        "bandwidth minimum out of bounds [%02x]. Set to 1\n",
                        p_info->bandwidth_min);
                p_info->bandwidth_min = 1;
        }

        p_info->bandwidth_max = (p_shmem_info->config &
                                 FUNC_MF_CFG_MAX_BW_MASK) >>
                                        FUNC_MF_CFG_MAX_BW_SHIFT;
        if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
                DP_INFO(p_hwfn,
                        "bandwidth maximum out of bounds [%02x]. Set to 100\n",
                        p_info->bandwidth_max);
                p_info->bandwidth_max = 100;
        }
}

static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt,
                                  struct public_func *p_data, int pfid)
{
        u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
                                        PUBLIC_FUNC);
        u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
        u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
        u32 i, size;

        memset(p_data, 0, sizeof(*p_data));

        size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
        for (i = 0; i < size / sizeof(u32); i++)
                ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
                                            func_addr + (i << 2));
        return size;
}

static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        struct qed_mcp_function_info *p_info;
        struct public_func shmem_info;
        u32 resp = 0, param = 0;

        qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

        qed_read_pf_bandwidth(p_hwfn, &shmem_info);

        p_info = &p_hwfn->mcp_info->func_info;

        qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
        qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);

        /* Acknowledge the MFW */
        qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
                    &param);
}

int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt)
{
        struct qed_mcp_info *info = p_hwfn->mcp_info;
        int rc = 0;
        bool found = false;
        u16 i;

        DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");

        /* Read Messages from MFW */
        qed_mcp_read_mb(p_hwfn, p_ptt);

        /* Compare current messages to old ones */
        for (i = 0; i < info->mfw_mb_length; i++) {
                if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
                        continue;

                found = true;

                DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
                           "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
                           i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

                switch (i) {
                case MFW_DRV_MSG_LINK_CHANGE:
                        qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
                        break;
                case MFW_DRV_MSG_VF_DISABLED:
                        qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
                        break;
                case MFW_DRV_MSG_LLDP_DATA_UPDATED:
                        qed_dcbx_mib_update_event(p_hwfn, p_ptt,
                                                  QED_DCBX_REMOTE_LLDP_MIB);
                        break;
                case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
                        qed_dcbx_mib_update_event(p_hwfn, p_ptt,
                                                  QED_DCBX_REMOTE_MIB);
                        break;
                case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
                        qed_dcbx_mib_update_event(p_hwfn, p_ptt,
                                                  QED_DCBX_OPERATIONAL_MIB);
                        break;
                case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
                        qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
                        break;
                case MFW_DRV_MSG_GET_LAN_STATS:
                case MFW_DRV_MSG_GET_FCOE_STATS:
                case MFW_DRV_MSG_GET_ISCSI_STATS:
                case MFW_DRV_MSG_GET_RDMA_STATS:
                        qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
                        break;
                case MFW_DRV_MSG_BW_UPDATE:
                        qed_mcp_update_bw(p_hwfn, p_ptt);
                        break;
                default:
                        DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
                        rc = -EINVAL;
                }
        }

        /* ACK everything */
        for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
                __be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);

                /* MFW expects the answer in BE, so force the write in that
                 * format.
                 */
                qed_wr(p_hwfn, p_ptt,
                       info->mfw_mb_addr + sizeof(u32) +
                       MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
                       sizeof(u32) + i * sizeof(u32),
                       (__force u32)val);
        }

        if (!found) {
                DP_NOTICE(p_hwfn,
                          "Received an MFW message indication but no new message!\n");
                rc = -EINVAL;
        }

        /* Copy the new mfw messages into the shadow */
        memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

        return rc;
}

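/* MFW mailbox layout as implied by the reads/writes above: dword 0 holds the
 * mailbox length, dwords 1..N hold the current messages (fetched by
 * qed_mcp_read_mb()), and the N dwords that follow are the driver's ack area,
 * written back in big-endian.
 */
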
int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
                        struct qed_ptt *p_ptt,
                        u32 *p_mfw_ver, u32 *p_running_bundle_id)
{
        u32 global_offsize;

        if (IS_VF(p_hwfn->cdev)) {
                if (p_hwfn->vf_iov_info) {
                        struct pfvf_acquire_resp_tlv *p_resp;

                        p_resp = &p_hwfn->vf_iov_info->acquire_resp;
                        *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
                        return 0;
                } else {
                        DP_VERBOSE(p_hwfn,
                                   QED_MSG_IOV,
                                   "VF requested MFW version prior to ACQUIRE\n");
                        return -EINVAL;
                }
        }

        global_offsize = qed_rd(p_hwfn, p_ptt,
                                SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
                                                     PUBLIC_GLOBAL));
        *p_mfw_ver = qed_rd(p_hwfn, p_ptt,
                            SECTION_ADDR(global_offsize, 0) +
                            offsetof(struct public_global, mfw_ver));

        if (p_running_bundle_id != NULL) {
                *p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
                                              SECTION_ADDR(global_offsize, 0) +
                                              offsetof(struct public_global,
                                                       running_bundle_id));
        }

        return 0;
}

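/* The raw mfw_ver word is decoded elsewhere (e.g. the ethtool driver-info
 * string built in qed_main.c) as four version bytes, MSB to LSB, roughly
 * major.minor.rev.eng. Treat that split as informational; this function only
 * passes the raw value through.
 */
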
int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type)
{
        struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
        struct qed_ptt *p_ptt;

        if (IS_VF(cdev))
                return -EINVAL;

        if (!qed_mcp_is_init(p_hwfn)) {
                DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
                return -EBUSY;
        }

        *p_media_type = MEDIA_UNSPECIFIED;

        p_ptt = qed_ptt_acquire(p_hwfn);
        if (!p_ptt)
                return -EBUSY;

        *p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
                               offsetof(struct public_port, media_type));

        qed_ptt_release(p_hwfn, p_ptt);

        return 0;
}

/* Old MFW has a global configuration for all PFs regarding RDMA support */
static void
qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn,
                               enum qed_pci_personality *p_proto)
{
        /* There wasn't ever a legacy MFW that published iwarp.
         * So at this point, this is either plain l2 or RoCE.
         */
        if (test_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities))
                *p_proto = QED_PCI_ETH_ROCE;
        else
                *p_proto = QED_PCI_ETH;

        DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
                   "According to Legacy capabilities, L2 personality is %08x\n",
                   (u32) *p_proto);
}

static int
qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn,
                            struct qed_ptt *p_ptt,
                            enum qed_pci_personality *p_proto)
{
        u32 resp = 0, param = 0;
        int rc;

        rc = qed_mcp_cmd(p_hwfn, p_ptt,
                         DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
        if (rc)
                return rc;
        if (resp != FW_MSG_CODE_OK) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
                           "MFW lacks support for command; Returns %08x\n",
                           resp);
                return -EINVAL;
        }

        switch (param) {
        case FW_MB_PARAM_GET_PF_RDMA_NONE:
                *p_proto = QED_PCI_ETH;
                break;
        case FW_MB_PARAM_GET_PF_RDMA_ROCE:
                *p_proto = QED_PCI_ETH_ROCE;
                break;
        case FW_MB_PARAM_GET_PF_RDMA_BOTH:
                DP_NOTICE(p_hwfn,
                          "Current day drivers don't support RoCE & iWARP. Default to RoCE-only\n");
                *p_proto = QED_PCI_ETH_ROCE;
                break;
        case FW_MB_PARAM_GET_PF_RDMA_IWARP:
        default:
                DP_NOTICE(p_hwfn,
                          "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
                          param);
                return -EINVAL;
        }

        DP_VERBOSE(p_hwfn,
                   NETIF_MSG_IFUP,
                   "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
                   (u32) *p_proto, resp, param);
        return 0;
}

static int
qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
                        struct public_func *p_info,
                        struct qed_ptt *p_ptt,
                        enum qed_pci_personality *p_proto)
{
        int rc = 0;

        switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
        case FUNC_MF_CFG_PROTOCOL_ETHERNET:
                if (!IS_ENABLED(CONFIG_QED_RDMA))
                        *p_proto = QED_PCI_ETH;
                else if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto))
                        qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
                break;
        case FUNC_MF_CFG_PROTOCOL_ISCSI:
                *p_proto = QED_PCI_ISCSI;
                break;
        case FUNC_MF_CFG_PROTOCOL_FCOE:
                *p_proto = QED_PCI_FCOE;
                break;
        case FUNC_MF_CFG_PROTOCOL_ROCE:
                DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
        /* Fallthrough */
        default:
                rc = -EINVAL;
        }

        return rc;
}

int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt)
{
        struct qed_mcp_function_info *info;
        struct public_func shmem_info;

        qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
        info = &p_hwfn->mcp_info->func_info;

        info->pause_on_host = (shmem_info.config &
                               FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;

        if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
                                    &info->protocol)) {
                DP_ERR(p_hwfn, "Unknown personality %08x\n",
                       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
                return -EINVAL;
        }

        qed_read_pf_bandwidth(p_hwfn, &shmem_info);

        if (shmem_info.mac_upper || shmem_info.mac_lower) {
                info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
                info->mac[1] = (u8)(shmem_info.mac_upper);
                info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
                info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
                info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
                info->mac[5] = (u8)(shmem_info.mac_lower);

                /* Store primary MAC for later possible WoL */
                memcpy(&p_hwfn->cdev->wol_mac, info->mac, ETH_ALEN);
        } else {
                DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
        }

        info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
                         (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
        info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
                         (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);

        info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);

        info->mtu = (u16)shmem_info.mtu_size;

        p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_NONE;
        p_hwfn->cdev->wol_config = (u8)QED_OV_WOL_DEFAULT;
        if (qed_mcp_is_init(p_hwfn)) {
                u32 resp = 0, param = 0;
                int rc;

                rc = qed_mcp_cmd(p_hwfn, p_ptt,
                                 DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
                if (rc)
                        return rc;
                if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
                        p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_PME;
        }

        DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
                   "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n",
                   info->pause_on_host, info->protocol,
                   info->bandwidth_min, info->bandwidth_max,
                   info->mac[0], info->mac[1], info->mac[2],
                   info->mac[3], info->mac[4], info->mac[5],
                   info->wwn_port, info->wwn_node,
                   info->ovlan, (u8)p_hwfn->hw_info.b_wol_support);

        return 0;
}

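/* Worked example of the shmem MAC packing unpacked above: for MAC
 * aa:bb:cc:dd:ee:ff, mac_upper holds 0x0000aabb and mac_lower holds
 * 0xccddeeff, i.e. the two high bytes ride in the low 16 bits of mac_upper.
 */
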
struct qed_mcp_link_params
*qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
{
        if (!p_hwfn || !p_hwfn->mcp_info)
                return NULL;
        return &p_hwfn->mcp_info->link_input;
}

struct qed_mcp_link_state
*qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
{
        if (!p_hwfn || !p_hwfn->mcp_info)
                return NULL;
        return &p_hwfn->mcp_info->link_output;
}

struct qed_mcp_link_capabilities
*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
{
        if (!p_hwfn || !p_hwfn->mcp_info)
                return NULL;
        return &p_hwfn->mcp_info->link_capabilities;
}

int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        u32 resp = 0, param = 0;
        int rc;

        rc = qed_mcp_cmd(p_hwfn, p_ptt,
                         DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);

        /* Wait for the drain to complete before returning */
        msleep(1020);

        return rc;
}

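/* The literal 1000 is presumably the requested drain window (in msec) passed
 * to the MFW, with msleep(1020) over-waiting it slightly since the mailbox
 * command itself returns immediately; this is an inference from the values,
 * not a documented contract.
 */
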
int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
                           struct qed_ptt *p_ptt, u32 *p_flash_size)
{
        u32 flash_size;

        if (IS_VF(p_hwfn->cdev))
                return -EINVAL;

        flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
        flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
                      MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
        flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));

        *p_flash_size = flash_size;

        return 0;
}

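/* Size math: the CFG4 field holds log2 of the flash size in Mbit, and
 * 1 Mbit = 2^17 bytes (hence MCP_BYTES_PER_MBIT_SHIFT). E.g. a field value
 * of 3 yields 1 << (3 + 17) bytes, i.e. 8 Mbit / 1 MiB.
 */
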
int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
                           struct qed_ptt *p_ptt, u8 vf_id, u8 num)
{
        u32 resp = 0, param = 0, rc_param = 0;
        int rc;

        /* Only the leader can configure MSI-X, and CMT must be taken into
         * account.
         */
        if (!IS_LEAD_HWFN(p_hwfn))
                return 0;
        num *= p_hwfn->cdev->num_hwfns;

        param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
                 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
        param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
                 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;

        rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
                         &resp, &rc_param);

        if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
                DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
                rc = -EINVAL;
        } else {
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "Requested 0x%02x MSI-X interrupts for VF 0x%02x\n",
                           num, vf_id);
        }

        return rc;
}

int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
                         struct qed_ptt *p_ptt,
                         struct qed_mcp_drv_version *p_ver)
{
        struct drv_version_stc *p_drv_version;
        struct qed_mcp_mb_params mb_params;
        union drv_union_data union_data;
        __be32 val;
        u32 i;
        int rc;

        p_drv_version = &union_data.drv_version;
        p_drv_version->version = p_ver->version;

        for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
                val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
                *(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val;
        }

        memset(&mb_params, 0, sizeof(mb_params));
        mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
        mb_params.p_data_src = &union_data;
        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc)
                DP_ERR(p_hwfn, "MCP response failure, aborting\n");

        return rc;
}

int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        u32 resp = 0, param = 0;
        int rc;

        rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
                         &param);
        if (rc)
                DP_ERR(p_hwfn, "MCP response failure, aborting\n");

        return rc;
}

int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        u32 value, cpu_mode;

        qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);

        value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
        value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
        qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
        cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);

        return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0;
}

int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
                                     struct qed_ptt *p_ptt,
                                     enum qed_ov_client client)
{
        u32 resp = 0, param = 0;
        u32 drv_mb_param;
        int rc;

        switch (client) {
        case QED_OV_CLIENT_DRV:
                drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
                break;
        case QED_OV_CLIENT_USER:
                drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
                break;
        case QED_OV_CLIENT_VENDOR_SPEC:
                drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
                break;
        default:
                DP_NOTICE(p_hwfn, "Invalid client type %d\n", client);
                return -EINVAL;
        }

        rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
                         drv_mb_param, &resp, &param);
        if (rc)
                DP_ERR(p_hwfn, "MCP response failure, aborting\n");

        return rc;
}

int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
                                   struct qed_ptt *p_ptt,
                                   enum qed_ov_driver_state drv_state)
{
        u32 resp = 0, param = 0;
        u32 drv_mb_param;
        int rc;

        switch (drv_state) {
        case QED_OV_DRIVER_STATE_NOT_LOADED:
                drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
                break;
        case QED_OV_DRIVER_STATE_DISABLED:
                drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
                break;
        case QED_OV_DRIVER_STATE_ACTIVE:
                drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
                break;
        default:
                DP_NOTICE(p_hwfn, "Invalid driver state %d\n", drv_state);
                return -EINVAL;
        }

        rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
                         drv_mb_param, &resp, &param);
        if (rc)
                DP_ERR(p_hwfn, "Failed to send driver state\n");

        return rc;
}

int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt, u16 mtu)
{
        u32 resp = 0, param = 0;
        u32 drv_mb_param;
        int rc;

        drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT;
        rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
                         drv_mb_param, &resp, &param);
        if (rc)
                DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);

        return rc;
}

int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt, u8 *mac)
{
        struct qed_mcp_mb_params mb_params;
        union drv_union_data union_data;
        int rc;

        memset(&mb_params, 0, sizeof(mb_params));
        mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
        mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
                          DRV_MSG_CODE_VMAC_TYPE_SHIFT;
        mb_params.param |= MCP_PF_ID(p_hwfn);
        ether_addr_copy(&union_data.raw_data[0], mac);
        mb_params.p_data_src = &union_data;
        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc)
                DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);

        /* Store primary MAC for later possible WoL */
        memcpy(p_hwfn->cdev->wol_mac, mac, ETH_ALEN);

        return rc;
}

int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt, enum qed_ov_wol wol)
{
        u32 resp = 0, param = 0;
        u32 drv_mb_param;
        int rc;

        if (p_hwfn->hw_info.b_wol_support == QED_WOL_SUPPORT_NONE) {
                DP_VERBOSE(p_hwfn, QED_MSG_SP,
                           "Can't change WoL configuration when WoL isn't supported\n");
                return -EINVAL;
        }

        switch (wol) {
        case QED_OV_WOL_DEFAULT:
                drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
                break;
        case QED_OV_WOL_DISABLED:
                drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
                break;
        case QED_OV_WOL_ENABLED:
                drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
                break;
        default:
                DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
                return -EINVAL;
        }

        rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
                         drv_mb_param, &resp, &param);
        if (rc)
                DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);

        /* Store the WoL update for a future unload */
        p_hwfn->cdev->wol_config = (u8)wol;

        return rc;
}

int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      enum qed_ov_eswitch eswitch)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (eswitch) {
	case QED_OV_ESWITCH_NONE:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
		break;
	case QED_OV_ESWITCH_VEB:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
		break;
	case QED_OV_ESWITCH_VEPA:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);

	return rc;
}

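/* Control the device LED through the MFW: force it on or off, or
 * return it to normal operational (firmware-driven) behavior.
 */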
int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, enum qed_led_mode mode)
{
	u32 resp = 0, param = 0, drv_mb_param;
	int rc;

	switch (mode) {
	case QED_LED_MODE_ON:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
		break;
	case QED_LED_MODE_OFF:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
		break;
	case QED_LED_MODE_RESTORE:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
			 drv_mb_param, &resp, &param);

	return rc;
}

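/* Ask the MFW to mask parity attentions according to the supplied
 * bitmap. An MFW too old to know the command responds without
 * FW_MSG_CODE_OK, which is reported as -EINVAL.
 */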
int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u32 mask_parities)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
			 mask_parities, &resp, &param);

	if (rc) {
		DP_ERR(p_hwfn,
		       "MCP response failure for mask parities, aborting\n");
	} else if (resp != FW_MSG_CODE_OK) {
		DP_ERR(p_hwfn,
		       "MCP did not acknowledge mask parity request. Old MFW?\n");
		rc = -EINVAL;
	}

	return rc;
}

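/* Read an arbitrary-length buffer out of the device NVM. The mailbox
 * can move at most MCP_DRV_NVM_BUF_LEN bytes per command, so the
 * request is issued in chunks. A hypothetical caller (address and
 * length chosen purely for illustration) might do:
 *
 *	u8 buf[256];
 *
 *	rc = qed_mcp_nvm_read(cdev, 0x0, buf, sizeof(buf));
 */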
int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
{
	u32 bytes_left = len, offset = 0, bytes_to_copy, read_len = 0;
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	u32 resp = 0, resp_param = 0;
	struct qed_ptt *p_ptt;
	int rc = 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	while (bytes_left > 0) {
		bytes_to_copy = min_t(u32, bytes_left, MCP_DRV_NVM_BUF_LEN);

		rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
					DRV_MSG_CODE_NVM_READ_NVRAM,
					addr + offset +
					(bytes_to_copy <<
					 DRV_MB_PARAM_NVM_LEN_SHIFT),
					&resp, &resp_param,
					&read_len,
					(u32 *)(p_buf + offset));

		if (rc || (resp != FW_MSG_CODE_NVM_OK)) {
			DP_NOTICE(cdev, "MCP command rc = %d\n", rc);
			break;
		}

		/* This can be a lengthy process, and the scheduler may not
		 * be preemptible. Sleep a bit whenever a 4KB boundary is
		 * crossed to prevent CPU hogging.
		 */
		if (bytes_left % 0x1000 <
		    (bytes_left - read_len) % 0x1000)
			usleep_range(1000, 2000);

		offset += read_len;
		bytes_left -= read_len;
	}

	cdev->mcp_nvm_resp = resp;
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}

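/* Run the MFW's register built-in self test (BIST); -EAGAIN means
 * the command was delivered but the test did not pass.
 */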
int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 drv_mb_param = 0, rsp, param;
	int rc = 0;

	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, &param);

	if (rc)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = -EAGAIN;

	return rc;
}

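/* Run the MFW's clock BIST, with the same pass/fail convention as
 * the register test above.
 */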
int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 drv_mb_param, rsp, param;
	int rc = 0;

	drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, &param);

	if (rc)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = -EAGAIN;

	return rc;
}

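/* NVM BIST: ask the MFW how many NVM images are available; the count
 * comes back through the mailbox param.
 */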
int qed_mcp_bist_nvm_test_get_num_images(struct qed_hwfn *p_hwfn,
					 struct qed_ptt *p_ptt,
					 u32 *num_images)
{
	u32 drv_mb_param = 0, rsp;
	int rc = 0;

	drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, num_images);
	if (rc)
		return rc;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)
		rc = -EINVAL;

	return rc;
}

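/* Companion to the query above: fetch the attributes of the NVM image
 * selected by index into p_image_att. Both a non-OK mailbox response
 * and a return_code other than 1 inside the attributes are treated
 * as -EINVAL.
 */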
int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					struct bist_nvm_image_att *p_image_att,
					u32 image_index)
{
	u32 buf_size = 0, param, resp = 0, resp_param = 0;
	int rc;

	param = DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
		DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT;
	param |= image_index << DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT;

	rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
				DRV_MSG_CODE_BIST_TEST, param,
				&resp, &resp_param,
				&buf_size,
				(u32 *)p_image_att);
	if (rc)
		return rc;

	if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (p_image_att->return_code != 1))
		rc = -EINVAL;

	return rc;
}

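/* Resource-allocation query. The driver advertises the version of the
 * resource-allocation scheme it speaks in the mailbox param, and the
 * MFW returns the per-resource sizes and offsets (PF and VF) through
 * the union data buffer.
 */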
#define QED_RESC_ALLOC_VERSION_MAJOR	1
#define QED_RESC_ALLOC_VERSION_MINOR	0
#define QED_RESC_ALLOC_VERSION					      \
	((QED_RESC_ALLOC_VERSION_MAJOR <<			      \
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) |	      \
	 (QED_RESC_ALLOC_VERSION_MINOR <<			      \
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))

int qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  struct resource_info *p_resc_info,
			  u32 *p_mcp_resp, u32 *p_mcp_param)
{
	struct qed_mcp_mb_params mb_params;
	union drv_union_data union_data;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	memset(&union_data, 0, sizeof(union_data));
	mb_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
	mb_params.param = QED_RESC_ALLOC_VERSION;

	/* Need a sufficiently large struct, as the cmd_and_union
	 * is going to memcpy from and to it.
	 */
	memcpy(&union_data.resource, p_resc_info, sizeof(*p_resc_info));

	mb_params.p_data_src = &union_data;
	mb_params.p_data_dst = &union_data;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	/* Copy the data back */
	memcpy(p_resc_info, &union_data.resource, sizeof(*p_resc_info));
	*p_mcp_resp = mb_params.mcp_resp;
	*p_mcp_param = mb_params.mcp_param;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "MFW resource_info: version 0x%x, res_id 0x%x, size 0x%x, offset 0x%x, vf_size 0x%x, vf_offset 0x%x, flags 0x%x\n",
		   *p_mcp_param,
		   p_resc_info->res_id,
		   p_resc_info->size,
		   p_resc_info->offset,
		   p_resc_info->vf_size,
		   p_resc_info->vf_offset, p_resc_info->flags);

	return 0;
}