/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/etherdevice.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

#define CHIP_MCP_RESP_ITER_US   10

#define QED_DRV_MB_MAX_RETRIES  (500 * 1000)    /* Account for 5 sec */
#define QED_MCP_RESET_RETRIES   (50 * 1000)     /* Account for 500 msec */

#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
        qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
               _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
        qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
        DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
                     offsetof(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
        DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
                     offsetof(struct public_drv_mb, _field))

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
                  DRV_ID_PDA_COMP_VER_SHIFT)

#define MCP_BYTES_PER_MBIT_SHIFT 17

bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
{
        if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
                return false;
        return true;
}

void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
                                        PUBLIC_PORT);
        u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);

        p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
                                                   MFW_PORT(p_hwfn));
        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "port_addr = 0x%x, port_id 0x%02x\n",
                   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}

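/* Copy the MFW -> driver mailbox into the 'current' shadow buffer. The first
 * dword of the mailbox holds its length, so the message dwords start one
 * dword past mfw_mb_addr; shmem keeps them big endian, so each dword is
 * converted to CPU byte order here.
 */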
void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
        u32 tmp, i;

        if (!p_hwfn->mcp_info->public_base)
                return;

        for (i = 0; i < length; i++) {
                tmp = qed_rd(p_hwfn, p_ptt,
                             p_hwfn->mcp_info->mfw_mb_addr +
                             (i << 2) + sizeof(u32));

                /* The MB data is actually BE; need to force it to CPU order */
                ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
                        be32_to_cpu((__force __be32)tmp);
        }
}

struct qed_mcp_cmd_elem {
        struct list_head list;
        struct qed_mcp_mb_params *p_mb_params;
        u16 expected_seq_num;
        bool b_is_completed;
};

/* Must be called while cmd_lock is acquired */
static struct qed_mcp_cmd_elem *
qed_mcp_cmd_add_elem(struct qed_hwfn *p_hwfn,
                     struct qed_mcp_mb_params *p_mb_params,
                     u16 expected_seq_num)
{
        struct qed_mcp_cmd_elem *p_cmd_elem = NULL;

        p_cmd_elem = kzalloc(sizeof(*p_cmd_elem), GFP_ATOMIC);
        if (!p_cmd_elem)
                goto out;

        p_cmd_elem->p_mb_params = p_mb_params;
        p_cmd_elem->expected_seq_num = expected_seq_num;
        list_add(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
out:
        return p_cmd_elem;
}

/* Must be called while cmd_lock is acquired */
static void qed_mcp_cmd_del_elem(struct qed_hwfn *p_hwfn,
                                 struct qed_mcp_cmd_elem *p_cmd_elem)
{
        list_del(&p_cmd_elem->list);
        kfree(p_cmd_elem);
}

/* Must be called while cmd_lock is acquired */
static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn,
                                                     u16 seq_num)
{
        struct qed_mcp_cmd_elem *p_cmd_elem = NULL;

        list_for_each_entry(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list) {
                if (p_cmd_elem->expected_seq_num == seq_num)
                        return p_cmd_elem;
        }

        return NULL;
}

int qed_mcp_free(struct qed_hwfn *p_hwfn)
{
        if (p_hwfn->mcp_info) {
                struct qed_mcp_cmd_elem *p_cmd_elem, *p_tmp;

                kfree(p_hwfn->mcp_info->mfw_mb_cur);
                kfree(p_hwfn->mcp_info->mfw_mb_shadow);

                spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
                list_for_each_entry_safe(p_cmd_elem,
                                         p_tmp,
                                         &p_hwfn->mcp_info->cmd_list, list) {
                        qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
                }
                spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
        }

        kfree(p_hwfn->mcp_info);
        p_hwfn->mcp_info = NULL;

        return 0;
}

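/* Derive the driver and MFW mailbox addresses, the initial driver mailbox
 * sequence, the FW pulse sequence and the MCP reset history from the
 * shared-memory public section. A zero MISC_REG_SHARED_MEM_ADDR means the
 * MCP is not initialized, in which case public_base is left at zero.
 */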
static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        struct qed_mcp_info *p_info = p_hwfn->mcp_info;
        u32 drv_mb_offsize, mfw_mb_offsize;
        u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

        p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
        if (!p_info->public_base)
                return 0;

        p_info->public_base |= GRCBASE_MCP;

        /* Calculate the driver and MFW mailbox address */
        drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
                                SECTION_OFFSIZE_ADDR(p_info->public_base,
                                                     PUBLIC_DRV_MB));
        p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x, mcp_pf_id = 0x%x\n",
                   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

        /* Set the MFW MB address */
        mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
                                SECTION_OFFSIZE_ADDR(p_info->public_base,
                                                     PUBLIC_MFW_MB));
        p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
        p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);

        /* Get the current driver mailbox sequence before sending
         * the first command
         */
        p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
                             DRV_MSG_SEQ_NUMBER_MASK;

        /* Get current FW pulse sequence */
        p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
                                DRV_PULSE_SEQ_MASK;

        p_info->mcp_hist = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

        return 0;
}

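/* Allocate the per-hwfn MCP info: the command and link locks, the
 * pending-command list, the mailbox offsets and the MFW message buffers.
 */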
int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        struct qed_mcp_info *p_info;
        u32 size;

        /* Allocate mcp_info structure */
        p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
        if (!p_hwfn->mcp_info)
                goto err;
        p_info = p_hwfn->mcp_info;

        /* Initialize the MFW spinlocks */
        spin_lock_init(&p_info->cmd_lock);
        spin_lock_init(&p_info->link_lock);

        INIT_LIST_HEAD(&p_info->cmd_list);

        if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
                DP_NOTICE(p_hwfn, "MCP is not initialized\n");
                /* Do not free mcp_info here, since public_base indicates
                 * that the MCP is not initialized
                 */
                return 0;
        }

        size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
        p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
        p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
        if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
                goto err;

        return 0;

err:
        qed_mcp_free(p_hwfn);
        return -ENOMEM;
}

static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
                                   struct qed_ptt *p_ptt)
{
        u32 generic_por_0 = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

        /* Use the MCP history register to check if an MCP reset occurred
         * between init time and now.
         */
        if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
                DP_VERBOSE(p_hwfn,
                           QED_MSG_SP,
                           "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
                           p_hwfn->mcp_info->mcp_hist, generic_por_0);

                qed_load_mcp_offsets(p_hwfn, p_ptt);
                qed_mcp_cmd_port_init(p_hwfn, p_ptt);
        }
}

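/* Request an MCP reset through the mailbox, then poll
 * MISCS_REG_GENERIC_POR_0 until the reset counter advances - at most
 * QED_MCP_RESET_RETRIES polls of CHIP_MCP_RESP_ITER_US each (~500 msec).
 */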
int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
        int rc = 0;

        /* Ensure that only a single thread is accessing the mailbox */
        spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

        org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

        /* Set drv command along with the updated sequence */
        qed_mcp_reread_offsets(p_hwfn, p_ptt);
        seq = ++p_hwfn->mcp_info->drv_mb_seq;
        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

        do {
                /* Wait for MFW response */
                udelay(delay);
                /* Give the FW up to 500 msec (50 * 1000 * 10 usec) */
        } while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
                                              MISCS_REG_GENERIC_POR_0)) &&
                 (cnt++ < QED_MCP_RESET_RETRIES));

        if (org_mcp_reset_seq !=
            qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
                DP_VERBOSE(p_hwfn, QED_MSG_SP,
                           "MCP was reset after %d usec\n", cnt * delay);
        } else {
                DP_ERR(p_hwfn, "Failed to reset MCP\n");
                rc = -EAGAIN;
        }

        spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

        return rc;
}

/* Must be called while cmd_lock is acquired */
static bool qed_mcp_has_pending_cmd(struct qed_hwfn *p_hwfn)
{
        struct qed_mcp_cmd_elem *p_cmd_elem;

        /* There is at most one pending command at a certain time, and if it
         * exists - it is placed at the HEAD of the list.
         */
        if (!list_empty(&p_hwfn->mcp_info->cmd_list)) {
                p_cmd_elem = list_first_entry(&p_hwfn->mcp_info->cmd_list,
                                              struct qed_mcp_cmd_elem, list);
                return !p_cmd_elem->b_is_completed;
        }

        return false;
}

/* Must be called while cmd_lock is acquired */
static int
qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        struct qed_mcp_mb_params *p_mb_params;
        struct qed_mcp_cmd_elem *p_cmd_elem;
        u32 mcp_resp;
        u16 seq_num;

        mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
        seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);

        /* Return if no new non-handled response has been received */
        if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
                return -EAGAIN;

        p_cmd_elem = qed_mcp_cmd_get_elem(p_hwfn, seq_num);
        if (!p_cmd_elem) {
                DP_ERR(p_hwfn,
                       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
                       seq_num);
                return -EINVAL;
        }

        p_mb_params = p_cmd_elem->p_mb_params;

        /* Get the MFW response along with the sequence number */
        p_mb_params->mcp_resp = mcp_resp;

        /* Get the MFW param */
        p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);

        /* Get the union data */
        if (p_mb_params->p_data_dst != NULL && p_mb_params->data_dst_size) {
                u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
                                      offsetof(struct public_drv_mb,
                                               union_data);
                qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
                                union_data_addr, p_mb_params->data_dst_size);
        }

        p_cmd_elem->b_is_completed = true;

        return 0;
}

/* Must be called while cmd_lock is acquired */
static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
                                    struct qed_ptt *p_ptt,
                                    struct qed_mcp_mb_params *p_mb_params,
                                    u16 seq_num)
{
        union drv_union_data union_data;
        u32 union_data_addr;

        /* Set the union data */
        union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
                          offsetof(struct public_drv_mb, union_data);
        memset(&union_data, 0, sizeof(union_data));
        if (p_mb_params->p_data_src != NULL && p_mb_params->data_src_size)
                memcpy(&union_data, p_mb_params->p_data_src,
                       p_mb_params->data_src_size);
        qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
                      sizeof(union_data));

        /* Set the drv param */
        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);

        /* Set the drv command along with the sequence number */
        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));

        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "MFW mailbox: command 0x%08x param 0x%08x\n",
                   (p_mb_params->cmd | seq_num), p_mb_params->param);
}

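/* Send a mailbox command and busy-wait for the MFW response. cmd_lock is
 * held only around mailbox and pending-list accesses, so other contexts may
 * complete commands while this one polls. Both the wait-for-free-mailbox
 * loop and the wait-for-response loop are bounded by max_retries polls of
 * 'delay' microseconds each.
 */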
static int
_qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
                       struct qed_ptt *p_ptt,
                       struct qed_mcp_mb_params *p_mb_params,
                       u32 max_retries, u32 delay)
{
        struct qed_mcp_cmd_elem *p_cmd_elem;
        u32 cnt = 0;
        u16 seq_num;
        int rc = 0;

        /* Wait until the mailbox is not occupied */
        do {
                /* Exit the loop if there is no pending command, or if the
                 * pending command is completed during this iteration.
                 * The spinlock stays locked until the command is sent.
                 */

                spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

                if (!qed_mcp_has_pending_cmd(p_hwfn))
                        break;

                rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
                if (!rc)
                        break;
                else if (rc != -EAGAIN)
                        goto err;

                spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
                udelay(delay);
        } while (++cnt < max_retries);

        if (cnt >= max_retries) {
                DP_NOTICE(p_hwfn,
                          "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
                          p_mb_params->cmd, p_mb_params->param);
                return -EAGAIN;
        }

        /* Send the mailbox command */
        qed_mcp_reread_offsets(p_hwfn, p_ptt);
        seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
        p_cmd_elem = qed_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
        if (!p_cmd_elem) {
                rc = -ENOMEM;
                goto err;
        }

        __qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
        spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

        /* Wait for the MFW response */
        do {
                /* Exit the loop if the command is already completed, or if
                 * the command is completed during this iteration.
                 * The spinlock stays locked until the list element is removed.
                 */

                udelay(delay);
                spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

                if (p_cmd_elem->b_is_completed)
                        break;

                rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
                if (!rc)
                        break;
                else if (rc != -EAGAIN)
                        goto err;

                spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
        } while (++cnt < max_retries);

        if (cnt >= max_retries) {
                DP_NOTICE(p_hwfn,
                          "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
                          p_mb_params->cmd, p_mb_params->param);

                spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
                qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
                spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

                return -EAGAIN;
        }

        qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
        spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

        DP_VERBOSE(p_hwfn,
                   QED_MSG_SP,
                   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
                   p_mb_params->mcp_resp,
                   p_mb_params->mcp_param,
                   (cnt * delay) / 1000, (cnt * delay) % 1000);

        /* Clear the sequence number from the MFW response */
        p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;

        return 0;

err:
        spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
        return rc;
}

static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt,
                                 struct qed_mcp_mb_params *p_mb_params)
{
        size_t union_data_size = sizeof(union drv_union_data);
        u32 max_retries = QED_DRV_MB_MAX_RETRIES;
        u32 delay = CHIP_MCP_RESP_ITER_US;

        /* MCP not initialized */
        if (!qed_mcp_is_init(p_hwfn)) {
                DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
                return -EBUSY;
        }

        if (p_mb_params->data_src_size > union_data_size ||
            p_mb_params->data_dst_size > union_data_size) {
                DP_ERR(p_hwfn,
                       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
                       p_mb_params->data_src_size,
                       p_mb_params->data_dst_size, union_data_size);
                return -EINVAL;
        }

        return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
                                      delay);
}

int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
                struct qed_ptt *p_ptt,
                u32 cmd,
                u32 param,
                u32 *o_mcp_resp,
                u32 *o_mcp_param)
{
        struct qed_mcp_mb_params mb_params;
        int rc;

        memset(&mb_params, 0, sizeof(mb_params));
        mb_params.cmd = cmd;
        mb_params.param = param;

        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc)
                return rc;

        *o_mcp_resp = mb_params.mcp_resp;
        *o_mcp_param = mb_params.mcp_param;

        return 0;
}

int qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
                       struct qed_ptt *p_ptt,
                       u32 cmd,
                       u32 param,
                       u32 *o_mcp_resp,
                       u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf)
{
        struct qed_mcp_mb_params mb_params;
        int rc;

        memset(&mb_params, 0, sizeof(mb_params));
        mb_params.cmd = cmd;
        mb_params.param = param;
        mb_params.p_data_src = i_buf;
        mb_params.data_src_size = (u8)i_txn_size;
        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc)
                return rc;

        *o_mcp_resp = mb_params.mcp_resp;
        *o_mcp_param = mb_params.mcp_param;

        return 0;
}

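/* Issue an NVM read command and copy the returned bytes into o_buf. The MFW
 * reports the actual transaction size in the mailbox param, so the maximal
 * buffer size is requested up front.
 */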
int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
                       struct qed_ptt *p_ptt,
                       u32 cmd,
                       u32 param,
                       u32 *o_mcp_resp,
                       u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
{
        struct qed_mcp_mb_params mb_params;
        u8 raw_data[MCP_DRV_NVM_BUF_LEN];
        int rc;

        memset(&mb_params, 0, sizeof(mb_params));
        mb_params.cmd = cmd;
        mb_params.param = param;
        mb_params.p_data_dst = raw_data;

        /* Use the maximal value since the actual one is part of the response */
        mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;

        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc)
                return rc;

        *o_mcp_resp = mb_params.mcp_resp;
        *o_mcp_param = mb_params.mcp_param;

        *o_txn_size = *o_mcp_param;
        memcpy(o_buf, raw_data, *o_txn_size);

        return 0;
}

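/* Decide whether an existing driver instance may be forcibly unloaded. By
 * default a force load is allowed only when the new driver supersedes a less
 * privileged one: an OS driver over a preboot driver, or a kdump driver over
 * an OS driver.
 */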
static bool
qed_mcp_can_force_load(u8 drv_role,
                       u8 exist_drv_role,
                       enum qed_override_force_load override_force_load)
{
        bool can_force_load = false;

        switch (override_force_load) {
        case QED_OVERRIDE_FORCE_LOAD_ALWAYS:
                can_force_load = true;
                break;
        case QED_OVERRIDE_FORCE_LOAD_NEVER:
                can_force_load = false;
                break;
        default:
                can_force_load = (drv_role == DRV_ROLE_OS &&
                                  exist_drv_role == DRV_ROLE_PREBOOT) ||
                                 (drv_role == DRV_ROLE_KDUMP &&
                                  exist_drv_role == DRV_ROLE_OS);
                break;
        }

        return can_force_load;
}

static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn,
                                   struct qed_ptt *p_ptt)
{
        u32 resp = 0, param = 0;
        int rc;

        rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
                         &resp, &param);
        if (rc)
                DP_NOTICE(p_hwfn,
                          "Failed to send cancel load request, rc = %d\n", rc);

        return rc;
}

#define CONFIG_QEDE_BITMAP_IDX          BIT(0)
#define CONFIG_QED_SRIOV_BITMAP_IDX     BIT(1)
#define CONFIG_QEDR_BITMAP_IDX          BIT(2)
#define CONFIG_QEDF_BITMAP_IDX          BIT(4)
#define CONFIG_QEDI_BITMAP_IDX          BIT(5)
#define CONFIG_QED_LL2_BITMAP_IDX       BIT(6)

static u32 qed_get_config_bitmap(void)
{
        u32 config_bitmap = 0x0;

        if (IS_ENABLED(CONFIG_QEDE))
                config_bitmap |= CONFIG_QEDE_BITMAP_IDX;

        if (IS_ENABLED(CONFIG_QED_SRIOV))
                config_bitmap |= CONFIG_QED_SRIOV_BITMAP_IDX;

        if (IS_ENABLED(CONFIG_QED_RDMA))
                config_bitmap |= CONFIG_QEDR_BITMAP_IDX;

        if (IS_ENABLED(CONFIG_QED_FCOE))
                config_bitmap |= CONFIG_QEDF_BITMAP_IDX;

        if (IS_ENABLED(CONFIG_QED_ISCSI))
                config_bitmap |= CONFIG_QEDI_BITMAP_IDX;

        if (IS_ENABLED(CONFIG_QED_LL2))
                config_bitmap |= CONFIG_QED_LL2_BITMAP_IDX;

        return config_bitmap;
}

struct qed_load_req_in_params {
        u8 hsi_ver;
#define QED_LOAD_REQ_HSI_VER_DEFAULT    0
#define QED_LOAD_REQ_HSI_VER_1          1
        u32 drv_ver_0;
        u32 drv_ver_1;
        u32 fw_ver;
        u8 drv_role;
        u8 timeout_val;
        u8 force_cmd;
        bool avoid_eng_reset;
};

struct qed_load_req_out_params {
        u32 load_code;
        u32 exist_drv_ver_0;
        u32 exist_drv_ver_1;
        u32 exist_fw_ver;
        u8 exist_drv_role;
        u8 mfw_hsi_ver;
        bool drv_exists;
};

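/* Send a single LOAD_REQ mailbox command and parse the response. With the
 * current HSI the request and response carry the driver/firmware versions,
 * role, timeout and force policy; with the legacy HSI (version 1) only the
 * response code itself is meaningful.
 */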
static int
__qed_mcp_load_req(struct qed_hwfn *p_hwfn,
                   struct qed_ptt *p_ptt,
                   struct qed_load_req_in_params *p_in_params,
                   struct qed_load_req_out_params *p_out_params)
{
        struct qed_mcp_mb_params mb_params;
        struct load_req_stc load_req;
        struct load_rsp_stc load_rsp;
        u32 hsi_ver;
        int rc;

        memset(&load_req, 0, sizeof(load_req));
        load_req.drv_ver_0 = p_in_params->drv_ver_0;
        load_req.drv_ver_1 = p_in_params->drv_ver_1;
        load_req.fw_ver = p_in_params->fw_ver;
        QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
        QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
                          p_in_params->timeout_val);
        QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
                          p_in_params->force_cmd);
        QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
                          p_in_params->avoid_eng_reset);

        hsi_ver = (p_in_params->hsi_ver == QED_LOAD_REQ_HSI_VER_DEFAULT) ?
                  DRV_ID_MCP_HSI_VER_CURRENT :
                  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);

        memset(&mb_params, 0, sizeof(mb_params));
        mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
        mb_params.param = PDA_COMP | hsi_ver | p_hwfn->cdev->drv_type;
        mb_params.p_data_src = &load_req;
        mb_params.data_src_size = sizeof(load_req);
        mb_params.p_data_dst = &load_rsp;
        mb_params.data_dst_size = sizeof(load_rsp);

        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
                   mb_params.param,
                   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
                   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
                   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
                   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

        if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1) {
                DP_VERBOSE(p_hwfn, QED_MSG_SP,
                           "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
                           load_req.drv_ver_0,
                           load_req.drv_ver_1,
                           load_req.fw_ver,
                           load_req.misc0,
                           QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
                           QED_MFW_GET_FIELD(load_req.misc0,
                                             LOAD_REQ_LOCK_TO),
                           QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
                           QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
        }

        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed to send load request, rc = %d\n", rc);
                return rc;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
        p_out_params->load_code = mb_params.mcp_resp;

        if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
            p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
                DP_VERBOSE(p_hwfn,
                           QED_MSG_SP,
                           "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
                           load_rsp.drv_ver_0,
                           load_rsp.drv_ver_1,
                           load_rsp.fw_ver,
                           load_rsp.misc0,
                           QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
                           QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
                           QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));

                p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
                p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
                p_out_params->exist_fw_ver = load_rsp.fw_ver;
                p_out_params->exist_drv_role =
                        QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
                p_out_params->mfw_hsi_ver =
                        QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
                p_out_params->drv_exists =
                        QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
                        LOAD_RSP_FLAGS0_DRV_EXISTS;
        }

        return 0;
}

static int qed_get_mfw_drv_role(struct qed_hwfn *p_hwfn,
                                enum qed_drv_role drv_role,
                                u8 *p_mfw_drv_role)
{
        switch (drv_role) {
        case QED_DRV_ROLE_OS:
                *p_mfw_drv_role = DRV_ROLE_OS;
                break;
        case QED_DRV_ROLE_KDUMP:
                *p_mfw_drv_role = DRV_ROLE_KDUMP;
                break;
        default:
                DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
                return -EINVAL;
        }

        return 0;
}

enum qed_load_req_force {
        QED_LOAD_REQ_FORCE_NONE,
        QED_LOAD_REQ_FORCE_PF,
        QED_LOAD_REQ_FORCE_ALL,
};

static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn,
                                  enum qed_load_req_force force_cmd,
                                  u8 *p_mfw_force_cmd)
{
        switch (force_cmd) {
        case QED_LOAD_REQ_FORCE_NONE:
                *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
                break;
        case QED_LOAD_REQ_FORCE_PF:
                *p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
                break;
        case QED_LOAD_REQ_FORCE_ALL:
                *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
                break;
        }
}

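/* Negotiate a load request with the MFW. If the MFW only supports the legacy
 * interface, the request is resent with HSI version 1. If the MFW demands a
 * force load, one is sent only when qed_mcp_can_force_load() permits it;
 * otherwise the load request is canceled to avoid disrupting active PFs.
 */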
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
                     struct qed_ptt *p_ptt,
                     struct qed_load_req_params *p_params)
{
        struct qed_load_req_out_params out_params;
        struct qed_load_req_in_params in_params;
        u8 mfw_drv_role, mfw_force_cmd;
        int rc;

        memset(&in_params, 0, sizeof(in_params));
        in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT;
        in_params.drv_ver_0 = QED_VERSION;
        in_params.drv_ver_1 = qed_get_config_bitmap();
        in_params.fw_ver = STORM_FW_VERSION;
        rc = qed_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
        if (rc)
                return rc;

        in_params.drv_role = mfw_drv_role;
        in_params.timeout_val = p_params->timeout_val;
        qed_get_mfw_force_cmd(p_hwfn,
                              QED_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);

        in_params.force_cmd = mfw_force_cmd;
        in_params.avoid_eng_reset = p_params->avoid_eng_reset;

        memset(&out_params, 0, sizeof(out_params));
        rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
        if (rc)
                return rc;

        /* First handle cases where another load request should/might be sent:
         * - MFW expects the old interface [HSI version = 1]
         * - MFW responds that a force load request is required
         */
        if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
                DP_INFO(p_hwfn,
                        "MFW refused a load request due to HSI > 1. Resending with HSI = 1\n");

                in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_1;
                memset(&out_params, 0, sizeof(out_params));
                rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
                if (rc)
                        return rc;
        } else if (out_params.load_code ==
                   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
                if (qed_mcp_can_force_load(in_params.drv_role,
                                           out_params.exist_drv_role,
                                           p_params->override_force_load)) {
                        DP_INFO(p_hwfn,
                                "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
                                in_params.drv_role, in_params.fw_ver,
                                in_params.drv_ver_0, in_params.drv_ver_1,
                                out_params.exist_drv_role,
                                out_params.exist_fw_ver,
                                out_params.exist_drv_ver_0,
                                out_params.exist_drv_ver_1);

                        qed_get_mfw_force_cmd(p_hwfn,
                                              QED_LOAD_REQ_FORCE_ALL,
                                              &mfw_force_cmd);

                        in_params.force_cmd = mfw_force_cmd;
                        memset(&out_params, 0, sizeof(out_params));
                        rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params,
                                                &out_params);
                        if (rc)
                                return rc;
                } else {
                        DP_NOTICE(p_hwfn,
                                  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
                                  in_params.drv_role, in_params.fw_ver,
                                  in_params.drv_ver_0, in_params.drv_ver_1,
                                  out_params.exist_drv_role,
                                  out_params.exist_fw_ver,
                                  out_params.exist_drv_ver_0,
                                  out_params.exist_drv_ver_1);
                        DP_NOTICE(p_hwfn,
                                  "Avoid sending a force load request to prevent disruption of active PFs\n");

                        qed_mcp_cancel_load_req(p_hwfn, p_ptt);
                        return -EBUSY;
                }
        }

        /* Now handle the other types of responses.
         * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
         * expected here after the additional revised load requests were sent.
         */
        switch (out_params.load_code) {
        case FW_MSG_CODE_DRV_LOAD_ENGINE:
        case FW_MSG_CODE_DRV_LOAD_PORT:
        case FW_MSG_CODE_DRV_LOAD_FUNCTION:
                if (out_params.mfw_hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
                    out_params.drv_exists) {
                        /* The role and fw/driver version match, but the PF is
                         * already loaded and has not been unloaded gracefully.
                         */
                        DP_NOTICE(p_hwfn, "PF is already loaded\n");
                        return -EINVAL;
                }
                break;
        default:
                DP_NOTICE(p_hwfn,
                          "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
                          out_params.load_code);
                return -EBUSY;
        }

        p_params->load_code = out_params.load_code;

        return 0;
}

int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        u32 wol_param, mcp_resp, mcp_param;

        switch (p_hwfn->cdev->wol_config) {
        case QED_OV_WOL_DISABLED:
                wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
                break;
        case QED_OV_WOL_ENABLED:
                wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
                break;
        default:
                DP_NOTICE(p_hwfn,
                          "Unknown WoL configuration %02x\n",
                          p_hwfn->cdev->wol_config);
                /* Fallthrough */
        case QED_OV_WOL_DEFAULT:
                wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
        }

        return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
                           &mcp_resp, &mcp_param);
}

int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        struct qed_mcp_mb_params mb_params;
        struct mcp_mac wol_mac;

        memset(&mb_params, 0, sizeof(mb_params));
        mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;

        /* Set the primary MAC if WoL is enabled */
        if (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED) {
                u8 *p_mac = p_hwfn->cdev->wol_mac;

                memset(&wol_mac, 0, sizeof(wol_mac));
                wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
                wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
                                    p_mac[4] << 8 | p_mac[5];

                DP_VERBOSE(p_hwfn,
                           (QED_MSG_SP | NETIF_MSG_IFDOWN),
                           "Setting WoL MAC: %pM --> [%08x,%08x]\n",
                           p_mac, wol_mac.mac_upper, wol_mac.mac_lower);

                mb_params.p_data_src = &wol_mac;
                mb_params.data_src_size = sizeof(wol_mac);
        }

        return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

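/* Read the per-path bitmap of VFs that the MFW reports as FLR-ed, and
 * schedule the IOV workqueue to perform the actual cleanup.
 */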
static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt)
{
        u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
                                        PUBLIC_PATH);
        u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
        u32 path_addr = SECTION_ADDR(mfw_path_offsize,
                                     QED_PATH_ID(p_hwfn));
        u32 disabled_vfs[VF_MAX_STATIC / 32];
        int i;

        DP_VERBOSE(p_hwfn,
                   QED_MSG_SP,
                   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
                   mfw_path_offsize, path_addr);

        for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
                disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
                                         path_addr +
                                         offsetof(struct public_path,
                                                  mcp_vf_disabled) +
                                         sizeof(u32) * i);
                DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
                           "FLR-ed VFs [%08x,...,%08x] - %08x\n",
                           i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
        }

        if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
                qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
}

int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
                       struct qed_ptt *p_ptt, u32 *vfs_to_ack)
{
        u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
                                        PUBLIC_FUNC);
        u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
        u32 func_addr = SECTION_ADDR(mfw_func_offsize,
                                     MCP_PF_ID(p_hwfn));
        struct qed_mcp_mb_params mb_params;
        int rc;
        int i;

        for (i = 0; i < (VF_MAX_STATIC / 32); i++)
                DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
                           "Acking VFs [%08x,...,%08x] - %08x\n",
                           i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

        memset(&mb_params, 0, sizeof(mb_params));
        mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
        mb_params.p_data_src = vfs_to_ack;
        mb_params.data_src_size = VF_MAX_STATIC / 8;
        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
                return -EBUSY;
        }

        /* Clear the ACK bits */
        for (i = 0; i < (VF_MAX_STATIC / 32); i++)
                qed_wr(p_hwfn, p_ptt,
                       func_addr +
                       offsetof(struct public_func, drv_ack_vf_disabled) +
                       i * sizeof(u32), 0);

        return rc;
}

static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
                                              struct qed_ptt *p_ptt)
{
        u32 transceiver_state;

        transceiver_state = qed_rd(p_hwfn, p_ptt,
                                   p_hwfn->mcp_info->port_addr +
                                   offsetof(struct public_port,
                                            transceiver_data));

        DP_VERBOSE(p_hwfn,
                   (NETIF_MSG_HW | QED_MSG_SP),
                   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
                   transceiver_state,
                   (u32)(p_hwfn->mcp_info->port_addr +
                         offsetof(struct public_port, transceiver_data)));

        transceiver_state = GET_FIELD(transceiver_state,
                                      ETH_TRANSCEIVER_STATE);

        if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
                DP_NOTICE(p_hwfn, "Transceiver is present.\n");
        else
                DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
}

static void qed_mcp_read_eee_config(struct qed_hwfn *p_hwfn,
                                    struct qed_ptt *p_ptt,
                                    struct qed_mcp_link_state *p_link)
{
        u32 eee_status, val;

        p_link->eee_adv_caps = 0;
        p_link->eee_lp_adv_caps = 0;
        eee_status = qed_rd(p_hwfn,
                            p_ptt,
                            p_hwfn->mcp_info->port_addr +
                            offsetof(struct public_port, eee_status));
        p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
        val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
        if (val & EEE_1G_ADV)
                p_link->eee_adv_caps |= QED_EEE_1G_ADV;
        if (val & EEE_10G_ADV)
                p_link->eee_adv_caps |= QED_EEE_10G_ADV;
        val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
        if (val & EEE_1G_ADV)
                p_link->eee_lp_adv_caps |= QED_EEE_1G_ADV;
        if (val & EEE_10G_ADV)
                p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV;
}

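/* Refresh the cached link state from the port's shared-memory link_status
 * (or clear it when b_reset is set), re-apply the min/max bandwidth
 * configuration, and notify the rest of the driver via qed_link_update().
 */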
static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
                                       struct qed_ptt *p_ptt, bool b_reset)
{
        struct qed_mcp_link_state *p_link;
        u8 max_bw, min_bw;
        u32 status = 0;

        /* Prevent SW/attentions from doing this at the same time */
        spin_lock_bh(&p_hwfn->mcp_info->link_lock);

        p_link = &p_hwfn->mcp_info->link_output;
        memset(p_link, 0, sizeof(*p_link));
        if (!b_reset) {
                status = qed_rd(p_hwfn, p_ptt,
                                p_hwfn->mcp_info->port_addr +
                                offsetof(struct public_port, link_status));
                DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
                           "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
                           status,
                           (u32)(p_hwfn->mcp_info->port_addr +
                                 offsetof(struct public_port, link_status)));
        } else {
                DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
                           "Resetting link indications\n");
                goto out;
        }

        if (p_hwfn->b_drv_link_init)
                p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
        else
                p_link->link_up = false;

        p_link->full_duplex = true;
        switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
        case LINK_STATUS_SPEED_AND_DUPLEX_100G:
                p_link->speed = 100000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_50G:
                p_link->speed = 50000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_40G:
                p_link->speed = 40000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_25G:
                p_link->speed = 25000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_20G:
                p_link->speed = 20000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_10G:
                p_link->speed = 10000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
                p_link->full_duplex = false;
                /* Fall-through */
        case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
                p_link->speed = 1000;
                break;
        default:
                p_link->speed = 0;
        }

        if (p_link->link_up && p_link->speed)
                p_link->line_speed = p_link->speed;
        else
                p_link->line_speed = 0;

        max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
        min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

        /* Max bandwidth configuration */
        __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);

        /* Min bandwidth configuration */
        __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
        qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_ptt,
                                            p_link->min_pf_rate);

        p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
        p_link->an_complete = !!(status &
                                 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
        p_link->parallel_detection = !!(status &
                                        LINK_STATUS_PARALLEL_DETECTION_USED);
        p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
                QED_LINK_PARTNER_SPEED_1G_FD : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
                QED_LINK_PARTNER_SPEED_1G_HD : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
                QED_LINK_PARTNER_SPEED_10G : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
                QED_LINK_PARTNER_SPEED_20G : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
                QED_LINK_PARTNER_SPEED_25G : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
                QED_LINK_PARTNER_SPEED_40G : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
                QED_LINK_PARTNER_SPEED_50G : 0;
        p_link->partner_adv_speed |=
                (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
                QED_LINK_PARTNER_SPEED_100G : 0;

        p_link->partner_tx_flow_ctrl_en =
                !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
        p_link->partner_rx_flow_ctrl_en =
                !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

        switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
        case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
                p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
                break;
        case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
                p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
                break;
        case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
                p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
                break;
        default:
                p_link->partner_adv_pause = 0;
        }

        p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

        if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
                qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link);

        qed_link_update(p_hwfn);
out:
        spin_unlock_bh(&p_hwfn->mcp_info->link_lock);
}

int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
{
        struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
        struct qed_mcp_mb_params mb_params;
        struct eth_phy_cfg phy_cfg;
        int rc = 0;
        u32 cmd;

        /* Set the shmem configuration according to params */
        memset(&phy_cfg, 0, sizeof(phy_cfg));
        cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
        if (!params->speed.autoneg)
                phy_cfg.speed = params->speed.forced_speed;
        phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
        phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
        phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
        phy_cfg.adv_speed = params->speed.advertised_speeds;
        phy_cfg.loopback_mode = params->loopback_mode;
        if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
                if (params->eee.enable)
                        phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
                if (params->eee.tx_lpi_enable)
                        phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
                if (params->eee.adv_caps & QED_EEE_1G_ADV)
                        phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
                if (params->eee.adv_caps & QED_EEE_10G_ADV)
                        phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
                phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
                                    EEE_TX_TIMER_USEC_OFFSET) &
                                   EEE_TX_TIMER_USEC_MASK;
        }

        p_hwfn->b_drv_link_init = b_up;

        if (b_up) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
                           "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
                           phy_cfg.speed,
                           phy_cfg.pause,
                           phy_cfg.adv_speed,
                           phy_cfg.loopback_mode,
                           phy_cfg.feature_config_flags);
        } else {
                DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
                           "Resetting link\n");
        }

        memset(&mb_params, 0, sizeof(mb_params));
        mb_params.cmd = cmd;
        mb_params.p_data_src = &phy_cfg;
        mb_params.data_src_size = sizeof(phy_cfg);
        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

        /* if mcp fails to respond we must abort */
        if (rc) {
                DP_ERR(p_hwfn, "MCP response failure, aborting\n");
                return rc;
        }

        /* Mimic link-change attention, done for several reasons:
         * - On reset, there's no guarantee MFW would trigger
         *   an attention.
         * - On initialization, older MFWs might not indicate link change
         *   during LFA, so we'll never get an UP indication.
         */
        qed_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);

        return 0;
}

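/* Collect the protocol statistics requested by the MFW and return them
 * through the DRV_MSG_CODE_GET_STATS mailbox command.
 */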
static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
                                        struct qed_ptt *p_ptt,
                                        enum MFW_DRV_MSG_TYPE type)
{
        enum qed_mcp_protocol_type stats_type;
        union qed_mcp_protocol_stats stats;
        struct qed_mcp_mb_params mb_params;
        u32 hsi_param;

        switch (type) {
        case MFW_DRV_MSG_GET_LAN_STATS:
                stats_type = QED_MCP_LAN_STATS;
                hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
                break;
        case MFW_DRV_MSG_GET_FCOE_STATS:
                stats_type = QED_MCP_FCOE_STATS;
                hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
                break;
        case MFW_DRV_MSG_GET_ISCSI_STATS:
                stats_type = QED_MCP_ISCSI_STATS;
                hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
                break;
        case MFW_DRV_MSG_GET_RDMA_STATS:
                stats_type = QED_MCP_RDMA_STATS;
                hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
                break;
        default:
                DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
                return;
        }

        qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats);

        memset(&mb_params, 0, sizeof(mb_params));
        mb_params.cmd = DRV_MSG_CODE_GET_STATS;
        mb_params.param = hsi_param;
        mb_params.p_data_src = &stats;
        mb_params.data_src_size = sizeof(stats);
        qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
                                  struct public_func *p_shmem_info)
{
        struct qed_mcp_function_info *p_info;

        p_info = &p_hwfn->mcp_info->func_info;

        p_info->bandwidth_min = (p_shmem_info->config &
                                 FUNC_MF_CFG_MIN_BW_MASK) >>
                                FUNC_MF_CFG_MIN_BW_SHIFT;
        if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
                DP_INFO(p_hwfn,
                        "bandwidth minimum out of bounds [%02x]. Set to 1\n",
                        p_info->bandwidth_min);
                p_info->bandwidth_min = 1;
        }

        p_info->bandwidth_max = (p_shmem_info->config &
                                 FUNC_MF_CFG_MAX_BW_MASK) >>
                                FUNC_MF_CFG_MAX_BW_SHIFT;
        if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
                DP_INFO(p_hwfn,
                        "bandwidth maximum out of bounds [%02x]. Set to 100\n",
                        p_info->bandwidth_max);
                p_info->bandwidth_max = 100;
        }
}

static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt,
                                  struct public_func *p_data, int pfid)
{
        u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
                                        PUBLIC_FUNC);
        u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
        u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
        u32 i, size;

        memset(p_data, 0, sizeof(*p_data));

        size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
        for (i = 0; i < size / sizeof(u32); i++)
                ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
                                            func_addr + (i << 2));
        return size;
}

static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        struct qed_mcp_function_info *p_info;
        struct public_func shmem_info;
        u32 resp = 0, param = 0;

        qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

        qed_read_pf_bandwidth(p_hwfn, &shmem_info);

        p_info = &p_hwfn->mcp_info->func_info;

        qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
        qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);

        /* Acknowledge the MFW */
        qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
                    &param);
}

static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        struct public_func shmem_info;
        u32 resp = 0, param = 0;

        qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

        p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
                                            FUNC_MF_CFG_OV_STAG_MASK;
        p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
        if ((p_hwfn->hw_info.hw_mode & BIT(MODE_MF_SD)) &&
            (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET)) {
                qed_wr(p_hwfn, p_ptt,
                       NIG_REG_LLH_FUNC_TAG_VALUE, p_hwfn->hw_info.ovlan);
                qed_sp_pf_update_stag(p_hwfn);
        }

        /* Acknowledge the MFW */
        qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
                    &resp, &param);
}

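/* Parse the OEM Unified Fabric Port (UFP) configuration from the port and
 * function shared-memory sections: channel type, scheduling mode, the
 * function's TC and the host priority control type.
 */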
Sudarsana Reddy Kallurucac6f692018-05-05 18:43:02 -07001490void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1491{
1492 struct public_func shmem_info;
1493 u32 port_cfg, val;
1494
1495 if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
1496 return;
1497
1498 memset(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
1499 port_cfg = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1500 offsetof(struct public_port, oem_cfg_port));
1501 val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >>
1502 OEM_CFG_CHANNEL_TYPE_OFFSET;
1503 if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
1504 DP_NOTICE(p_hwfn, "Incorrect UFP Channel type %d\n", val);
1505
1506 val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET;
1507 if (val == OEM_CFG_SCHED_TYPE_ETS) {
1508 p_hwfn->ufp_info.mode = QED_UFP_MODE_ETS;
1509 } else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) {
1510 p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW;
1511 } else {
1512 p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN;
1513 DP_NOTICE(p_hwfn, "Unknown UFP scheduling mode %d\n", val);
1514 }
1515
1516 qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1517 val = (port_cfg & OEM_CFG_FUNC_TC_MASK) >> OEM_CFG_FUNC_TC_OFFSET;
1518 p_hwfn->ufp_info.tc = (u8)val;
1519 val = (port_cfg & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >>
1520 OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET;
1521 if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) {
1522 p_hwfn->ufp_info.pri_type = QED_UFP_PRI_VNIC;
1523 } else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) {
1524 p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS;
1525 } else {
1526 p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN;
1527 DP_NOTICE(p_hwfn, "Unknown Host priority control %d\n", val);
1528 }
1529
1530 DP_NOTICE(p_hwfn,
1531 "UFP shmem config: mode = %d tc = %d pri_type = %d\n",
1532 p_hwfn->ufp_info.mode,
1533 p_hwfn->ufp_info.tc, p_hwfn->ufp_info.pri_type);
1534}
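
/* All the oem_cfg_port fields above follow the usual HSI pattern
 *
 *	val = (port_cfg & OEM_CFG_<FIELD>_MASK) >> OEM_CFG_<FIELD>_OFFSET;
 *
 * so extending the UFP configuration only requires a new MASK/OFFSET pair;
 * the <FIELD> placeholder here is illustrative, not an actual macro.
 */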
1535
1536static int
1537qed_mcp_handle_ufp_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1538{
1539 qed_mcp_read_ufp_config(p_hwfn, p_ptt);
1540
1541 if (p_hwfn->ufp_info.mode == QED_UFP_MODE_VNIC_BW) {
1542 p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
1543 p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc;
1544
1545 qed_qm_reconf(p_hwfn, p_ptt);
1546 } else if (p_hwfn->ufp_info.mode == QED_UFP_MODE_ETS) {
1547 /* Merge UFP TC with the dcbx TC data */
1548 qed_dcbx_mib_update_event(p_hwfn, p_ptt,
1549 QED_DCBX_OPERATIONAL_MIB);
1550 } else {
1551 DP_ERR(p_hwfn, "Invalid sched type, discard the UFP config\n");
1552 return -EINVAL;
1553 }
1554
1555 /* update storm FW with negotiation results */
1556 qed_sp_pf_update_ufp(p_hwfn);
1557
1558 /* update stag pcp value */
1559 qed_sp_pf_update_stag(p_hwfn);
1560
1561 return 0;
1562}
1563
Yuval Mintzcc875c22015-10-26 11:02:31 +02001564int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
1565 struct qed_ptt *p_ptt)
1566{
1567 struct qed_mcp_info *info = p_hwfn->mcp_info;
1568 int rc = 0;
1569 bool found = false;
1570 u16 i;
1571
1572 DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");
1573
1574 /* Read Messages from MFW */
1575 qed_mcp_read_mb(p_hwfn, p_ptt);
1576
1577 /* Compare current messages to old ones */
1578 for (i = 0; i < info->mfw_mb_length; i++) {
1579 if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
1580 continue;
1581
1582 found = true;
1583
1584 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1585 "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
1586 i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
1587
1588 switch (i) {
1589 case MFW_DRV_MSG_LINK_CHANGE:
1590 qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
1591 break;
Yuval Mintz0b55e272016-05-11 16:36:15 +03001592 case MFW_DRV_MSG_VF_DISABLED:
1593 qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
1594 break;
Sudarsana Reddy Kalluru39651ab2016-05-17 06:44:26 -04001595 case MFW_DRV_MSG_LLDP_DATA_UPDATED:
1596 qed_dcbx_mib_update_event(p_hwfn, p_ptt,
1597 QED_DCBX_REMOTE_LLDP_MIB);
1598 break;
1599 case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
1600 qed_dcbx_mib_update_event(p_hwfn, p_ptt,
1601 QED_DCBX_REMOTE_MIB);
1602 break;
1603 case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
1604 qed_dcbx_mib_update_event(p_hwfn, p_ptt,
1605 QED_DCBX_OPERATIONAL_MIB);
1606 break;
Sudarsana Reddy Kallurucac6f692018-05-05 18:43:02 -07001607 case MFW_DRV_MSG_OEM_CFG_UPDATE:
1608 qed_mcp_handle_ufp_event(p_hwfn, p_ptt);
1609 break;
Zvi Nachmani334c03b2016-03-09 09:16:25 +02001610 case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
1611 qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
1612 break;
Sudarsana Reddy Kalluru6c754242016-08-16 10:51:03 -04001613 case MFW_DRV_MSG_GET_LAN_STATS:
1614 case MFW_DRV_MSG_GET_FCOE_STATS:
1615 case MFW_DRV_MSG_GET_ISCSI_STATS:
1616 case MFW_DRV_MSG_GET_RDMA_STATS:
1617 qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
1618 break;
Manish Chopra4b01e512016-04-26 10:56:09 -04001619 case MFW_DRV_MSG_BW_UPDATE:
1620 qed_mcp_update_bw(p_hwfn, p_ptt);
1621 break;
Mintz, Yuval2a351fd92017-05-29 09:53:09 +03001622 case MFW_DRV_MSG_S_TAG_UPDATE:
1623 qed_mcp_update_stag(p_hwfn, p_ptt);
1624 break;
Sudarsana Reddy Kalluru59ccf862018-05-22 00:28:41 -07001625 case MFW_DRV_MSG_GET_TLV_REQ:
1626 qed_mfw_tlv_req(p_hwfn);
Mintz, Yuval2a351fd92017-05-29 09:53:09 +03001627 break;
Yuval Mintzcc875c22015-10-26 11:02:31 +02001628 default:
Mintz, Yuval39815942017-03-23 15:50:18 +02001629 DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
Yuval Mintzcc875c22015-10-26 11:02:31 +02001630 rc = -EINVAL;
1631 }
1632 }
1633
1634 /* ACK everything */
1635 for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
1636 __be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);
1637
1638	/* The MFW expects the answer in BE, so force the write in that format */
1639 qed_wr(p_hwfn, p_ptt,
1640 info->mfw_mb_addr + sizeof(u32) +
1641 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
1642 sizeof(u32) + i * sizeof(u32),
1643 (__force u32)val);
1644 }
1645
1646 if (!found) {
1647 DP_NOTICE(p_hwfn,
1648 "Received an MFW message indication but no new message!\n");
1649 rc = -EINVAL;
1650 }
1651
1652 /* Copy the new mfw messages into the shadow */
1653 memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
1654
1655 return rc;
1656}
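
/* The ACK arithmetic above implies the following mailbox layout, with
 * n = MFW_DRV_MSG_MAX_DWORDS(mfw_mb_length):
 *
 *	mfw_mb_addr + 0          : mailbox length
 *	mfw_mb_addr + 4          : current messages  [n dwords]
 *	mfw_mb_addr + 4 + n * 4  : driver ACKs       [n dwords]
 *
 * which is why each ACK is written at sizeof(u32) + n * sizeof(u32) +
 * i * sizeof(u32) from the mailbox base.
 */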
1657
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001658int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
1659 struct qed_ptt *p_ptt,
1660 u32 *p_mfw_ver, u32 *p_running_bundle_id)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001661{
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001662 u32 global_offsize;
1663
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001664 if (IS_VF(p_hwfn->cdev)) {
1665 if (p_hwfn->vf_iov_info) {
1666 struct pfvf_acquire_resp_tlv *p_resp;
1667
1668 p_resp = &p_hwfn->vf_iov_info->acquire_resp;
1669 *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
1670 return 0;
1671 } else {
1672 DP_VERBOSE(p_hwfn,
1673 QED_MSG_IOV,
1674 "VF requested MFW version prior to ACQUIRE\n");
1675 return -EINVAL;
1676 }
1677 }
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001678
1679 global_offsize = qed_rd(p_hwfn, p_ptt,
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001680 SECTION_OFFSIZE_ADDR(p_hwfn->
1681 mcp_info->public_base,
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001682 PUBLIC_GLOBAL));
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001683 *p_mfw_ver =
1684 qed_rd(p_hwfn, p_ptt,
1685 SECTION_ADDR(global_offsize,
1686 0) + offsetof(struct public_global, mfw_ver));
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001687
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001688	if (p_running_bundle_id) {
1689 *p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
1690 SECTION_ADDR(global_offsize, 0) +
1691 offsetof(struct public_global,
1692 running_bundle_id));
1693 }
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001694
1695 return 0;
1696}
1697
Tomer Tayarae336662017-05-23 09:41:26 +03001698int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
1699 struct qed_ptt *p_ptt, u32 *p_mbi_ver)
1700{
1701 u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;
1702
1703 if (IS_VF(p_hwfn->cdev))
1704 return -EINVAL;
1705
1706 /* Read the address of the nvm_cfg */
1707 nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
1708 if (!nvm_cfg_addr) {
1709 DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
1710 return -EINVAL;
1711 }
1712
1713 /* Read the offset of nvm_cfg1 */
1714 nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
1715
1716 mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1717 offsetof(struct nvm_cfg1, glob) +
1718 offsetof(struct nvm_cfg1_glob, mbi_version);
1719 *p_mbi_ver = qed_rd(p_hwfn, p_ptt,
1720 mbi_ver_addr) &
1721 (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
1722 NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
1723 NVM_CFG1_GLOB_MBI_VERSION_2_MASK);
1724
1725 return 0;
1726}
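
/* The three MBI version fields share the dword returned above; a caller can
 * split them with the matching shifts, e.g. (sketch, assuming the matching
 * NVM_CFG1_GLOB_MBI_VERSION_*_OFFSET macros exist in the nvm_cfg HSI):
 *
 *	u8 b0 = (mbi_ver & NVM_CFG1_GLOB_MBI_VERSION_0_MASK) >>
 *		NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET;
 */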
1727
Yuval Mintz1a635e42016-08-15 10:42:43 +03001728int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type)
Yuval Mintzcc875c22015-10-26 11:02:31 +02001729{
1730 struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
1731 struct qed_ptt *p_ptt;
1732
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001733 if (IS_VF(cdev))
1734 return -EINVAL;
1735
Yuval Mintzcc875c22015-10-26 11:02:31 +02001736 if (!qed_mcp_is_init(p_hwfn)) {
Yuval Mintz525ef5c2016-08-15 10:42:45 +03001737 DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
Yuval Mintzcc875c22015-10-26 11:02:31 +02001738 return -EBUSY;
1739 }
1740
1741 *p_media_type = MEDIA_UNSPECIFIED;
1742
1743 p_ptt = qed_ptt_acquire(p_hwfn);
1744 if (!p_ptt)
1745 return -EBUSY;
1746
1747 *p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1748 offsetof(struct public_port, media_type));
1749
1750 qed_ptt_release(p_hwfn, p_ptt);
1751
1752 return 0;
1753}
1754
Mintz, Yuval6927e822016-10-31 07:14:25 +02001755/* Old MFW has a global configuration for all PFs regarding RDMA support */
1756static void
1757qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn,
1758 enum qed_pci_personality *p_proto)
1759{
1760	/* No legacy MFW ever published iWARP support, so at this point this
1761	 * is either plain L2 or RoCE.
1762 */
1763 if (test_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities))
1764 *p_proto = QED_PCI_ETH_ROCE;
1765 else
1766 *p_proto = QED_PCI_ETH;
1767
1768 DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
1769 "According to Legacy capabilities, L2 personality is %08x\n",
1770 (u32) *p_proto);
1771}
1772
1773static int
1774qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn,
1775 struct qed_ptt *p_ptt,
1776 enum qed_pci_personality *p_proto)
1777{
1778 u32 resp = 0, param = 0;
1779 int rc;
1780
1781 rc = qed_mcp_cmd(p_hwfn, p_ptt,
1782 DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
1783 if (rc)
1784 return rc;
1785 if (resp != FW_MSG_CODE_OK) {
1786 DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
1787 "MFW lacks support for command; Returns %08x\n",
1788 resp);
1789 return -EINVAL;
1790 }
1791
1792 switch (param) {
1793 case FW_MB_PARAM_GET_PF_RDMA_NONE:
1794 *p_proto = QED_PCI_ETH;
1795 break;
1796 case FW_MB_PARAM_GET_PF_RDMA_ROCE:
1797 *p_proto = QED_PCI_ETH_ROCE;
1798 break;
Mintz, Yuval6927e822016-10-31 07:14:25 +02001799 case FW_MB_PARAM_GET_PF_RDMA_IWARP:
Michal Kalderone0a8f9d2017-09-24 12:09:42 +03001800 *p_proto = QED_PCI_ETH_IWARP;
1801 break;
1802 case FW_MB_PARAM_GET_PF_RDMA_BOTH:
1803 *p_proto = QED_PCI_ETH_RDMA;
1804 break;
Mintz, Yuval6927e822016-10-31 07:14:25 +02001805 default:
1806 DP_NOTICE(p_hwfn,
1807 "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
1808 param);
1809 return -EINVAL;
1810 }
1811
1812 DP_VERBOSE(p_hwfn,
1813 NETIF_MSG_IFUP,
1814 "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
1815 (u32) *p_proto, resp, param);
1816 return 0;
1817}
1818
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001819static int
1820qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
1821 struct public_func *p_info,
Mintz, Yuval6927e822016-10-31 07:14:25 +02001822 struct qed_ptt *p_ptt,
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001823 enum qed_pci_personality *p_proto)
1824{
1825 int rc = 0;
1826
1827 switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
1828 case FUNC_MF_CFG_PROTOCOL_ETHERNET:
Ram Amrani1fe582e2017-01-01 13:57:10 +02001829 if (!IS_ENABLED(CONFIG_QED_RDMA))
1830 *p_proto = QED_PCI_ETH;
1831 else if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto))
Mintz, Yuval6927e822016-10-31 07:14:25 +02001832 qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
Yuval Mintzc5ac9312016-06-03 14:35:34 +03001833 break;
1834 case FUNC_MF_CFG_PROTOCOL_ISCSI:
1835 *p_proto = QED_PCI_ISCSI;
1836 break;
Arun Easi1e128c82017-02-15 06:28:22 -08001837 case FUNC_MF_CFG_PROTOCOL_FCOE:
1838 *p_proto = QED_PCI_FCOE;
1839 break;
Yuval Mintzc5ac9312016-06-03 14:35:34 +03001840 case FUNC_MF_CFG_PROTOCOL_ROCE:
1841 DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
Mintz, Yuval6927e822016-10-31 07:14:25 +02001842 /* Fallthrough */
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001843 default:
1844 rc = -EINVAL;
1845 }
1846
1847 return rc;
1848}
1849
1850int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
1851 struct qed_ptt *p_ptt)
1852{
1853 struct qed_mcp_function_info *info;
1854 struct public_func shmem_info;
1855
Yuval Mintz1a635e42016-08-15 10:42:43 +03001856 qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001857 info = &p_hwfn->mcp_info->func_info;
1858
1859 info->pause_on_host = (shmem_info.config &
1860 FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
1861
Mintz, Yuval6927e822016-10-31 07:14:25 +02001862 if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
1863 &info->protocol)) {
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001864 DP_ERR(p_hwfn, "Unknown personality %08x\n",
1865 (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
1866 return -EINVAL;
1867 }
1868
Manish Chopra4b01e512016-04-26 10:56:09 -04001869 qed_read_pf_bandwidth(p_hwfn, &shmem_info);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001870
1871 if (shmem_info.mac_upper || shmem_info.mac_lower) {
1872 info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
1873 info->mac[1] = (u8)(shmem_info.mac_upper);
1874 info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
1875 info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
1876 info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
1877 info->mac[5] = (u8)(shmem_info.mac_lower);
Mintz, Yuval14d39642016-10-31 07:14:23 +02001878
1879 /* Store primary MAC for later possible WoL */
1880 memcpy(&p_hwfn->cdev->wol_mac, info->mac, ETH_ALEN);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001881 } else {
1882 DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
1883 }
1884
Mintz, Yuval57796752017-06-02 08:58:30 +03001885 info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_lower |
1886 (((u64)shmem_info.fcoe_wwn_port_name_upper) << 32);
1887 info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_lower |
1888 (((u64)shmem_info.fcoe_wwn_node_name_upper) << 32);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001889
1890 info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
1891
Sudarsana Kalluru0fefbfb2016-10-31 07:14:21 +02001892 info->mtu = (u16)shmem_info.mtu_size;
1893
Mintz, Yuval14d39642016-10-31 07:14:23 +02001894 p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_NONE;
1895 p_hwfn->cdev->wol_config = (u8)QED_OV_WOL_DEFAULT;
1896 if (qed_mcp_is_init(p_hwfn)) {
1897 u32 resp = 0, param = 0;
1898 int rc;
1899
1900 rc = qed_mcp_cmd(p_hwfn, p_ptt,
1901 DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
1902 if (rc)
1903 return rc;
1904 if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
1905 p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_PME;
1906 }
1907
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001908 DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
Mintz, Yuval14d39642016-10-31 07:14:23 +02001909 "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n",
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001910 info->pause_on_host, info->protocol,
1911 info->bandwidth_min, info->bandwidth_max,
1912 info->mac[0], info->mac[1], info->mac[2],
1913 info->mac[3], info->mac[4], info->mac[5],
Mintz, Yuval14d39642016-10-31 07:14:23 +02001914 info->wwn_port, info->wwn_node,
1915 info->ovlan, (u8)p_hwfn->hw_info.b_wol_support);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001916
1917 return 0;
1918}
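
/* Worked example for the MAC unpacking above: for 00:11:22:33:44:55 the MFW
 * publishes mac_upper = 0x0011 and mac_lower = 0x22334455, i.e. the address
 * is laid out big-endian across the two shmem words.
 */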
1919
Yuval Mintzcc875c22015-10-26 11:02:31 +02001920struct qed_mcp_link_params
1921*qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
1922{
1923 if (!p_hwfn || !p_hwfn->mcp_info)
1924 return NULL;
1925 return &p_hwfn->mcp_info->link_input;
1926}
1927
1928struct qed_mcp_link_state
1929*qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
1930{
1931 if (!p_hwfn || !p_hwfn->mcp_info)
1932 return NULL;
1933 return &p_hwfn->mcp_info->link_output;
1934}
1935
1936struct qed_mcp_link_capabilities
1937*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
1938{
1939 if (!p_hwfn || !p_hwfn->mcp_info)
1940 return NULL;
1941 return &p_hwfn->mcp_info->link_capabilities;
1942}
1943
Yuval Mintz1a635e42016-08-15 10:42:43 +03001944int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001945{
1946 u32 resp = 0, param = 0;
1947 int rc;
1948
1949 rc = qed_mcp_cmd(p_hwfn, p_ptt,
Yuval Mintz1a635e42016-08-15 10:42:43 +03001950 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001951
1952 /* Wait for the drain to complete before returning */
Yuval Mintz8f60baf2016-03-09 09:16:26 +02001953 msleep(1020);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001954
1955 return rc;
1956}
1957
Manish Chopracee4d262015-10-26 11:02:28 +02001958int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
Yuval Mintz1a635e42016-08-15 10:42:43 +03001959 struct qed_ptt *p_ptt, u32 *p_flash_size)
Manish Chopracee4d262015-10-26 11:02:28 +02001960{
1961 u32 flash_size;
1962
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001963 if (IS_VF(p_hwfn->cdev))
1964 return -EINVAL;
1965
Manish Chopracee4d262015-10-26 11:02:28 +02001966 flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
1967 flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
1968 MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
1969 flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
1970
1971 *p_flash_size = flash_size;
1972
1973 return 0;
1974}
1975
Mintz, Yuval88072fd2017-05-29 09:53:08 +03001976static int
1977qed_mcp_config_vf_msix_bb(struct qed_hwfn *p_hwfn,
1978 struct qed_ptt *p_ptt, u8 vf_id, u8 num)
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001979{
1980 u32 resp = 0, param = 0, rc_param = 0;
1981 int rc;
1982
1983	/* Only the leader can configure MSI-X, and CMT must be taken into account */
1984 if (!IS_LEAD_HWFN(p_hwfn))
1985 return 0;
1986 num *= p_hwfn->cdev->num_hwfns;
1987
1988 param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
1989 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
1990 param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
1991 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
1992
1993 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
1994 &resp, &rc_param);
1995
1996 if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
1997 DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
1998 rc = -EINVAL;
1999 } else {
2000 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2001 "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
2002 num, vf_id);
2003 }
2004
2005 return rc;
2006}
2007
Mintz, Yuval88072fd2017-05-29 09:53:08 +03002008static int
2009qed_mcp_config_vf_msix_ah(struct qed_hwfn *p_hwfn,
2010 struct qed_ptt *p_ptt, u8 num)
2011{
2012 u32 resp = 0, param = num, rc_param = 0;
2013 int rc;
2014
2015 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
2016 param, &resp, &rc_param);
2017
2018 if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
2019 DP_NOTICE(p_hwfn, "MFW failed to set MSI-X for VFs\n");
2020 rc = -EINVAL;
2021 } else {
2022 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2023 "Requested 0x%02x MSI-x interrupts for VFs\n", num);
2024 }
2025
2026 return rc;
2027}
2028
2029int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
2030 struct qed_ptt *p_ptt, u8 vf_id, u8 num)
2031{
2032 if (QED_IS_BB(p_hwfn->cdev))
2033 return qed_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
2034 else
2035 return qed_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
2036}
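
/* Usage sketch (assumes the caller holds a valid PTT and VF id):
 *
 *	rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf_id, num_sbs);
 *
 * On BB the request is issued per-VF and scaled by the number of hwfns to
 * account for CMT; on AH the MFW configures a single MSI-X pool for all the
 * PF's VFs, which is why the _ah() variant ignores vf_id.
 */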
2037
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02002038int
2039qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
2040 struct qed_ptt *p_ptt,
2041 struct qed_mcp_drv_version *p_ver)
2042{
Tomer Tayar5529bad2016-03-09 09:16:24 +02002043 struct qed_mcp_mb_params mb_params;
Tomer Tayar2f67af8c2017-03-23 15:50:16 +02002044 struct drv_version_stc drv_version;
Tomer Tayar5529bad2016-03-09 09:16:24 +02002045 __be32 val;
2046 u32 i;
2047 int rc;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02002048
Tomer Tayar2f67af8c2017-03-23 15:50:16 +02002049 memset(&drv_version, 0, sizeof(drv_version));
2050 drv_version.version = p_ver->version;
Yuval Mintz67a99b72016-09-19 17:47:41 +03002051 for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
2052 val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
Tomer Tayar2f67af8c2017-03-23 15:50:16 +02002053 *(__be32 *)&drv_version.name[i * sizeof(u32)] = val;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02002054 }
2055
Tomer Tayar5529bad2016-03-09 09:16:24 +02002056 memset(&mb_params, 0, sizeof(mb_params));
2057 mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
Tomer Tayar2f67af8c2017-03-23 15:50:16 +02002058 mb_params.p_data_src = &drv_version;
2059 mb_params.data_src_size = sizeof(drv_version);
Tomer Tayar5529bad2016-03-09 09:16:24 +02002060 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2061 if (rc)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02002062 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02002063
Tomer Tayar5529bad2016-03-09 09:16:24 +02002064 return rc;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02002065}
Sudarsana Kalluru91420b82015-11-30 12:25:03 +02002066
Tomer Tayar41024262016-09-05 14:35:10 +03002067int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2068{
2069 u32 resp = 0, param = 0;
2070 int rc;
2071
2072 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2073 &param);
2074 if (rc)
2075 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2076
2077 return rc;
2078}
2079
2080int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2081{
2082 u32 value, cpu_mode;
2083
2084 qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
2085
2086 value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2087 value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2088 qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
2089 cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2090
2091 return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0;
2092}
2093
Sudarsana Kalluru0fefbfb2016-10-31 07:14:21 +02002094int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
2095 struct qed_ptt *p_ptt,
2096 enum qed_ov_client client)
2097{
2098 u32 resp = 0, param = 0;
2099 u32 drv_mb_param;
2100 int rc;
2101
2102 switch (client) {
2103 case QED_OV_CLIENT_DRV:
2104 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
2105 break;
2106 case QED_OV_CLIENT_USER:
2107 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
2108 break;
2109 case QED_OV_CLIENT_VENDOR_SPEC:
2110 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
2111 break;
2112 default:
2113 DP_NOTICE(p_hwfn, "Invalid client type %d\n", client);
2114 return -EINVAL;
2115 }
2116
2117 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
2118 drv_mb_param, &resp, &param);
2119 if (rc)
2120 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2121
2122 return rc;
2123}
2124
2125int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
2126 struct qed_ptt *p_ptt,
2127 enum qed_ov_driver_state drv_state)
2128{
2129 u32 resp = 0, param = 0;
2130 u32 drv_mb_param;
2131 int rc;
2132
2133 switch (drv_state) {
2134 case QED_OV_DRIVER_STATE_NOT_LOADED:
2135 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
2136 break;
2137 case QED_OV_DRIVER_STATE_DISABLED:
2138 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
2139 break;
2140 case QED_OV_DRIVER_STATE_ACTIVE:
2141 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
2142 break;
2143 default:
2144 DP_NOTICE(p_hwfn, "Invalid driver state %d\n", drv_state);
2145 return -EINVAL;
2146 }
2147
2148 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
2149 drv_mb_param, &resp, &param);
2150 if (rc)
2151 DP_ERR(p_hwfn, "Failed to send driver state\n");
2152
2153 return rc;
2154}
2155
2156int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
2157 struct qed_ptt *p_ptt, u16 mtu)
2158{
2159 u32 resp = 0, param = 0;
2160 u32 drv_mb_param;
2161 int rc;
2162
2163 drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT;
2164 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
2165 drv_mb_param, &resp, &param);
2166 if (rc)
2167 DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);
2168
2169 return rc;
2170}
2171
2172int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
2173 struct qed_ptt *p_ptt, u8 *mac)
2174{
2175 struct qed_mcp_mb_params mb_params;
Mintz, Yuval17991002017-03-23 15:50:17 +02002176 u32 mfw_mac[2];
Sudarsana Kalluru0fefbfb2016-10-31 07:14:21 +02002177 int rc;
2178
2179 memset(&mb_params, 0, sizeof(mb_params));
2180 mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
2181 mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
2182 DRV_MSG_CODE_VMAC_TYPE_SHIFT;
2183 mb_params.param |= MCP_PF_ID(p_hwfn);
Tomer Tayar2f67af8c2017-03-23 15:50:16 +02002184
Mintz, Yuval17991002017-03-23 15:50:17 +02002185	/* The MCP is big-endian, and on LE platforms PCI swaps SHMEM
2186	 * accesses at 32-bit granularity, so the MAC has to be packed in
2187	 * native order (not byte order); otherwise the MFW would read it
2188	 * incorrectly after the swap.
2189 */
2190 mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
2191 mfw_mac[1] = mac[4] << 24 | mac[5] << 16;
2192
2193 mb_params.p_data_src = (u8 *)mfw_mac;
2194 mb_params.data_src_size = 8;
Sudarsana Kalluru0fefbfb2016-10-31 07:14:21 +02002195 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2196 if (rc)
2197 DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
2198
Mintz, Yuval14d39642016-10-31 07:14:23 +02002199 /* Store primary MAC for later possible WoL */
2200 memcpy(p_hwfn->cdev->wol_mac, mac, ETH_ALEN);
2201
Sudarsana Kalluru0fefbfb2016-10-31 07:14:21 +02002202 return rc;
2203}
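
/* Worked example of the native-order packing above: for 00:11:22:33:44:55
 * the words handed to the MFW are mfw_mac[0] = 0x00112233 and
 * mfw_mac[1] = 0x44550000, irrespective of host endianness.
 */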
2204
2205int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
2206 struct qed_ptt *p_ptt, enum qed_ov_wol wol)
2207{
2208 u32 resp = 0, param = 0;
2209 u32 drv_mb_param;
2210 int rc;
2211
Mintz, Yuval14d39642016-10-31 07:14:23 +02002212 if (p_hwfn->hw_info.b_wol_support == QED_WOL_SUPPORT_NONE) {
2213 DP_VERBOSE(p_hwfn, QED_MSG_SP,
2214 "Can't change WoL configuration when WoL isn't supported\n");
2215 return -EINVAL;
2216 }
2217
Sudarsana Kalluru0fefbfb2016-10-31 07:14:21 +02002218 switch (wol) {
2219 case QED_OV_WOL_DEFAULT:
2220 drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
2221 break;
2222 case QED_OV_WOL_DISABLED:
2223 drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
2224 break;
2225 case QED_OV_WOL_ENABLED:
2226 drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
2227 break;
2228 default:
2229 DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
2230 return -EINVAL;
2231 }
2232
2233 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
2234 drv_mb_param, &resp, &param);
2235 if (rc)
2236 DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);
2237
Mintz, Yuval14d39642016-10-31 07:14:23 +02002238 /* Store the WoL update for a future unload */
2239 p_hwfn->cdev->wol_config = (u8)wol;
2240
Sudarsana Kalluru0fefbfb2016-10-31 07:14:21 +02002241 return rc;
2242}
2243
2244int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
2245 struct qed_ptt *p_ptt,
2246 enum qed_ov_eswitch eswitch)
2247{
2248 u32 resp = 0, param = 0;
2249 u32 drv_mb_param;
2250 int rc;
2251
2252 switch (eswitch) {
2253 case QED_OV_ESWITCH_NONE:
2254 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
2255 break;
2256 case QED_OV_ESWITCH_VEB:
2257 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
2258 break;
2259 case QED_OV_ESWITCH_VEPA:
2260 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
2261 break;
2262 default:
2263 DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
2264 return -EINVAL;
2265 }
2266
2267 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
2268 drv_mb_param, &resp, &param);
2269 if (rc)
2270 DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);
2271
2272 return rc;
2273}
2274
Yuval Mintz1a635e42016-08-15 10:42:43 +03002275int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
2276 struct qed_ptt *p_ptt, enum qed_led_mode mode)
Sudarsana Kalluru91420b82015-11-30 12:25:03 +02002277{
2278 u32 resp = 0, param = 0, drv_mb_param;
2279 int rc;
2280
2281 switch (mode) {
2282 case QED_LED_MODE_ON:
2283 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
2284 break;
2285 case QED_LED_MODE_OFF:
2286 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
2287 break;
2288 case QED_LED_MODE_RESTORE:
2289 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
2290 break;
2291 default:
2292 DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode);
2293 return -EINVAL;
2294 }
2295
2296 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
2297 drv_mb_param, &resp, &param);
2298
2299 return rc;
2300}
Sudarsana Reddy Kalluru03dc76c2016-04-28 20:20:52 -04002301
Tomer Tayar41024262016-09-05 14:35:10 +03002302int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
2303 struct qed_ptt *p_ptt, u32 mask_parities)
2304{
2305 u32 resp = 0, param = 0;
2306 int rc;
2307
2308 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
2309 mask_parities, &resp, &param);
2310
2311 if (rc) {
2312 DP_ERR(p_hwfn,
2313 "MCP response failure for mask parities, aborting\n");
2314 } else if (resp != FW_MSG_CODE_OK) {
2315 DP_ERR(p_hwfn,
2316 "MCP did not acknowledge mask parity request. Old MFW?\n");
2317 rc = -EINVAL;
2318 }
2319
2320 return rc;
2321}
2322
Mintz, Yuval7a4b21b2016-10-31 07:14:22 +02002323int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
2324{
2325 u32 bytes_left = len, offset = 0, bytes_to_copy, read_len = 0;
2326 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2327 u32 resp = 0, resp_param = 0;
2328 struct qed_ptt *p_ptt;
2329 int rc = 0;
2330
2331 p_ptt = qed_ptt_acquire(p_hwfn);
2332 if (!p_ptt)
2333 return -EBUSY;
2334
2335 while (bytes_left > 0) {
2336 bytes_to_copy = min_t(u32, bytes_left, MCP_DRV_NVM_BUF_LEN);
2337
2338 rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
2339 DRV_MSG_CODE_NVM_READ_NVRAM,
2340 addr + offset +
2341 (bytes_to_copy <<
Tomer Tayarda090912017-12-27 19:30:07 +02002342 DRV_MB_PARAM_NVM_LEN_OFFSET),
Mintz, Yuval7a4b21b2016-10-31 07:14:22 +02002343 &resp, &resp_param,
2344 &read_len,
2345 (u32 *)(p_buf + offset));
2346
2347 if (rc || (resp != FW_MSG_CODE_NVM_OK)) {
2348 DP_NOTICE(cdev, "MCP command rc = %d\n", rc);
2349 break;
2350 }
2351
2352		/* This can be a lengthy process, and the scheduler might not be
2353		 * preemptible. Sleep a bit to avoid hogging the CPU.
2354 */
2355 if (bytes_left % 0x1000 <
2356 (bytes_left - read_len) % 0x1000)
2357 usleep_range(1000, 2000);
2358
2359 offset += read_len;
2360 bytes_left -= read_len;
2361 }
2362
2363 cdev->mcp_nvm_resp = resp;
2364 qed_ptt_release(p_hwfn, p_ptt);
2365
2366 return rc;
2367}
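
/* The boundary check above makes the read loop sleep roughly once per 4KB:
 * "bytes_left % 0x1000 < (bytes_left - read_len) % 0x1000" holds exactly
 * when the chunk just read moved bytes_left across a 0x1000 multiple.
 */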
2368
Sudarsana Reddy Kalluru62e4d432018-03-28 05:14:21 -07002369int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf)
2370{
2371 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2372 struct qed_ptt *p_ptt;
2373
2374 p_ptt = qed_ptt_acquire(p_hwfn);
2375 if (!p_ptt)
2376 return -EBUSY;
2377
2378 memcpy(p_buf, &cdev->mcp_nvm_resp, sizeof(cdev->mcp_nvm_resp));
2379 qed_ptt_release(p_hwfn, p_ptt);
2380
2381 return 0;
2382}
2383
2384int qed_mcp_nvm_put_file_begin(struct qed_dev *cdev, u32 addr)
2385{
2386 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2387 struct qed_ptt *p_ptt;
2388 u32 resp, param;
2389 int rc;
2390
2391 p_ptt = qed_ptt_acquire(p_hwfn);
2392 if (!p_ptt)
2393 return -EBUSY;
2394 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr,
2395 &resp, &param);
2396 cdev->mcp_nvm_resp = resp;
2397 qed_ptt_release(p_hwfn, p_ptt);
2398
2399 return rc;
2400}
2401
2402int qed_mcp_nvm_write(struct qed_dev *cdev,
2403 u32 cmd, u32 addr, u8 *p_buf, u32 len)
2404{
2405 u32 buf_idx = 0, buf_size, nvm_cmd, nvm_offset, resp = 0, param;
2406 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2407 struct qed_ptt *p_ptt;
2408 int rc = -EINVAL;
2409
2410 p_ptt = qed_ptt_acquire(p_hwfn);
2411 if (!p_ptt)
2412 return -EBUSY;
2413
2414 switch (cmd) {
2415 case QED_PUT_FILE_DATA:
2416 nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
2417 break;
2418 case QED_NVM_WRITE_NVRAM:
2419 nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
2420 break;
2421 default:
2422 DP_NOTICE(p_hwfn, "Invalid nvm write command 0x%x\n", cmd);
2423 rc = -EINVAL;
2424 goto out;
2425 }
2426
2427 while (buf_idx < len) {
2428 buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN);
2429 nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) |
2430 addr) + buf_idx;
2431 rc = qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
2432 &resp, &param, buf_size,
2433 (u32 *)&p_buf[buf_idx]);
2434 if (rc) {
2435 DP_NOTICE(cdev, "nvm write failed, rc = %d\n", rc);
2436 resp = FW_MSG_CODE_ERROR;
2437 break;
2438 }
2439
2440 if (resp != FW_MSG_CODE_OK &&
2441 resp != FW_MSG_CODE_NVM_OK &&
2442 resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
2443 DP_NOTICE(cdev,
2444 "nvm write failed, resp = 0x%08x\n", resp);
2445 rc = -EINVAL;
2446 break;
2447 }
2448
2449		/* This can be a lengthy process, and the scheduler might not be
2450		 * preemptible. Sleep a bit to avoid hogging the CPU.
2451 */
2452 if (buf_idx % 0x1000 > (buf_idx + buf_size) % 0x1000)
2453 usleep_range(1000, 2000);
2454
2455 buf_idx += buf_size;
2456 }
2457
2458 cdev->mcp_nvm_resp = resp;
2459out:
2460 qed_ptt_release(p_hwfn, p_ptt);
2461
2462 return rc;
2463}
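
/* A sketch of the parameter packing used by the write loop above: the chunk
 * length rides in the high bits of the mailbox param while the low bits
 * carry the NVM offset,
 *
 *	nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) | addr) + buf_idx;
 *
 * so each iteration issues one self-describing write of at most
 * MCP_DRV_NVM_BUF_LEN bytes.
 */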
2464
Sudarsana Reddy Kalluru03dc76c2016-04-28 20:20:52 -04002465int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2466{
2467 u32 drv_mb_param = 0, rsp, param;
2468 int rc = 0;
2469
2470 drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
2471 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
2472
2473 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
2474 drv_mb_param, &rsp, &param);
2475
2476 if (rc)
2477 return rc;
2478
2479 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
2480 (param != DRV_MB_PARAM_BIST_RC_PASSED))
2481 rc = -EAGAIN;
2482
2483 return rc;
2484}
2485
2486int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2487{
2488 u32 drv_mb_param, rsp, param;
2489 int rc = 0;
2490
2491 drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
2492 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
2493
2494 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
2495 drv_mb_param, &rsp, &param);
2496
2497 if (rc)
2498 return rc;
2499
2500 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
2501 (param != DRV_MB_PARAM_BIST_RC_PASSED))
2502 rc = -EAGAIN;
2503
2504 return rc;
2505}
Mintz, Yuval7a4b21b2016-10-31 07:14:22 +02002506
Sudarsana Reddy Kalluru43645ce2018-03-28 05:14:19 -07002507int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn,
2508 struct qed_ptt *p_ptt,
2509 u32 *num_images)
Mintz, Yuval7a4b21b2016-10-31 07:14:22 +02002510{
2511 u32 drv_mb_param = 0, rsp;
2512 int rc = 0;
2513
2514 drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
2515 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
2516
2517 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
2518 drv_mb_param, &rsp, num_images);
2519 if (rc)
2520 return rc;
2521
2522	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)
2523 rc = -EINVAL;
2524
2525 return rc;
2526}
2527
Sudarsana Reddy Kalluru43645ce2018-03-28 05:14:19 -07002528int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
2529 struct qed_ptt *p_ptt,
2530 struct bist_nvm_image_att *p_image_att,
2531 u32 image_index)
Mintz, Yuval7a4b21b2016-10-31 07:14:22 +02002532{
2533 u32 buf_size = 0, param, resp = 0, resp_param = 0;
2534 int rc;
2535
2536 param = DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
2537 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT;
2538 param |= image_index << DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT;
2539
2540 rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
2541 DRV_MSG_CODE_BIST_TEST, param,
2542 &resp, &resp_param,
2543 &buf_size,
2544 (u32 *)p_image_att);
2545 if (rc)
2546 return rc;
2547
2548 if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
2549 (p_image_att->return_code != 1))
2550 rc = -EINVAL;
2551
2552 return rc;
2553}
Tomer Tayar2edbff82016-10-31 07:14:27 +02002554
Sudarsana Reddy Kalluru43645ce2018-03-28 05:14:19 -07002555int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
2556{
2557 struct qed_nvm_image_info *nvm_info = &p_hwfn->nvm_info;
2558 struct qed_ptt *p_ptt;
2559 int rc;
2560 u32 i;
2561
2562 p_ptt = qed_ptt_acquire(p_hwfn);
2563 if (!p_ptt) {
2564 DP_ERR(p_hwfn, "failed to acquire ptt\n");
2565 return -EBUSY;
2566 }
2567
2568 /* Acquire from MFW the amount of available images */
2569 nvm_info->num_images = 0;
2570 rc = qed_mcp_bist_nvm_get_num_images(p_hwfn,
2571 p_ptt, &nvm_info->num_images);
2572 if (rc == -EOPNOTSUPP) {
2573 DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n");
2574 goto out;
2575 } else if (rc || !nvm_info->num_images) {
2576 DP_ERR(p_hwfn, "Failed getting number of images\n");
2577 goto err0;
2578 }
2579
2580	nvm_info->image_att = kmalloc_array(nvm_info->num_images,
2581					    sizeof(struct bist_nvm_image_att),
2582					    GFP_KERNEL);
2583 if (!nvm_info->image_att) {
2584 rc = -ENOMEM;
2585 goto err0;
2586 }
2587
2588 /* Iterate over images and get their attributes */
2589 for (i = 0; i < nvm_info->num_images; i++) {
2590 rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt,
2591 &nvm_info->image_att[i], i);
2592 if (rc) {
2593 DP_ERR(p_hwfn,
2594 "Failed getting image index %d attributes\n", i);
2595 goto err1;
2596 }
2597
2598 DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i,
2599 nvm_info->image_att[i].len);
2600 }
2601out:
2602 qed_ptt_release(p_hwfn, p_ptt);
2603 return 0;
2604
2605err1:
2606 kfree(nvm_info->image_att);
2607err0:
2608 qed_ptt_release(p_hwfn, p_ptt);
2609 return rc;
2610}
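
/* qed_mcp_nvm_info_populate() is meant to run once, early in the init flow,
 * so that later lookups such as qed_mcp_get_nvm_image_att() below can walk
 * the cached p_hwfn->nvm_info.image_att[] array without going back to the
 * MFW.
 */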
2611
Denis Bolotin1ac43292018-04-23 14:56:05 +03002612int
Mintz, Yuval20675b32017-06-02 08:58:32 +03002613qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
Mintz, Yuval20675b32017-06-02 08:58:32 +03002614 enum qed_nvm_images image_id,
2615 struct qed_nvm_image_att *p_image_att)
2616{
Mintz, Yuval20675b32017-06-02 08:58:32 +03002617 enum nvm_image_type type;
Sudarsana Reddy Kalluru43645ce2018-03-28 05:14:19 -07002618 u32 i;
Mintz, Yuval20675b32017-06-02 08:58:32 +03002619
2620 /* Translate image_id into MFW definitions */
2621 switch (image_id) {
2622 case QED_NVM_IMAGE_ISCSI_CFG:
2623 type = NVM_TYPE_ISCSI_CFG;
2624 break;
2625 case QED_NVM_IMAGE_FCOE_CFG:
2626 type = NVM_TYPE_FCOE_CFG;
2627 break;
Denis Bolotin1ac43292018-04-23 14:56:05 +03002628 case QED_NVM_IMAGE_NVM_CFG1:
2629 type = NVM_TYPE_NVM_CFG1;
2630 break;
2631 case QED_NVM_IMAGE_DEFAULT_CFG:
2632 type = NVM_TYPE_DEFAULT_CFG;
2633 break;
2634 case QED_NVM_IMAGE_NVM_META:
2635 type = NVM_TYPE_META;
2636 break;
Mintz, Yuval20675b32017-06-02 08:58:32 +03002637 default:
2638 DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n",
2639 image_id);
2640 return -EINVAL;
2641 }
2642
Sudarsana Reddy Kalluru43645ce2018-03-28 05:14:19 -07002643 for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
2644 if (type == p_hwfn->nvm_info.image_att[i].image_type)
Mintz, Yuval20675b32017-06-02 08:58:32 +03002645 break;
Sudarsana Reddy Kalluru43645ce2018-03-28 05:14:19 -07002646 if (i == p_hwfn->nvm_info.num_images) {
Mintz, Yuval20675b32017-06-02 08:58:32 +03002647 DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
2648 "Failed to find nvram image of type %08x\n",
2649 image_id);
Sudarsana Reddy Kalluru43645ce2018-03-28 05:14:19 -07002650 return -ENOENT;
Mintz, Yuval20675b32017-06-02 08:58:32 +03002651 }
2652
Sudarsana Reddy Kalluru43645ce2018-03-28 05:14:19 -07002653 p_image_att->start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
2654 p_image_att->length = p_hwfn->nvm_info.image_att[i].len;
Mintz, Yuval20675b32017-06-02 08:58:32 +03002655
2656 return 0;
2657}
2658
2659int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
Mintz, Yuval20675b32017-06-02 08:58:32 +03002660 enum qed_nvm_images image_id,
2661 u8 *p_buffer, u32 buffer_len)
2662{
2663 struct qed_nvm_image_att image_att;
2664 int rc;
2665
2666 memset(p_buffer, 0, buffer_len);
2667
Denis Bolotinb60bfdf2018-04-23 14:56:04 +03002668 rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
Mintz, Yuval20675b32017-06-02 08:58:32 +03002669 if (rc)
2670 return rc;
2671
2672 /* Validate sizes - both the image's and the supplied buffer's */
2673 if (image_att.length <= 4) {
2674 DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
2675 "Image [%d] is too small - only %d bytes\n",
2676 image_id, image_att.length);
2677 return -EINVAL;
2678 }
2679
Mintz, Yuval20675b32017-06-02 08:58:32 +03002680 if (image_att.length > buffer_len) {
2681 DP_VERBOSE(p_hwfn,
2682 QED_MSG_STORAGE,
2683 "Image [%d] is too big - %08x bytes where only %08x are available\n",
2684 image_id, image_att.length, buffer_len);
2685 return -ENOMEM;
2686 }
2687
2688 return qed_mcp_nvm_read(p_hwfn->cdev, image_att.start_addr,
2689 p_buffer, image_att.length);
2690}
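
/* Usage sketch (hypothetical buffer size, error handling elided):
 *
 *	u8 cfg[4096];
 *
 *	rc = qed_mcp_get_nvm_image(p_hwfn, QED_NVM_IMAGE_ISCSI_CFG,
 *				   cfg, sizeof(cfg));
 *
 * The helper rejects images smaller than 4 bytes or larger than the supplied
 * buffer before issuing the NVM read.
 */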
2691
Tomer Tayar9c8517c2017-03-28 15:12:55 +03002692static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id)
2693{
2694 enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
2695
2696 switch (res_id) {
2697 case QED_SB:
2698 mfw_res_id = RESOURCE_NUM_SB_E;
2699 break;
2700 case QED_L2_QUEUE:
2701 mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
2702 break;
2703 case QED_VPORT:
2704 mfw_res_id = RESOURCE_NUM_VPORT_E;
2705 break;
2706 case QED_RSS_ENG:
2707 mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
2708 break;
2709 case QED_PQ:
2710 mfw_res_id = RESOURCE_NUM_PQ_E;
2711 break;
2712 case QED_RL:
2713 mfw_res_id = RESOURCE_NUM_RL_E;
2714 break;
2715 case QED_MAC:
2716 case QED_VLAN:
2717 /* Each VFC resource can accommodate both a MAC and a VLAN */
2718 mfw_res_id = RESOURCE_VFC_FILTER_E;
2719 break;
2720 case QED_ILT:
2721 mfw_res_id = RESOURCE_ILT_E;
2722 break;
2723 case QED_LL2_QUEUE:
2724 mfw_res_id = RESOURCE_LL2_QUEUE_E;
2725 break;
2726 case QED_RDMA_CNQ_RAM:
2727 case QED_CMDQS_CQS:
2728 /* CNQ/CMDQS are the same resource */
2729 mfw_res_id = RESOURCE_CQS_E;
2730 break;
2731 case QED_RDMA_STATS_QUEUE:
2732 mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
2733 break;
2734 case QED_BDQ:
2735 mfw_res_id = RESOURCE_BDQ_E;
2736 break;
2737 default:
2738 break;
2739 }
2740
2741 return mfw_res_id;
2742}
2743
2744#define QED_RESC_ALLOC_VERSION_MAJOR 2
Tomer Tayar2edbff82016-10-31 07:14:27 +02002745#define QED_RESC_ALLOC_VERSION_MINOR 0
2746#define QED_RESC_ALLOC_VERSION \
2747 ((QED_RESC_ALLOC_VERSION_MAJOR << \
2748 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
2749 (QED_RESC_ALLOC_VERSION_MINOR << \
2750 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
Tomer Tayar9c8517c2017-03-28 15:12:55 +03002751
2752struct qed_resc_alloc_in_params {
2753 u32 cmd;
2754 enum qed_resources res_id;
2755 u32 resc_max_val;
2756};
2757
2758struct qed_resc_alloc_out_params {
2759 u32 mcp_resp;
2760 u32 mcp_param;
2761 u32 resc_num;
2762 u32 resc_start;
2763 u32 vf_resc_num;
2764 u32 vf_resc_start;
2765 u32 flags;
2766};
2767
2768static int
2769qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn,
2770 struct qed_ptt *p_ptt,
2771 struct qed_resc_alloc_in_params *p_in_params,
2772 struct qed_resc_alloc_out_params *p_out_params)
Tomer Tayar2edbff82016-10-31 07:14:27 +02002773{
2774 struct qed_mcp_mb_params mb_params;
Tomer Tayar9c8517c2017-03-28 15:12:55 +03002775 struct resource_info mfw_resc_info;
Tomer Tayar2edbff82016-10-31 07:14:27 +02002776 int rc;
2777
Tomer Tayar9c8517c2017-03-28 15:12:55 +03002778 memset(&mfw_resc_info, 0, sizeof(mfw_resc_info));
Mintz, Yuvalbb480242016-11-06 17:12:27 +02002779
Tomer Tayar9c8517c2017-03-28 15:12:55 +03002780 mfw_resc_info.res_id = qed_mcp_get_mfw_res_id(p_in_params->res_id);
2781 if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
2782 DP_ERR(p_hwfn,
2783 "Failed to match resource %d [%s] with the MFW resources\n",
2784 p_in_params->res_id,
2785 qed_hw_get_resc_name(p_in_params->res_id));
2786 return -EINVAL;
2787 }
2788
2789 switch (p_in_params->cmd) {
2790 case DRV_MSG_SET_RESOURCE_VALUE_MSG:
2791 mfw_resc_info.size = p_in_params->resc_max_val;
2792 /* Fallthrough */
2793 case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
2794 break;
2795 default:
2796 DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
2797 p_in_params->cmd);
2798 return -EINVAL;
2799 }
2800
2801 memset(&mb_params, 0, sizeof(mb_params));
2802 mb_params.cmd = p_in_params->cmd;
2803 mb_params.param = QED_RESC_ALLOC_VERSION;
2804 mb_params.p_data_src = &mfw_resc_info;
2805 mb_params.data_src_size = sizeof(mfw_resc_info);
2806 mb_params.p_data_dst = mb_params.p_data_src;
2807 mb_params.data_dst_size = mb_params.data_src_size;
2808
2809 DP_VERBOSE(p_hwfn,
2810 QED_MSG_SP,
2811 "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
2812 p_in_params->cmd,
2813 p_in_params->res_id,
2814 qed_hw_get_resc_name(p_in_params->res_id),
2815 QED_MFW_GET_FIELD(mb_params.param,
2816 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
2817 QED_MFW_GET_FIELD(mb_params.param,
2818 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
2819 p_in_params->resc_max_val);
2820
Tomer Tayar2edbff82016-10-31 07:14:27 +02002821 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2822 if (rc)
2823 return rc;
2824
Tomer Tayar9c8517c2017-03-28 15:12:55 +03002825 p_out_params->mcp_resp = mb_params.mcp_resp;
2826 p_out_params->mcp_param = mb_params.mcp_param;
2827 p_out_params->resc_num = mfw_resc_info.size;
2828 p_out_params->resc_start = mfw_resc_info.offset;
2829 p_out_params->vf_resc_num = mfw_resc_info.vf_size;
2830 p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
2831 p_out_params->flags = mfw_resc_info.flags;
Tomer Tayar2edbff82016-10-31 07:14:27 +02002832
2833 DP_VERBOSE(p_hwfn,
2834 QED_MSG_SP,
Tomer Tayar9c8517c2017-03-28 15:12:55 +03002835 "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
2836 QED_MFW_GET_FIELD(p_out_params->mcp_param,
2837 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
2838 QED_MFW_GET_FIELD(p_out_params->mcp_param,
2839 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
2840 p_out_params->resc_num,
2841 p_out_params->resc_start,
2842 p_out_params->vf_resc_num,
2843 p_out_params->vf_resc_start, p_out_params->flags);
2844
2845 return 0;
2846}
2847
2848int
2849qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
2850 struct qed_ptt *p_ptt,
2851 enum qed_resources res_id,
2852 u32 resc_max_val, u32 *p_mcp_resp)
2853{
2854 struct qed_resc_alloc_out_params out_params;
2855 struct qed_resc_alloc_in_params in_params;
2856 int rc;
2857
2858 memset(&in_params, 0, sizeof(in_params));
2859 in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
2860 in_params.res_id = res_id;
2861 in_params.resc_max_val = resc_max_val;
2862 memset(&out_params, 0, sizeof(out_params));
2863 rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
2864 &out_params);
2865 if (rc)
2866 return rc;
2867
2868 *p_mcp_resp = out_params.mcp_resp;
2869
2870 return 0;
2871}
2872
2873int
2874qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
2875 struct qed_ptt *p_ptt,
2876 enum qed_resources res_id,
2877 u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start)
2878{
2879 struct qed_resc_alloc_out_params out_params;
2880 struct qed_resc_alloc_in_params in_params;
2881 int rc;
2882
2883 memset(&in_params, 0, sizeof(in_params));
2884 in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
2885 in_params.res_id = res_id;
2886 memset(&out_params, 0, sizeof(out_params));
2887 rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
2888 &out_params);
2889 if (rc)
2890 return rc;
2891
2892 *p_mcp_resp = out_params.mcp_resp;
2893
2894 if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
2895 *p_resc_num = out_params.resc_num;
2896 *p_resc_start = out_params.resc_start;
2897 }
Tomer Tayar2edbff82016-10-31 07:14:27 +02002898
2899 return 0;
2900}
Mintz, Yuval18a69e32017-03-28 15:12:53 +03002901
2902int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2903{
2904 u32 mcp_resp, mcp_param;
2905
2906 return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
2907 &mcp_resp, &mcp_param);
2908}
Tomer Tayar95691c92017-03-28 15:12:54 +03002909
2910static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
2911 struct qed_ptt *p_ptt,
2912 u32 param, u32 *p_mcp_resp, u32 *p_mcp_param)
2913{
2914 int rc;
2915
2916 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
2917 p_mcp_resp, p_mcp_param);
2918 if (rc)
2919 return rc;
2920
2921 if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
2922 DP_INFO(p_hwfn,
2923 "The resource command is unsupported by the MFW\n");
2924 return -EINVAL;
2925 }
2926
2927 if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
2928 u8 opcode = QED_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
2929
2930 DP_NOTICE(p_hwfn,
2931 "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
2932 param, opcode);
2933 return -EINVAL;
2934 }
2935
2936 return rc;
2937}
2938
2939int
2940__qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
2941 struct qed_ptt *p_ptt,
2942 struct qed_resc_lock_params *p_params)
2943{
2944 u32 param = 0, mcp_resp, mcp_param;
2945 u8 opcode;
2946 int rc;
2947
2948 switch (p_params->timeout) {
2949 case QED_MCP_RESC_LOCK_TO_DEFAULT:
2950 opcode = RESOURCE_OPCODE_REQ;
2951 p_params->timeout = 0;
2952 break;
2953 case QED_MCP_RESC_LOCK_TO_NONE:
2954 opcode = RESOURCE_OPCODE_REQ_WO_AGING;
2955 p_params->timeout = 0;
2956 break;
2957 default:
2958 opcode = RESOURCE_OPCODE_REQ_W_AGING;
2959 break;
2960 }
2961
2962 QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
2963 QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
2964 QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
2965
2966 DP_VERBOSE(p_hwfn,
2967 QED_MSG_SP,
2968 "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
2969 param, p_params->timeout, opcode, p_params->resource);
2970
2971 /* Attempt to acquire the resource */
2972 rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
2973 if (rc)
2974 return rc;
2975
2976 /* Analyze the response */
2977 p_params->owner = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
2978 opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
2979
2980 DP_VERBOSE(p_hwfn,
2981 QED_MSG_SP,
2982 "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
2983 mcp_param, opcode, p_params->owner);
2984
2985 switch (opcode) {
2986 case RESOURCE_OPCODE_GNT:
2987 p_params->b_granted = true;
2988 break;
2989 case RESOURCE_OPCODE_BUSY:
2990 p_params->b_granted = false;
2991 break;
2992 default:
2993 DP_NOTICE(p_hwfn,
2994 "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
2995 mcp_param, opcode);
2996 return -EINVAL;
2997 }
2998
2999 return 0;
3000}
3001
3002int
3003qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
3004 struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params)
3005{
3006 u32 retry_cnt = 0;
3007 int rc;
3008
3009 do {
3010 /* No need for an interval before the first iteration */
3011 if (retry_cnt) {
3012 if (p_params->sleep_b4_retry) {
3013 u16 retry_interval_in_ms =
3014 DIV_ROUND_UP(p_params->retry_interval,
3015 1000);
3016
3017 msleep(retry_interval_in_ms);
3018 } else {
3019 udelay(p_params->retry_interval);
3020 }
3021 }
3022
3023 rc = __qed_mcp_resc_lock(p_hwfn, p_ptt, p_params);
3024 if (rc)
3025 return rc;
3026
3027 if (p_params->b_granted)
3028 break;
3029 } while (retry_cnt++ < p_params->retry_num);
3030
3031 return 0;
3032}
3033
3034int
3035qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
3036 struct qed_ptt *p_ptt,
3037 struct qed_resc_unlock_params *p_params)
3038{
3039 u32 param = 0, mcp_resp, mcp_param;
3040 u8 opcode;
3041 int rc;
3042
3043 opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
3044 : RESOURCE_OPCODE_RELEASE;
3045 QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3046 QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3047
3048 DP_VERBOSE(p_hwfn, QED_MSG_SP,
3049 "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
3050 param, opcode, p_params->resource);
3051
3052 /* Attempt to release the resource */
3053 rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
3054 if (rc)
3055 return rc;
3056
3057 /* Analyze the response */
3058 opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3059
3060 DP_VERBOSE(p_hwfn, QED_MSG_SP,
3061 "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
3062 mcp_param, opcode);
3063
3064 switch (opcode) {
3065 case RESOURCE_OPCODE_RELEASED_PREVIOUS:
3066 DP_INFO(p_hwfn,
3067 "Resource unlock request for an already released resource [%d]\n",
3068 p_params->resource);
3069 /* Fallthrough */
3070 case RESOURCE_OPCODE_RELEASED:
3071 p_params->b_released = true;
3072 break;
3073 case RESOURCE_OPCODE_WRONG_OWNER:
3074 p_params->b_released = false;
3075 break;
3076 default:
3077 DP_NOTICE(p_hwfn,
3078 "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
3079 mcp_param, opcode);
3080 return -EINVAL;
3081 }
3082
3083 return 0;
3084}
sudarsana.kalluru@cavium.comf470f222017-04-26 09:00:49 -07003085
3086void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
3087 struct qed_resc_unlock_params *p_unlock,
3088 enum qed_resc_lock
3089 resource, bool b_is_permanent)
3090{
3091 if (p_lock) {
3092 memset(p_lock, 0, sizeof(*p_lock));
3093
3094 /* Permanent resources don't require aging, and there's no
3095		 * point in trying to acquire them more than once, since no
3096		 * other entity is expected to release them.
3097 */
3098 if (b_is_permanent) {
3099 p_lock->timeout = QED_MCP_RESC_LOCK_TO_NONE;
3100 } else {
3101 p_lock->retry_num = QED_MCP_RESC_LOCK_RETRY_CNT_DFLT;
3102 p_lock->retry_interval =
3103 QED_MCP_RESC_LOCK_RETRY_VAL_DFLT;
3104 p_lock->sleep_b4_retry = true;
3105 }
3106
3107 p_lock->resource = resource;
3108 }
3109
3110 if (p_unlock) {
3111 memset(p_unlock, 0, sizeof(*p_unlock));
3112 p_unlock->resource = resource;
3113 }
3114}
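
/* Typical pairing of the helpers above (sketch; QED_RESC_LOCK_PTP_PORT0 is
 * just an example resource id, and error handling is elided):
 *
 *	struct qed_resc_lock_params lock;
 *	struct qed_resc_unlock_params unlock;
 *
 *	qed_mcp_resc_lock_default_init(&lock, &unlock,
 *				       QED_RESC_LOCK_PTP_PORT0, false);
 *	if (!qed_mcp_resc_lock(p_hwfn, p_ptt, &lock) && lock.b_granted) {
 *		... critical section ...
 *		qed_mcp_resc_unlock(p_hwfn, p_ptt, &unlock);
 *	}
 */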
Sudarsana Reddy Kalluru645874e2017-07-26 06:07:11 -07003115
3116int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3117{
3118 u32 mcp_resp;
3119 int rc;
3120
3121 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
3122 0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
3123 if (!rc)
3124 DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_PROBE),
3125 "MFW supported features: %08x\n",
3126 p_hwfn->mcp_info->capabilities);
3127
3128 return rc;
3129}
3130
3131int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3132{
3133 u32 mcp_resp, mcp_param, features;
3134
3135 features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE;
3136
3137 return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
3138 features, &mcp_resp, &mcp_param);
3139}