Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001/* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation
3 *
4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of
6 * this source tree.
7 */
8
9#include <linux/types.h>
10#include <asm/byteorder.h>
11#include <linux/io.h>
12#include <linux/delay.h>
13#include <linux/dma-mapping.h>
14#include <linux/errno.h>
15#include <linux/kernel.h>
16#include <linux/mutex.h>
17#include <linux/pci.h>
18#include <linux/slab.h>
19#include <linux/string.h>
20#include <linux/etherdevice.h>
21#include <linux/qed/qed_chain.h>
22#include <linux/qed/qed_if.h>
23#include "qed.h"
24#include "qed_cxt.h"
Sudarsana Reddy Kalluru39651ab2016-05-17 06:44:26 -040025#include "qed_dcbx.h"
Yuval Mintzfe56b9e2015-10-26 11:02:25 +020026#include "qed_dev_api.h"
27#include "qed_hsi.h"
28#include "qed_hw.h"
29#include "qed_init_ops.h"
30#include "qed_int.h"
31#include "qed_mcp.h"
32#include "qed_reg_addr.h"
33#include "qed_sp.h"
Yuval Mintz32a47e72016-05-11 16:36:12 +030034#include "qed_sriov.h"
Yuval Mintz0b55e272016-05-11 16:36:15 +030035#include "qed_vf.h"
Yuval Mintzfe56b9e2015-10-26 11:02:25 +020036
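/* qm_lock serializes the QM stop/start commands issued while the QM is
 * reconfigured on the fly (see qed_qm_reconf()); it is initialized lazily
 * on the first qed_hw_init() invocation via the qm_lock_init flag below.
 */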
Sudarsana Reddy Kalluru39651ab2016-05-17 06:44:26 -040037static spinlock_t qm_lock;
38static bool qm_lock_init = false;
39
Yuval Mintzfe56b9e2015-10-26 11:02:25 +020040/* API common to all protocols */
Ram Amranic2035ee2016-03-02 20:26:00 +020041enum BAR_ID {
42 BAR_ID_0, /* used for GRC */
43 BAR_ID_1 /* Used for doorbells */
44};
45
46static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
47 enum BAR_ID bar_id)
48{
Yuval Mintz1408cc1f2016-05-11 16:36:14 +030049 u32 bar_reg = (bar_id == BAR_ID_0 ?
50 PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
51 u32 val;
Ram Amranic2035ee2016-03-02 20:26:00 +020052
Yuval Mintz1408cc1f2016-05-11 16:36:14 +030053 if (IS_VF(p_hwfn->cdev))
54 return 1 << 17;
55
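	/* The BAR-size register uses a log2-style encoding: a non-zero value
	 * 'val' translates to 2^(val + 15) bytes (1 -> 64kB, 2 -> 128kB, ...).
	 * VFs (handled above) always assume a fixed 128kB (1 << 17) window.
	 */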
56 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
Ram Amranic2035ee2016-03-02 20:26:00 +020057 if (val)
58 return 1 << (val + 15);
59
 60	/* Old MFW initialized the above register only conditionally */
61 if (p_hwfn->cdev->num_hwfns > 1) {
62 DP_INFO(p_hwfn,
63 "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
 64		return bar_id == BAR_ID_0 ? 256 * 1024 : 512 * 1024;
65 } else {
66 DP_INFO(p_hwfn,
67 "BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
68 return 512 * 1024;
69 }
70}
71
Yuval Mintzfe56b9e2015-10-26 11:02:25 +020072void qed_init_dp(struct qed_dev *cdev,
73 u32 dp_module, u8 dp_level)
74{
75 u32 i;
76
77 cdev->dp_level = dp_level;
78 cdev->dp_module = dp_module;
79 for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
80 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
81
82 p_hwfn->dp_level = dp_level;
83 p_hwfn->dp_module = dp_module;
84 }
85}
86
87void qed_init_struct(struct qed_dev *cdev)
88{
89 u8 i;
90
91 for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
92 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
93
94 p_hwfn->cdev = cdev;
95 p_hwfn->my_id = i;
96 p_hwfn->b_active = false;
97
98 mutex_init(&p_hwfn->dmae_info.mutex);
99 }
100
101 /* hwfn 0 is always active */
102 cdev->hwfns[0].b_active = true;
103
104 /* set the default cache alignment to 128 */
105 cdev->cache_shift = 7;
106}
107
108static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
109{
110 struct qed_qm_info *qm_info = &p_hwfn->qm_info;
111
112 kfree(qm_info->qm_pq_params);
113 qm_info->qm_pq_params = NULL;
114 kfree(qm_info->qm_vport_params);
115 qm_info->qm_vport_params = NULL;
116 kfree(qm_info->qm_port_params);
117 qm_info->qm_port_params = NULL;
Manish Choprabcd197c2016-04-26 10:56:08 -0400118 kfree(qm_info->wfq_data);
119 qm_info->wfq_data = NULL;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200120}
121
122void qed_resc_free(struct qed_dev *cdev)
123{
124 int i;
125
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300126 if (IS_VF(cdev))
127 return;
128
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200129 kfree(cdev->fw_data);
130 cdev->fw_data = NULL;
131
132 kfree(cdev->reset_stats);
133
134 for_each_hwfn(cdev, i) {
135 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
136
Yuval Mintz25c089d2015-10-26 11:02:26 +0200137 kfree(p_hwfn->p_tx_cids);
138 p_hwfn->p_tx_cids = NULL;
139 kfree(p_hwfn->p_rx_cids);
140 p_hwfn->p_rx_cids = NULL;
141 }
142
143 for_each_hwfn(cdev, i) {
144 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
145
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200146 qed_cxt_mngr_free(p_hwfn);
147 qed_qm_info_free(p_hwfn);
148 qed_spq_free(p_hwfn);
149 qed_eq_free(p_hwfn, p_hwfn->p_eq);
150 qed_consq_free(p_hwfn, p_hwfn->p_consq);
151 qed_int_free(p_hwfn);
Yuval Mintz32a47e72016-05-11 16:36:12 +0300152 qed_iov_free(p_hwfn);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200153 qed_dmae_info_free(p_hwfn);
Sudarsana Reddy Kalluru39651ab2016-05-17 06:44:26 -0400154 qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200155 }
156}
157
Sudarsana Reddy Kalluru79529292016-05-26 11:01:20 +0300158static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200159{
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300160 u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200161 struct qed_qm_info *qm_info = &p_hwfn->qm_info;
162 struct init_qm_port_params *p_qm_port;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200163 u16 num_pqs, multi_cos_tcs = 1;
Yuval Mintzcc3d5eb2016-05-26 11:01:21 +0300164 u8 pf_wfq = qm_info->pf_wfq;
165 u32 pf_rl = qm_info->pf_rl;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300166 u16 num_vfs = 0;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200167
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300168#ifdef CONFIG_QED_SRIOV
169 if (p_hwfn->cdev->p_iov_info)
170 num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
171#endif
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200172 memset(qm_info, 0, sizeof(*qm_info));
173
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300174 num_pqs = multi_cos_tcs + num_vfs + 1; /* The '1' is for pure-LB */
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200175 num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
176
177 /* Sanity checking that setup requires legal number of resources */
178 if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
179 DP_ERR(p_hwfn,
180 "Need too many Physical queues - 0x%04x when only %04x are available\n",
181 num_pqs, RESC_NUM(p_hwfn, QED_PQ));
182 return -EINVAL;
183 }
184
185	/* PQs will be arranged as follows: first the per-TC PQs, then the
186	 * pure-LB PQ, and finally the per-VF PQs. */
Sudarsana Reddy Kalluru79529292016-05-26 11:01:20 +0300187 qm_info->qm_pq_params = kcalloc(num_pqs,
188 sizeof(struct init_qm_pq_params),
189 b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200190 if (!qm_info->qm_pq_params)
191 goto alloc_err;
192
Sudarsana Reddy Kalluru79529292016-05-26 11:01:20 +0300193 qm_info->qm_vport_params = kcalloc(num_vports,
194 sizeof(struct init_qm_vport_params),
195 b_sleepable ? GFP_KERNEL
196 : GFP_ATOMIC);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200197 if (!qm_info->qm_vport_params)
198 goto alloc_err;
199
Sudarsana Reddy Kalluru79529292016-05-26 11:01:20 +0300200 qm_info->qm_port_params = kcalloc(MAX_NUM_PORTS,
201 sizeof(struct init_qm_port_params),
202 b_sleepable ? GFP_KERNEL
203 : GFP_ATOMIC);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200204 if (!qm_info->qm_port_params)
205 goto alloc_err;
206
Sudarsana Reddy Kalluru79529292016-05-26 11:01:20 +0300207 qm_info->wfq_data = kcalloc(num_vports, sizeof(struct qed_wfq_data),
208 b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
Manish Choprabcd197c2016-04-26 10:56:08 -0400209 if (!qm_info->wfq_data)
210 goto alloc_err;
211
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200212 vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
213
214 /* First init per-TC PQs */
Sudarsana Reddy Kalluru39651ab2016-05-17 06:44:26 -0400215 for (i = 0; i < multi_cos_tcs; i++) {
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300216 struct init_qm_pq_params *params =
Sudarsana Reddy Kalluru39651ab2016-05-17 06:44:26 -0400217 &qm_info->qm_pq_params[curr_queue++];
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200218
Sudarsana Reddy Kalluru39651ab2016-05-17 06:44:26 -0400219 if (p_hwfn->hw_info.personality == QED_PCI_ETH) {
220 params->vport_id = vport_id;
221 params->tc_id = p_hwfn->hw_info.non_offload_tc;
222 params->wrr_group = 1;
223 } else {
224 params->vport_id = vport_id;
225 params->tc_id = p_hwfn->hw_info.offload_tc;
226 params->wrr_group = 1;
227 }
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200228 }
229
230 /* Then init pure-LB PQ */
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300231 qm_info->pure_lb_pq = curr_queue;
232 qm_info->qm_pq_params[curr_queue].vport_id =
233 (u8) RESC_START(p_hwfn, QED_VPORT);
234 qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC;
235 qm_info->qm_pq_params[curr_queue].wrr_group = 1;
236 curr_queue++;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200237
238 qm_info->offload_pq = 0;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300239 /* Then init per-VF PQs */
240 vf_offset = curr_queue;
241 for (i = 0; i < num_vfs; i++) {
242 /* First vport is used by the PF */
243 qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1;
244 qm_info->qm_pq_params[curr_queue].tc_id =
245 p_hwfn->hw_info.non_offload_tc;
246 qm_info->qm_pq_params[curr_queue].wrr_group = 1;
247 curr_queue++;
248 }
249
250 qm_info->vf_queues_offset = vf_offset;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200251 qm_info->num_pqs = num_pqs;
252 qm_info->num_vports = num_vports;
253
254 /* Initialize qm port parameters */
255 num_ports = p_hwfn->cdev->num_ports_in_engines;
256 for (i = 0; i < num_ports; i++) {
257 p_qm_port = &qm_info->qm_port_params[i];
258 p_qm_port->active = 1;
259 p_qm_port->num_active_phys_tcs = 4;
260 p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
261 p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
262 }
263
264 qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
265
266 qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
267
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300268 qm_info->num_vf_pqs = num_vfs;
269 qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200270
Manish Chopraa64b02d2016-04-26 10:56:10 -0400271 for (i = 0; i < qm_info->num_vports; i++)
272 qm_info->qm_vport_params[i].vport_wfq = 1;
273
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200274 qm_info->vport_rl_en = 1;
Manish Chopraa64b02d2016-04-26 10:56:10 -0400275 qm_info->vport_wfq_en = 1;
Yuval Mintzcc3d5eb2016-05-26 11:01:21 +0300276 qm_info->pf_rl = pf_rl;
277 qm_info->pf_wfq = pf_wfq;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200278
279 return 0;
280
281alloc_err:
282 DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
Manish Choprabcd197c2016-04-26 10:56:08 -0400283 qed_qm_info_free(p_hwfn);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200284 return -ENOMEM;
285}
286
Sudarsana Reddy Kalluru39651ab2016-05-17 06:44:26 -0400287/* This function reconfigures the QM pf on the fly.
288 * For this purpose we:
289 * 1. reconfigure the QM database
290 * 2. set new values to runtime array
291 * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
292 * 4. activate init tool in QM_PF stage
293 * 5. send an sdm_qm_cmd through rbc interface to release the QM
294 */
295int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
296{
297 struct qed_qm_info *qm_info = &p_hwfn->qm_info;
298 bool b_rc;
299 int rc;
300
301 /* qm_info is allocated in qed_init_qm_info() which is already called
302 * from qed_resc_alloc() or previous call of qed_qm_reconf().
303 * The allocated size may change each init, so we free it before next
304 * allocation.
305 */
306 qed_qm_info_free(p_hwfn);
307
308 /* initialize qed's qm data structure */
Sudarsana Reddy Kalluru79529292016-05-26 11:01:20 +0300309 rc = qed_init_qm_info(p_hwfn, false);
Sudarsana Reddy Kalluru39651ab2016-05-17 06:44:26 -0400310 if (rc)
311 return rc;
312
313 /* stop PF's qm queues */
314 spin_lock_bh(&qm_lock);
315 b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
316 qm_info->start_pq, qm_info->num_pqs);
317 spin_unlock_bh(&qm_lock);
318 if (!b_rc)
319 return -EINVAL;
320
321 /* clear the QM_PF runtime phase leftovers from previous init */
322 qed_init_clear_rt_data(p_hwfn);
323
324 /* prepare QM portion of runtime array */
325 qed_qm_init_pf(p_hwfn);
326
327 /* activate init tool on runtime array */
328 rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
329 p_hwfn->hw_info.hw_mode);
330 if (rc)
331 return rc;
332
333 /* start PF's qm queues */
334 spin_lock_bh(&qm_lock);
335 b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
336 qm_info->start_pq, qm_info->num_pqs);
337 spin_unlock_bh(&qm_lock);
338 if (!b_rc)
339 return -EINVAL;
340
341 return 0;
342}
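/* Illustrative usage of qed_qm_reconf() (a sketch, not code from this file):
 * assuming a PTT entry can be acquired via qed_ptt_acquire(), a caller
 * reacting to a QoS/DCBX change would typically do something like:
 *
 *	p_ptt = qed_ptt_acquire(p_hwfn);
 *	if (p_ptt) {
 *		qed_qm_reconf(p_hwfn, p_ptt);
 *		qed_ptt_release(p_hwfn, p_ptt);
 *	}
 */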
343
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200344int qed_resc_alloc(struct qed_dev *cdev)
345{
346 struct qed_consq *p_consq;
347 struct qed_eq *p_eq;
348 int i, rc = 0;
349
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300350 if (IS_VF(cdev))
351 return rc;
352
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200353 cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
354 if (!cdev->fw_data)
355 return -ENOMEM;
356
Yuval Mintz25c089d2015-10-26 11:02:26 +0200357 /* Allocate Memory for the Queue->CID mapping */
358 for_each_hwfn(cdev, i) {
359 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
360 int tx_size = sizeof(struct qed_hw_cid_data) *
361 RESC_NUM(p_hwfn, QED_L2_QUEUE);
362 int rx_size = sizeof(struct qed_hw_cid_data) *
363 RESC_NUM(p_hwfn, QED_L2_QUEUE);
364
365 p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
366 if (!p_hwfn->p_tx_cids) {
367 DP_NOTICE(p_hwfn,
368 "Failed to allocate memory for Tx Cids\n");
Dan Carpenter9b15acb2015-11-05 11:41:28 +0300369 rc = -ENOMEM;
Yuval Mintz25c089d2015-10-26 11:02:26 +0200370 goto alloc_err;
371 }
372
373 p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
374 if (!p_hwfn->p_rx_cids) {
375 DP_NOTICE(p_hwfn,
376 "Failed to allocate memory for Rx Cids\n");
Dan Carpenter9b15acb2015-11-05 11:41:28 +0300377 rc = -ENOMEM;
Yuval Mintz25c089d2015-10-26 11:02:26 +0200378 goto alloc_err;
379 }
380 }
381
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200382 for_each_hwfn(cdev, i) {
383 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
384
385 /* First allocate the context manager structure */
386 rc = qed_cxt_mngr_alloc(p_hwfn);
387 if (rc)
388 goto alloc_err;
389
390		/* Set the HW cid/tid numbers (in the context manager)
391 * Must be done prior to any further computations.
392 */
393 rc = qed_cxt_set_pf_params(p_hwfn);
394 if (rc)
395 goto alloc_err;
396
397 /* Prepare and process QM requirements */
Sudarsana Reddy Kalluru79529292016-05-26 11:01:20 +0300398 rc = qed_init_qm_info(p_hwfn, true);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200399 if (rc)
400 goto alloc_err;
401
402 /* Compute the ILT client partition */
403 rc = qed_cxt_cfg_ilt_compute(p_hwfn);
404 if (rc)
405 goto alloc_err;
406
407 /* CID map / ILT shadow table / T2
408		 * The table sizes are determined by the computations above
409 */
410 rc = qed_cxt_tables_alloc(p_hwfn);
411 if (rc)
412 goto alloc_err;
413
414		/* SPQ, must follow ILT because it initializes the SPQ context */
415 rc = qed_spq_alloc(p_hwfn);
416 if (rc)
417 goto alloc_err;
418
419 /* SP status block allocation */
420 p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
421 RESERVED_PTT_DPC);
422
423 rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
424 if (rc)
425 goto alloc_err;
426
Yuval Mintz32a47e72016-05-11 16:36:12 +0300427 rc = qed_iov_alloc(p_hwfn);
428 if (rc)
429 goto alloc_err;
430
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200431 /* EQ */
432 p_eq = qed_eq_alloc(p_hwfn, 256);
Dan Carpenter9b15acb2015-11-05 11:41:28 +0300433 if (!p_eq) {
434 rc = -ENOMEM;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200435 goto alloc_err;
Dan Carpenter9b15acb2015-11-05 11:41:28 +0300436 }
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200437 p_hwfn->p_eq = p_eq;
438
439 p_consq = qed_consq_alloc(p_hwfn);
Dan Carpenter9b15acb2015-11-05 11:41:28 +0300440 if (!p_consq) {
441 rc = -ENOMEM;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200442 goto alloc_err;
Dan Carpenter9b15acb2015-11-05 11:41:28 +0300443 }
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200444 p_hwfn->p_consq = p_consq;
445
446 /* DMA info initialization */
447 rc = qed_dmae_info_alloc(p_hwfn);
448 if (rc) {
449 DP_NOTICE(p_hwfn,
450 "Failed to allocate memory for dmae_info structure\n");
451 goto alloc_err;
452 }
Sudarsana Reddy Kalluru39651ab2016-05-17 06:44:26 -0400453
454 /* DCBX initialization */
455 rc = qed_dcbx_info_alloc(p_hwfn);
456 if (rc) {
457 DP_NOTICE(p_hwfn,
458 "Failed to allocate memory for dcbx structure\n");
459 goto alloc_err;
460 }
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200461 }
462
463 cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
464 if (!cdev->reset_stats) {
465 DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
Dan Carpenter9b15acb2015-11-05 11:41:28 +0300466 rc = -ENOMEM;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200467 goto alloc_err;
468 }
469
470 return 0;
471
472alloc_err:
473 qed_resc_free(cdev);
474 return rc;
475}
476
477void qed_resc_setup(struct qed_dev *cdev)
478{
479 int i;
480
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300481 if (IS_VF(cdev))
482 return;
483
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200484 for_each_hwfn(cdev, i) {
485 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
486
487 qed_cxt_mngr_setup(p_hwfn);
488 qed_spq_setup(p_hwfn);
489 qed_eq_setup(p_hwfn, p_hwfn->p_eq);
490 qed_consq_setup(p_hwfn, p_hwfn->p_consq);
491
492 /* Read shadow of current MFW mailbox */
493 qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
494 memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
495 p_hwfn->mcp_info->mfw_mb_cur,
496 p_hwfn->mcp_info->mfw_mb_length);
497
498 qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
Yuval Mintz32a47e72016-05-11 16:36:12 +0300499
500 qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200501 }
502}
503
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200504#define FINAL_CLEANUP_POLL_CNT (100)
505#define FINAL_CLEANUP_POLL_TIME (10)
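/* The poll budget below is 100 iterations of 10 ms each, i.e. roughly one
 * second for the firmware to acknowledge the final cleanup request.
 */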
506int qed_final_cleanup(struct qed_hwfn *p_hwfn,
Yuval Mintz0b55e272016-05-11 16:36:15 +0300507 struct qed_ptt *p_ptt, u16 id, bool is_vf)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200508{
509 u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
510 int rc = -EBUSY;
511
Yuval Mintzfc48b7a2016-02-15 13:22:35 -0500512 addr = GTT_BAR0_MAP_REG_USDM_RAM +
513 USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200514
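	/* VF ids are assumed to be placed after the PF id range in the
	 * final-cleanup namespace, hence the 0x10 (16) offset below.
	 */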
Yuval Mintz0b55e272016-05-11 16:36:15 +0300515 if (is_vf)
516 id += 0x10;
517
Yuval Mintzfc48b7a2016-02-15 13:22:35 -0500518 command |= X_FINAL_CLEANUP_AGG_INT <<
519 SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
520 command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
521 command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
522 command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200523
524 /* Make sure notification is not set before initiating final cleanup */
525 if (REG_RD(p_hwfn, addr)) {
526 DP_NOTICE(
527 p_hwfn,
528 "Unexpected; Found final cleanup notification before initiating final cleanup\n");
529 REG_WR(p_hwfn, addr, 0);
530 }
531
532 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
533 "Sending final cleanup for PFVF[%d] [Command %08x\n]",
534 id, command);
535
536 qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
537
538 /* Poll until completion */
539 while (!REG_RD(p_hwfn, addr) && count--)
540 msleep(FINAL_CLEANUP_POLL_TIME);
541
542 if (REG_RD(p_hwfn, addr))
543 rc = 0;
544 else
545 DP_NOTICE(p_hwfn,
546 "Failed to receive FW final cleanup notification\n");
547
548 /* Cleanup afterwards */
549 REG_WR(p_hwfn, addr, 0);
550
551 return rc;
552}
553
554static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
555{
556 int hw_mode = 0;
557
Yuval Mintz12e09c62016-03-02 20:26:01 +0200558 hw_mode = (1 << MODE_BB_B0);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200559
560 switch (p_hwfn->cdev->num_ports_in_engines) {
561 case 1:
562 hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
563 break;
564 case 2:
565 hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
566 break;
567 case 4:
568 hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
569 break;
570 default:
571 DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
572 p_hwfn->cdev->num_ports_in_engines);
573 return;
574 }
575
576 switch (p_hwfn->cdev->mf_mode) {
Yuval Mintzfc48b7a2016-02-15 13:22:35 -0500577 case QED_MF_DEFAULT:
578 case QED_MF_NPAR:
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200579 hw_mode |= 1 << MODE_MF_SI;
580 break;
Yuval Mintzfc48b7a2016-02-15 13:22:35 -0500581 case QED_MF_OVLAN:
582 hw_mode |= 1 << MODE_MF_SD;
583 break;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200584 default:
Yuval Mintzfc48b7a2016-02-15 13:22:35 -0500585 DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
586 hw_mode |= 1 << MODE_MF_SI;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200587 }
588
589 hw_mode |= 1 << MODE_ASIC;
590
591 p_hwfn->hw_info.hw_mode = hw_mode;
592}
593
594/* Init run time data for all PFs on an engine. */
595static void qed_init_cau_rt_data(struct qed_dev *cdev)
596{
597 u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
598 int i, sb_id;
599
600 for_each_hwfn(cdev, i) {
601 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
602 struct qed_igu_info *p_igu_info;
603 struct qed_igu_block *p_block;
604 struct cau_sb_entry sb_entry;
605
606 p_igu_info = p_hwfn->hw_info.p_igu_info;
607
608 for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
609 sb_id++) {
610 p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
611 if (!p_block->is_pf)
612 continue;
613
614 qed_init_cau_sb_entry(p_hwfn, &sb_entry,
615 p_block->function_id,
616 0, 0);
617 STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
618 sb_entry);
619 }
620 }
621}
622
623static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
624 struct qed_ptt *p_ptt,
625 int hw_mode)
626{
627 struct qed_qm_info *qm_info = &p_hwfn->qm_info;
628 struct qed_qm_common_rt_init_params params;
629 struct qed_dev *cdev = p_hwfn->cdev;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300630 u32 concrete_fid;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200631 int rc = 0;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300632 u8 vf_id;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200633
634 qed_init_cau_rt_data(cdev);
635
636 /* Program GTT windows */
637 qed_gtt_init(p_hwfn);
638
639 if (p_hwfn->mcp_info) {
640 if (p_hwfn->mcp_info->func_info.bandwidth_max)
641 qm_info->pf_rl_en = 1;
642 if (p_hwfn->mcp_info->func_info.bandwidth_min)
643 qm_info->pf_wfq_en = 1;
644 }
645
646 memset(&params, 0, sizeof(params));
647 params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
648 params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
649 params.pf_rl_en = qm_info->pf_rl_en;
650 params.pf_wfq_en = qm_info->pf_wfq_en;
651 params.vport_rl_en = qm_info->vport_rl_en;
652 params.vport_wfq_en = qm_info->vport_wfq_en;
653 params.port_params = qm_info->qm_port_params;
654
655 qed_qm_common_rt_init(p_hwfn, &params);
656
657 qed_cxt_hw_init_common(p_hwfn);
658
659 /* Close gate from NIG to BRB/Storm; By default they are open, but
660 * we close them to prevent NIG from passing data to reset blocks.
661 * Should have been done in the ENGINE phase, but init-tool lacks
662 * proper port-pretend capabilities.
663 */
664 qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
665 qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
666 qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
667 qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
668 qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
669 qed_port_unpretend(p_hwfn, p_ptt);
670
671 rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
672 if (rc != 0)
673 return rc;
674
675 qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
676 qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
677
678 /* Disable relaxed ordering in the PCI config space */
679 qed_wr(p_hwfn, p_ptt, 0x20b4,
680 qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);
681
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300682 for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) {
683 concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
684 qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
685 qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
686 }
687 /* pretend to original PF */
688 qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
689
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200690 return rc;
691}
692
693static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
694 struct qed_ptt *p_ptt,
695 int hw_mode)
696{
697 int rc = 0;
698
699 rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
700 hw_mode);
701 return rc;
702}
703
704static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
705 struct qed_ptt *p_ptt,
Manish Chopra464f6642016-04-14 01:38:29 -0400706 struct qed_tunn_start_params *p_tunn,
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200707 int hw_mode,
708 bool b_hw_start,
709 enum qed_int_mode int_mode,
710 bool allow_npar_tx_switch)
711{
712 u8 rel_pf_id = p_hwfn->rel_pf_id;
713 int rc = 0;
714
715 if (p_hwfn->mcp_info) {
716 struct qed_mcp_function_info *p_info;
717
718 p_info = &p_hwfn->mcp_info->func_info;
719 if (p_info->bandwidth_min)
720 p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
721
722 /* Update rate limit once we'll actually have a link */
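		/* The value below is assumed to be in Mb/s, i.e. a 100 Gb/s
		 * ceiling until the actual link speed is reported.
		 */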
Manish Chopra4b01e512016-04-26 10:56:09 -0400723 p_hwfn->qm_info.pf_rl = 100000;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200724 }
725
726 qed_cxt_hw_init_pf(p_hwfn);
727
728 qed_int_igu_init_rt(p_hwfn);
729
730 /* Set VLAN in NIG if needed */
731 if (hw_mode & (1 << MODE_MF_SD)) {
732 DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
733 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
734 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
735 p_hwfn->hw_info.ovlan);
736 }
737
738 /* Enable classification by MAC if needed */
Dan Carpenter87aec472015-11-04 16:29:11 +0300739 if (hw_mode & (1 << MODE_MF_SI)) {
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200740 DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
741 "Configuring TAGMAC_CLS_TYPE\n");
742 STORE_RT_REG(p_hwfn,
743 NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
744 }
745
746	/* Protocol Configuration */
747 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
748 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
749 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
750
751 /* Cleanup chip from previous driver if such remains exist */
Yuval Mintz0b55e272016-05-11 16:36:15 +0300752 rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200753 if (rc != 0)
754 return rc;
755
756 /* PF Init sequence */
757 rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
758 if (rc)
759 return rc;
760
761 /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
762 rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
763 if (rc)
764 return rc;
765
766 /* Pure runtime initializations - directly to the HW */
767 qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
768
769 if (b_hw_start) {
770 /* enable interrupts */
771 qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
772
773 /* send function start command */
Yuval Mintz831bfb0e2016-05-11 16:36:25 +0300774 rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode,
775 allow_npar_tx_switch);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200776 if (rc)
777 DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
778 }
779 return rc;
780}
781
782static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
783 struct qed_ptt *p_ptt,
784 u8 enable)
785{
786 u32 delay_idx = 0, val, set_val = enable ? 1 : 0;
787
788 /* Change PF in PXP */
789 qed_wr(p_hwfn, p_ptt,
790 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
791
792 /* wait until value is set - try for 1 second every 50us */
793 for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
794 val = qed_rd(p_hwfn, p_ptt,
795 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
796 if (val == set_val)
797 break;
798
799 usleep_range(50, 60);
800 }
801
802 if (val != set_val) {
803 DP_NOTICE(p_hwfn,
804 "PFID_ENABLE_MASTER wasn't changed after a second\n");
805 return -EAGAIN;
806 }
807
808 return 0;
809}
810
811static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
812 struct qed_ptt *p_main_ptt)
813{
814 /* Read shadow of current MFW mailbox */
815 qed_mcp_read_mb(p_hwfn, p_main_ptt);
816 memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
817 p_hwfn->mcp_info->mfw_mb_cur,
818 p_hwfn->mcp_info->mfw_mb_length);
819}
820
821int qed_hw_init(struct qed_dev *cdev,
Manish Chopra464f6642016-04-14 01:38:29 -0400822 struct qed_tunn_start_params *p_tunn,
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200823 bool b_hw_start,
824 enum qed_int_mode int_mode,
825 bool allow_npar_tx_switch,
826 const u8 *bin_fw_data)
827{
Yuval Mintz86622ee2016-03-02 20:26:02 +0200828 u32 load_code, param;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200829 int rc, mfw_rc, i;
830
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300831 if (IS_PF(cdev)) {
832 rc = qed_init_fw_data(cdev, bin_fw_data);
833 if (rc != 0)
834 return rc;
835 }
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200836
837 for_each_hwfn(cdev, i) {
838 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
839
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300840 if (IS_VF(cdev)) {
841 p_hwfn->b_int_enabled = 1;
842 continue;
843 }
844
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200845 /* Enable DMAE in PXP */
846 rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
847
848 qed_calc_hw_mode(p_hwfn);
849
850 rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
851 &load_code);
852 if (rc) {
853 DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
854 return rc;
855 }
856
857 qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
858
859 DP_VERBOSE(p_hwfn, QED_MSG_SP,
860 "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
861 rc, load_code);
862
863 p_hwfn->first_on_engine = (load_code ==
864 FW_MSG_CODE_DRV_LOAD_ENGINE);
865
Sudarsana Reddy Kalluru39651ab2016-05-17 06:44:26 -0400866 if (!qm_lock_init) {
867 spin_lock_init(&qm_lock);
868 qm_lock_init = true;
869 }
870
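		/* The MFW load code determines how much of the chip this PF
		 * must initialize: the first function on the engine runs the
		 * engine, port and PF phases; the first on a port runs the
		 * port and PF phases; any other function runs only its own PF
		 * phase (hence the fall-throughs in the switch below).
		 */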
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200871 switch (load_code) {
872 case FW_MSG_CODE_DRV_LOAD_ENGINE:
873 rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
874 p_hwfn->hw_info.hw_mode);
875 if (rc)
876 break;
877 /* Fall into */
878 case FW_MSG_CODE_DRV_LOAD_PORT:
879 rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
880 p_hwfn->hw_info.hw_mode);
881 if (rc)
882 break;
883
884 /* Fall into */
885 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
886 rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
Manish Chopra464f6642016-04-14 01:38:29 -0400887 p_tunn, p_hwfn->hw_info.hw_mode,
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200888 b_hw_start, int_mode,
889 allow_npar_tx_switch);
890 break;
891 default:
892 rc = -EINVAL;
893 break;
894 }
895
896 if (rc)
897 DP_NOTICE(p_hwfn,
898 "init phase failed for loadcode 0x%x (rc %d)\n",
899 load_code, rc);
900
901 /* ACK mfw regardless of success or failure of initialization */
902 mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
903 DRV_MSG_CODE_LOAD_DONE,
904 0, &load_code, &param);
905 if (rc)
906 return rc;
907 if (mfw_rc) {
908 DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
909 return mfw_rc;
910 }
911
Sudarsana Reddy Kalluru39651ab2016-05-17 06:44:26 -0400912 /* send DCBX attention request command */
913 DP_VERBOSE(p_hwfn,
914 QED_MSG_DCB,
915 "sending phony dcbx set command to trigger DCBx attention handling\n");
916 mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
917 DRV_MSG_CODE_SET_DCBX,
918 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
919 &load_code, &param);
920 if (mfw_rc) {
921 DP_NOTICE(p_hwfn,
922 "Failed to send DCBX attention request\n");
923 return mfw_rc;
924 }
925
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200926 p_hwfn->hw_init_done = true;
927 }
928
929 return 0;
930}
931
932#define QED_HW_STOP_RETRY_LIMIT (10)
Yuval Mintz8c925c42016-03-02 20:26:03 +0200933static inline void qed_hw_timers_stop(struct qed_dev *cdev,
934 struct qed_hwfn *p_hwfn,
935 struct qed_ptt *p_ptt)
936{
937 int i;
938
939 /* close timers */
940 qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
941 qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
942
943 for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
944 if ((!qed_rd(p_hwfn, p_ptt,
945 TM_REG_PF_SCAN_ACTIVE_CONN)) &&
946 (!qed_rd(p_hwfn, p_ptt,
947 TM_REG_PF_SCAN_ACTIVE_TASK)))
948 break;
949
950 /* Dependent on number of connection/tasks, possibly
951 * 1ms sleep is required between polls
952 */
953 usleep_range(1000, 2000);
954 }
955
956 if (i < QED_HW_STOP_RETRY_LIMIT)
957 return;
958
959 DP_NOTICE(p_hwfn,
960 "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
961 (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
962 (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
963}
964
965void qed_hw_timers_stop_all(struct qed_dev *cdev)
966{
967 int j;
968
969 for_each_hwfn(cdev, j) {
970 struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
971 struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
972
973 qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
974 }
975}
976
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200977int qed_hw_stop(struct qed_dev *cdev)
978{
979 int rc = 0, t_rc;
Yuval Mintz8c925c42016-03-02 20:26:03 +0200980 int j;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200981
982 for_each_hwfn(cdev, j) {
983 struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
984 struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
985
986 DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
987
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300988 if (IS_VF(cdev)) {
Yuval Mintz0b55e272016-05-11 16:36:15 +0300989 qed_vf_pf_int_cleanup(p_hwfn);
Yuval Mintz1408cc1f2016-05-11 16:36:14 +0300990 continue;
991 }
992
Yuval Mintzfe56b9e2015-10-26 11:02:25 +0200993 /* mark the hw as uninitialized... */
994 p_hwfn->hw_init_done = false;
995
996 rc = qed_sp_pf_stop(p_hwfn);
997 if (rc)
Yuval Mintz8c925c42016-03-02 20:26:03 +0200998 DP_NOTICE(p_hwfn,
999 "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001000
1001 qed_wr(p_hwfn, p_ptt,
1002 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
1003
1004 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
1005 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
1006 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
1007 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
1008 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
1009
Yuval Mintz8c925c42016-03-02 20:26:03 +02001010 qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001011
1012 /* Disable Attention Generation */
1013 qed_int_igu_disable_int(p_hwfn, p_ptt);
1014
1015 qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
1016 qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
1017
1018 qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
1019
1020 /* Need to wait 1ms to guarantee SBs are cleared */
1021 usleep_range(1000, 2000);
1022 }
1023
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001024 if (IS_PF(cdev)) {
1025 /* Disable DMAE in PXP - in CMT, this should only be done for
1026 * first hw-function, and only after all transactions have
1027 * stopped for all active hw-functions.
1028 */
1029 t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
1030 cdev->hwfns[0].p_main_ptt, false);
1031 if (t_rc != 0)
1032 rc = t_rc;
1033 }
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001034
1035 return rc;
1036}
1037
Manish Chopracee4d262015-10-26 11:02:28 +02001038void qed_hw_stop_fastpath(struct qed_dev *cdev)
1039{
Yuval Mintz8c925c42016-03-02 20:26:03 +02001040 int j;
Manish Chopracee4d262015-10-26 11:02:28 +02001041
1042 for_each_hwfn(cdev, j) {
1043 struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001044 struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
1045
1046 if (IS_VF(cdev)) {
1047 qed_vf_pf_int_cleanup(p_hwfn);
1048 continue;
1049 }
Manish Chopracee4d262015-10-26 11:02:28 +02001050
1051 DP_VERBOSE(p_hwfn,
1052 NETIF_MSG_IFDOWN,
1053 "Shutting down the fastpath\n");
1054
1055 qed_wr(p_hwfn, p_ptt,
1056 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
1057
1058 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
1059 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
1060 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
1061 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
1062 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
1063
Manish Chopracee4d262015-10-26 11:02:28 +02001064 qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
1065
1066 /* Need to wait 1ms to guarantee SBs are cleared */
1067 usleep_range(1000, 2000);
1068 }
1069}
1070
1071void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
1072{
Yuval Mintzdacd88d2016-05-11 16:36:16 +03001073 if (IS_VF(p_hwfn->cdev))
1074 return;
1075
Manish Chopracee4d262015-10-26 11:02:28 +02001076 /* Re-open incoming traffic */
1077 qed_wr(p_hwfn, p_hwfn->p_main_ptt,
1078 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
1079}
1080
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001081static int qed_reg_assert(struct qed_hwfn *hwfn,
1082 struct qed_ptt *ptt, u32 reg,
1083 bool expected)
1084{
1085 u32 assert_val = qed_rd(hwfn, ptt, reg);
1086
1087 if (assert_val != expected) {
1088 DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
1089 reg, expected);
1090 return -EINVAL;
1091 }
1092
1093 return 0;
1094}
1095
1096int qed_hw_reset(struct qed_dev *cdev)
1097{
1098 int rc = 0;
1099 u32 unload_resp, unload_param;
1100 int i;
1101
1102 for_each_hwfn(cdev, i) {
1103 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1104
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001105 if (IS_VF(cdev)) {
Yuval Mintz0b55e272016-05-11 16:36:15 +03001106 rc = qed_vf_pf_reset(p_hwfn);
1107 if (rc)
1108 return rc;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001109 continue;
1110 }
1111
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001112 DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
1113
1114 /* Check for incorrect states */
1115 qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
1116 QM_REG_USG_CNT_PF_TX, 0);
1117 qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
1118 QM_REG_USG_CNT_PF_OTHER, 0);
1119
1120 /* Disable PF in HW blocks */
1121 qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
1122 qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
1123 qed_wr(p_hwfn, p_hwfn->p_main_ptt,
1124 TCFC_REG_STRONG_ENABLE_PF, 0);
1125 qed_wr(p_hwfn, p_hwfn->p_main_ptt,
1126 CCFC_REG_STRONG_ENABLE_PF, 0);
1127
1128 /* Send unload command to MCP */
1129 rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
1130 DRV_MSG_CODE_UNLOAD_REQ,
1131 DRV_MB_PARAM_UNLOAD_WOL_MCP,
1132 &unload_resp, &unload_param);
1133 if (rc) {
1134 DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
1135 unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
1136 }
1137
1138 rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
1139 DRV_MSG_CODE_UNLOAD_DONE,
1140 0, &unload_resp, &unload_param);
1141 if (rc) {
1142 DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
1143 return rc;
1144 }
1145 }
1146
1147 return rc;
1148}
1149
1150/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
1151static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
1152{
1153 qed_ptt_pool_free(p_hwfn);
1154 kfree(p_hwfn->hw_info.p_igu_info);
1155}
1156
1157/* Setup bar access */
Yuval Mintz12e09c62016-03-02 20:26:01 +02001158static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001159{
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001160 /* clear indirect access */
1161 qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
1162 qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
1163 qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
1164 qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);
1165
1166 /* Clean Previous errors if such exist */
1167 qed_wr(p_hwfn, p_hwfn->p_main_ptt,
1168 PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
1169 1 << p_hwfn->abs_pf_id);
1170
1171 /* enable internal target-read */
1172 qed_wr(p_hwfn, p_hwfn->p_main_ptt,
1173 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001174}
1175
1176static void get_function_id(struct qed_hwfn *p_hwfn)
1177{
1178 /* ME Register */
1179 p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);
1180
1181 p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
1182
1183 p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
1184 p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
1185 PXP_CONCRETE_FID_PFID);
1186 p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
1187 PXP_CONCRETE_FID_PORT);
1188}
1189
Yuval Mintz25c089d2015-10-26 11:02:26 +02001190static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
1191{
1192 u32 *feat_num = p_hwfn->hw_info.feat_num;
1193 int num_features = 1;
1194
1195 feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
1196 num_features,
1197 RESC_NUM(p_hwfn, QED_L2_QUEUE));
1198 DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
1199 "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
1200 feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB),
1201 num_features);
1202}
1203
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001204static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
1205{
1206 u32 *resc_start = p_hwfn->hw_info.resc_start;
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001207 u8 num_funcs = p_hwfn->num_funcs_on_engine;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001208 u32 *resc_num = p_hwfn->hw_info.resc_num;
Yuval Mintz4ac801b2016-02-28 12:26:52 +02001209 struct qed_sb_cnt_info sb_cnt_info;
Yuval Mintz08feecd2016-05-11 16:36:20 +03001210 int i, max_vf_vlan_filters;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001211
Yuval Mintz4ac801b2016-02-28 12:26:52 +02001212 memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
Yuval Mintz08feecd2016-05-11 16:36:20 +03001213
1214#ifdef CONFIG_QED_SRIOV
1215 max_vf_vlan_filters = QED_ETH_MAX_VF_NUM_VLAN_FILTERS;
1216#else
1217 max_vf_vlan_filters = 0;
1218#endif
1219
Yuval Mintz4ac801b2016-02-28 12:26:52 +02001220 qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
1221
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001222 resc_num[QED_SB] = min_t(u32,
1223 (MAX_SB_PER_PATH_BB / num_funcs),
Yuval Mintz4ac801b2016-02-28 12:26:52 +02001224 sb_cnt_info.sb_cnt);
Yuval Mintz25c089d2015-10-26 11:02:26 +02001225 resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001226 resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
Yuval Mintz25c089d2015-10-26 11:02:26 +02001227 resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001228 resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
1229 resc_num[QED_RL] = 8;
Yuval Mintz25c089d2015-10-26 11:02:26 +02001230 resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
1231 resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
1232 num_funcs;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001233 resc_num[QED_ILT] = 950;
1234
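	/* Each PF receives an equal, contiguous slice of every resource:
	 * PF N's slice starts at N times the per-PF count computed above.
	 */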
1235 for (i = 0; i < QED_MAX_RESC; i++)
1236 resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;
1237
Yuval Mintz25c089d2015-10-26 11:02:26 +02001238 qed_hw_set_feat(p_hwfn);
1239
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001240 DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
1241 "The numbers for each resource are:\n"
1242 "SB = %d start = %d\n"
Yuval Mintz25c089d2015-10-26 11:02:26 +02001243 "L2_QUEUE = %d start = %d\n"
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001244 "VPORT = %d start = %d\n"
1245 "PQ = %d start = %d\n"
1246 "RL = %d start = %d\n"
Yuval Mintz25c089d2015-10-26 11:02:26 +02001247 "MAC = %d start = %d\n"
1248 "VLAN = %d start = %d\n"
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001249 "ILT = %d start = %d\n",
1250 p_hwfn->hw_info.resc_num[QED_SB],
1251 p_hwfn->hw_info.resc_start[QED_SB],
Yuval Mintz25c089d2015-10-26 11:02:26 +02001252 p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
1253 p_hwfn->hw_info.resc_start[QED_L2_QUEUE],
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001254 p_hwfn->hw_info.resc_num[QED_VPORT],
1255 p_hwfn->hw_info.resc_start[QED_VPORT],
1256 p_hwfn->hw_info.resc_num[QED_PQ],
1257 p_hwfn->hw_info.resc_start[QED_PQ],
1258 p_hwfn->hw_info.resc_num[QED_RL],
1259 p_hwfn->hw_info.resc_start[QED_RL],
Yuval Mintz25c089d2015-10-26 11:02:26 +02001260 p_hwfn->hw_info.resc_num[QED_MAC],
1261 p_hwfn->hw_info.resc_start[QED_MAC],
1262 p_hwfn->hw_info.resc_num[QED_VLAN],
1263 p_hwfn->hw_info.resc_start[QED_VLAN],
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001264 p_hwfn->hw_info.resc_num[QED_ILT],
1265 p_hwfn->hw_info.resc_start[QED_ILT]);
1266}
1267
1268static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
1269 struct qed_ptt *p_ptt)
1270{
Yuval Mintzcc875c22015-10-26 11:02:31 +02001271 u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
Yuval Mintzfc48b7a2016-02-15 13:22:35 -05001272 u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
Yuval Mintzcc875c22015-10-26 11:02:31 +02001273 struct qed_mcp_link_params *link;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001274
1275 /* Read global nvm_cfg address */
1276 nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
1277
1278 /* Verify MCP has initialized it */
1279 if (!nvm_cfg_addr) {
1280 DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
1281 return -EINVAL;
1282 }
1283
1284	/* Read nvm_cfg1 (note: this is just the offset, not offsize) (TBD) */
1285 nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
1286
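	/* nvm_cfg1_offset points into the MCP scratchpad, where the nvm_cfg1
	 * image is assumed to mirror the struct nvm_cfg1 layout, so individual
	 * fields are located with offsetof() as done below.
	 */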
Yuval Mintzcc875c22015-10-26 11:02:31 +02001287 addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1288 offsetof(struct nvm_cfg1, glob) +
1289 offsetof(struct nvm_cfg1_glob, core_cfg);
1290
1291 core_cfg = qed_rd(p_hwfn, p_ptt, addr);
1292
1293 switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
1294 NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
1295 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
1296 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
1297 break;
1298 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
1299 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
1300 break;
1301 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
1302 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
1303 break;
1304 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
1305 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
1306 break;
1307 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
1308 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
1309 break;
1310 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
1311 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
1312 break;
1313 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
1314 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
1315 break;
1316 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
1317 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
1318 break;
1319 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
1320 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
1321 break;
1322 default:
1323 DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
1324 core_cfg);
1325 break;
1326 }
1327
Yuval Mintzcc875c22015-10-26 11:02:31 +02001328 /* Read default link configuration */
1329 link = &p_hwfn->mcp_info->link_input;
1330 port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1331 offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
1332 link_temp = qed_rd(p_hwfn, p_ptt,
1333 port_cfg_addr +
1334 offsetof(struct nvm_cfg1_port, speed_cap_mask));
1335 link->speed.advertised_speeds =
1336 link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
1337
1338 p_hwfn->mcp_info->link_capabilities.speed_capabilities =
1339 link->speed.advertised_speeds;
1340
1341 link_temp = qed_rd(p_hwfn, p_ptt,
1342 port_cfg_addr +
1343 offsetof(struct nvm_cfg1_port, link_settings));
1344 switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
1345 NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
1346 case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
1347 link->speed.autoneg = true;
1348 break;
1349 case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
1350 link->speed.forced_speed = 1000;
1351 break;
1352 case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
1353 link->speed.forced_speed = 10000;
1354 break;
1355 case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
1356 link->speed.forced_speed = 25000;
1357 break;
1358 case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
1359 link->speed.forced_speed = 40000;
1360 break;
1361 case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
1362 link->speed.forced_speed = 50000;
1363 break;
1364 case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
1365 link->speed.forced_speed = 100000;
1366 break;
1367 default:
1368 DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
1369 link_temp);
1370 }
1371
1372 link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
1373 link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
1374 link->pause.autoneg = !!(link_temp &
1375 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
1376 link->pause.forced_rx = !!(link_temp &
1377 NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
1378 link->pause.forced_tx = !!(link_temp &
1379 NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
1380 link->loopback_mode = 0;
1381
1382 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1383 "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
1384 link->speed.forced_speed, link->speed.advertised_speeds,
1385 link->speed.autoneg, link->pause.autoneg);
1386
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001387 /* Read Multi-function information from shmem */
1388 addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1389 offsetof(struct nvm_cfg1, glob) +
1390 offsetof(struct nvm_cfg1_glob, generic_cont0);
1391
1392 generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);
1393
1394 mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
1395 NVM_CFG1_GLOB_MF_MODE_OFFSET;
1396
1397 switch (mf_mode) {
1398 case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
Yuval Mintzfc48b7a2016-02-15 13:22:35 -05001399 p_hwfn->cdev->mf_mode = QED_MF_OVLAN;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001400 break;
1401 case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
Yuval Mintzfc48b7a2016-02-15 13:22:35 -05001402 p_hwfn->cdev->mf_mode = QED_MF_NPAR;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001403 break;
Yuval Mintzfc48b7a2016-02-15 13:22:35 -05001404 case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
1405 p_hwfn->cdev->mf_mode = QED_MF_DEFAULT;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001406 break;
1407 }
1408 DP_INFO(p_hwfn, "Multi function mode is %08x\n",
1409 p_hwfn->cdev->mf_mode);
1410
Yuval Mintzfc48b7a2016-02-15 13:22:35 -05001411	/* Read device capabilities from shmem */
1412 addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1413 offsetof(struct nvm_cfg1, glob) +
1414 offsetof(struct nvm_cfg1_glob, device_capabilities);
1415
1416 device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
1417 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
1418 __set_bit(QED_DEV_CAP_ETH,
1419 &p_hwfn->hw_info.device_capabilities);
1420
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001421 return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
1422}
1423
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001424static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1425{
1426 u32 reg_function_hide, tmp, eng_mask;
1427 u8 num_funcs;
1428
1429 num_funcs = MAX_NUM_PFS_BB;
1430
1431 /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
1432 * in the other bits are selected.
1433 * Bits 1-15 are for functions 1-15, respectively, and their value is
1434 * '0' only for enabled functions (function 0 always exists and
1435	 * is enabled).
1436 * In case of CMT, only the "even" functions are enabled, and thus the
1437 * number of functions for both hwfns is learnt from the same bits.
1438 */
1439 reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
1440
1441 if (reg_function_hide & 0x1) {
1442 if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) {
1443 num_funcs = 0;
1444 eng_mask = 0xaaaa;
1445 } else {
1446 num_funcs = 1;
1447 eng_mask = 0x5554;
1448 }
1449
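	/* The engine mask is presumably chosen so that each path counts only
	 * its own functions: 0xaaaa covers the odd-numbered functions, while
	 * 0x5554 covers the remaining even-numbered ones above function 0,
	 * which is already accounted for by the initial num_funcs value.
	 */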
1450 /* Get the number of the enabled functions on the engine */
1451 tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
1452 while (tmp) {
1453 if (tmp & 0x1)
1454 num_funcs++;
1455 tmp >>= 0x1;
1456 }
1457 }
1458
1459 p_hwfn->num_funcs_on_engine = num_funcs;
1460
1461 DP_VERBOSE(p_hwfn,
1462 NETIF_MSG_PROBE,
1463 "PF [rel_id %d, abs_id %d] within the %d enabled functions on the engine\n",
1464 p_hwfn->rel_pf_id,
1465 p_hwfn->abs_pf_id,
1466 p_hwfn->num_funcs_on_engine);
1467}
1468
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001469static int
1470qed_get_hw_info(struct qed_hwfn *p_hwfn,
1471 struct qed_ptt *p_ptt,
1472 enum qed_pci_personality personality)
1473{
1474 u32 port_mode;
1475 int rc;
1476
Yuval Mintz32a47e72016-05-11 16:36:12 +03001477	/* Since all information is common, only the first hwfn should do this */
1478 if (IS_LEAD_HWFN(p_hwfn)) {
1479 rc = qed_iov_hw_info(p_hwfn);
1480 if (rc)
1481 return rc;
1482 }
1483
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001484 /* Read the port mode */
1485 port_mode = qed_rd(p_hwfn, p_ptt,
1486 CNIG_REG_NW_PORT_MODE_BB_B0);
1487
1488 if (port_mode < 3) {
1489 p_hwfn->cdev->num_ports_in_engines = 1;
1490 } else if (port_mode <= 5) {
1491 p_hwfn->cdev->num_ports_in_engines = 2;
1492 } else {
1493 DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
1494			  port_mode);
1495
1496		/* Default num_ports_in_engines to 1 */
1497 p_hwfn->cdev->num_ports_in_engines = 1;
1498 }
1499
1500 qed_hw_get_nvm_info(p_hwfn, p_ptt);
1501
1502 rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
1503 if (rc)
1504 return rc;
1505
1506 if (qed_mcp_is_init(p_hwfn))
1507 ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
1508 p_hwfn->mcp_info->func_info.mac);
1509 else
1510 eth_random_addr(p_hwfn->hw_info.hw_mac_addr);
1511
1512 if (qed_mcp_is_init(p_hwfn)) {
1513 if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
1514 p_hwfn->hw_info.ovlan =
1515 p_hwfn->mcp_info->func_info.ovlan;
1516
1517 qed_mcp_cmd_port_init(p_hwfn, p_ptt);
1518 }
1519
1520 if (qed_mcp_is_init(p_hwfn)) {
1521 enum qed_pci_personality protocol;
1522
1523 protocol = p_hwfn->mcp_info->func_info.protocol;
1524 p_hwfn->hw_info.personality = protocol;
1525 }
1526
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001527 qed_get_num_funcs(p_hwfn, p_ptt);
1528
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001529 qed_hw_get_resc(p_hwfn);
1530
1531 return rc;
1532}
1533
Yuval Mintz12e09c62016-03-02 20:26:01 +02001534static int qed_get_dev_info(struct qed_dev *cdev)
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001535{
Yuval Mintzfc48b7a2016-02-15 13:22:35 -05001536 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001537 u32 tmp;
1538
Yuval Mintzfc48b7a2016-02-15 13:22:35 -05001539 /* Read Vendor Id / Device Id */
1540 pci_read_config_word(cdev->pdev, PCI_VENDOR_ID,
1541 &cdev->vendor_id);
1542 pci_read_config_word(cdev->pdev, PCI_DEVICE_ID,
1543 &cdev->device_id);
1544 cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001545 MISCS_REG_CHIP_NUM);
Yuval Mintzfc48b7a2016-02-15 13:22:35 -05001546 cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001547 MISCS_REG_CHIP_REV);
1548 MASK_FIELD(CHIP_REV, cdev->chip_rev);
1549
Yuval Mintzfc48b7a2016-02-15 13:22:35 -05001550 cdev->type = QED_DEV_TYPE_BB;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001551 /* Learn number of HW-functions */
Yuval Mintzfc48b7a2016-02-15 13:22:35 -05001552 tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001553 MISCS_REG_CMT_ENABLED_FOR_PAIR);
1554
Yuval Mintzfc48b7a2016-02-15 13:22:35 -05001555 if (tmp & (1 << p_hwfn->rel_pf_id)) {
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001556 DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
1557 cdev->num_hwfns = 2;
1558 } else {
1559 cdev->num_hwfns = 1;
1560 }
1561
Yuval Mintzfc48b7a2016-02-15 13:22:35 -05001562 cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001563 MISCS_REG_CHIP_TEST_REG) >> 4;
1564 MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
Yuval Mintzfc48b7a2016-02-15 13:22:35 -05001565 cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001566 MISCS_REG_CHIP_METAL);
1567 MASK_FIELD(CHIP_METAL, cdev->chip_metal);
1568
1569 DP_INFO(cdev->hwfns,
1570 "Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
1571 cdev->chip_num, cdev->chip_rev,
1572 cdev->chip_bond_id, cdev->chip_metal);
Yuval Mintz12e09c62016-03-02 20:26:01 +02001573
1574 if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) {
1575 DP_NOTICE(cdev->hwfns,
1576 "The chip type/rev (BB A0) is not supported!\n");
1577 return -EINVAL;
1578 }
1579
1580 return 0;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001581}
1582
1583static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
1584 void __iomem *p_regview,
1585 void __iomem *p_doorbells,
1586 enum qed_pci_personality personality)
1587{
1588 int rc = 0;
1589
1590 /* Split PCI bars evenly between hwfns */
1591 p_hwfn->regview = p_regview;
1592 p_hwfn->doorbells = p_doorbells;
1593
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001594 if (IS_VF(p_hwfn->cdev))
1595 return qed_vf_hw_prepare(p_hwfn);
1596
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001597 /* Validate that chip access is feasible */
1598 if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
1599 DP_ERR(p_hwfn,
1600 "Reading the ME register returns all Fs; Preventing further chip access\n");
1601 return -EINVAL;
1602 }
1603
1604 get_function_id(p_hwfn);
1605
Yuval Mintz12e09c62016-03-02 20:26:01 +02001606 /* Allocate PTT pool */
1607 rc = qed_ptt_pool_alloc(p_hwfn);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001608 if (rc) {
1609 DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
1610 goto err0;
1611 }
1612
Yuval Mintz12e09c62016-03-02 20:26:01 +02001613 /* Allocate the main PTT */
1614 p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
1615
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001616 /* First hwfn learns basic information, e.g., number of hwfns */
Yuval Mintz12e09c62016-03-02 20:26:01 +02001617 if (!p_hwfn->my_id) {
1618 rc = qed_get_dev_info(p_hwfn->cdev);
1619 if (rc != 0)
1620 goto err1;
1621 }
1622
1623 qed_hw_hwfn_prepare(p_hwfn);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001624
1625 /* Initialize MCP structure */
1626 rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
1627 if (rc) {
1628 DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
1629 goto err1;
1630 }
1631
1632 /* Read the device configuration information from the HW and SHMEM */
1633 rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
1634 if (rc) {
1635 DP_NOTICE(p_hwfn, "Failed to get HW information\n");
1636 goto err2;
1637 }
1638
1639 /* Allocate the init RT array and initialize the init-ops engine */
1640 rc = qed_init_alloc(p_hwfn);
1641 if (rc) {
1642 DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
1643 goto err2;
1644 }
1645
1646 return rc;
1647err2:
Yuval Mintz32a47e72016-05-11 16:36:12 +03001648 if (IS_LEAD_HWFN(p_hwfn))
1649 qed_iov_free_hw_info(p_hwfn->cdev);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001650 qed_mcp_free(p_hwfn);
1651err1:
1652 qed_hw_hwfn_free(p_hwfn);
1653err0:
1654 return rc;
1655}
1656
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001657int qed_hw_prepare(struct qed_dev *cdev,
1658 int personality)
1659{
Ariel Eliorc78df142015-12-07 06:25:58 -05001660 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
1661 int rc;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001662
1663 /* Store the precompiled init data ptrs */
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001664 if (IS_PF(cdev))
1665 qed_init_iro_array(cdev);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001666
1667 /* Initialize the first hwfn - will learn number of hwfns */
Ariel Eliorc78df142015-12-07 06:25:58 -05001668 rc = qed_hw_prepare_single(p_hwfn,
1669 cdev->regview,
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001670 cdev->doorbells, personality);
1671 if (rc)
1672 return rc;
1673
Ariel Eliorc78df142015-12-07 06:25:58 -05001674 personality = p_hwfn->hw_info.personality;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001675
1676 /* Initialize the rest of the hwfns */
Ariel Eliorc78df142015-12-07 06:25:58 -05001677 if (cdev->num_hwfns > 1) {
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001678 void __iomem *p_regview, *p_doorbell;
Ariel Eliorc78df142015-12-07 06:25:58 -05001679 u8 __iomem *addr;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001680
Ariel Eliorc78df142015-12-07 06:25:58 -05001681 /* adjust bar offset for second engine */
Ram Amranic2035ee2016-03-02 20:26:00 +02001682 addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
Ariel Eliorc78df142015-12-07 06:25:58 -05001683 p_regview = addr;
1684
1685 /* adjust doorbell bar offset for second engine */
Ram Amranic2035ee2016-03-02 20:26:00 +02001686 addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
Ariel Eliorc78df142015-12-07 06:25:58 -05001687 p_doorbell = addr;
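		/* e.g., with a 512kB doorbell BAR (illustrative size) each
		 * engine owns 256kB, so the second hwfn's doorbell view
		 * starts at cdev->doorbells + 256kB.
		 */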
1688
1689 /* prepare second hw function */
1690 rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001691 p_doorbell, personality);
Ariel Eliorc78df142015-12-07 06:25:58 -05001692
1693 /* in case of error, need to free the previously
1694		 * initialized hwfn 0.
1695 */
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001696 if (rc) {
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001697 if (IS_PF(cdev)) {
1698 qed_init_free(p_hwfn);
1699 qed_mcp_free(p_hwfn);
1700 qed_hw_hwfn_free(p_hwfn);
1701 }
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001702 }
1703 }
1704
Ariel Eliorc78df142015-12-07 06:25:58 -05001705 return rc;
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001706}
1707
1708void qed_hw_remove(struct qed_dev *cdev)
1709{
1710 int i;
1711
1712 for_each_hwfn(cdev, i) {
1713 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1714
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001715 if (IS_VF(cdev)) {
Yuval Mintz0b55e272016-05-11 16:36:15 +03001716 qed_vf_pf_release(p_hwfn);
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03001717 continue;
1718 }
1719
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001720 qed_init_free(p_hwfn);
1721 qed_hw_hwfn_free(p_hwfn);
1722 qed_mcp_free(p_hwfn);
1723 }
Yuval Mintz32a47e72016-05-11 16:36:12 +03001724
1725 qed_iov_free_hw_info(cdev);
Yuval Mintzfe56b9e2015-10-26 11:02:25 +02001726}
1727
1728int qed_chain_alloc(struct qed_dev *cdev,
1729 enum qed_chain_use_mode intended_use,
1730 enum qed_chain_mode mode,
1731 u16 num_elems,
1732 size_t elem_size,
1733 struct qed_chain *p_chain)
1734{
1735 dma_addr_t p_pbl_phys = 0;
1736 void *p_pbl_virt = NULL;
1737 dma_addr_t p_phys = 0;
1738 void *p_virt = NULL;
1739 u16 page_cnt = 0;
1740 size_t size;
1741
1742 if (mode == QED_CHAIN_MODE_SINGLE)
1743 page_cnt = 1;
1744 else
1745 page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
1746
1747 size = page_cnt * QED_CHAIN_PAGE_SIZE;
1748 p_virt = dma_alloc_coherent(&cdev->pdev->dev,
1749 size, &p_phys, GFP_KERNEL);
1750 if (!p_virt) {
1751 DP_NOTICE(cdev, "Failed to allocate chain mem\n");
1752 goto nomem;
1753 }
1754
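	/* In PBL mode a separate page base list is allocated as well - one
	 * entry per chain page, holding that page's physical address - which
	 * the HW/FW uses to walk the chain page by page.
	 */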
1755 if (mode == QED_CHAIN_MODE_PBL) {
1756 size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
1757 p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
1758 size, &p_pbl_phys,
1759 GFP_KERNEL);
1760 if (!p_pbl_virt) {
1761 DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
1762 goto nomem;
1763 }
1764
1765 qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
1766 (u8)elem_size, intended_use,
1767 p_pbl_phys, p_pbl_virt);
1768 } else {
1769 qed_chain_init(p_chain, p_virt, p_phys, page_cnt,
1770 (u8)elem_size, intended_use, mode);
1771 }
1772
1773 return 0;
1774
1775nomem:
1776 dma_free_coherent(&cdev->pdev->dev,
1777 page_cnt * QED_CHAIN_PAGE_SIZE,
1778 p_virt, p_phys);
1779 dma_free_coherent(&cdev->pdev->dev,
1780 page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
1781 p_pbl_virt, p_pbl_phys);
1782
1783 return -ENOMEM;
1784}
1785
1786void qed_chain_free(struct qed_dev *cdev,
1787 struct qed_chain *p_chain)
1788{
1789 size_t size;
1790
1791 if (!p_chain->p_virt_addr)
1792 return;
1793
1794 if (p_chain->mode == QED_CHAIN_MODE_PBL) {
1795 size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
1796 dma_free_coherent(&cdev->pdev->dev, size,
1797 p_chain->pbl.p_virt_table,
1798 p_chain->pbl.p_phys_table);
1799 }
1800
1801 size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE;
1802 dma_free_coherent(&cdev->pdev->dev, size,
1803 p_chain->p_virt_addr,
1804 p_chain->p_phys_addr);
1805}
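
/* Usage sketch (illustrative only, not lifted from a specific caller;
 * the element type and count are assumptions):
 *
 *	struct qed_chain chain;
 *	int rc;
 *
 *	rc = qed_chain_alloc(cdev, QED_CHAIN_USE_TO_CONSUME_PRODUCE,
 *			     QED_CHAIN_MODE_PBL, 256,
 *			     sizeof(union eth_rx_cqe), &chain);
 *	if (rc)
 *		return rc;
 *	...
 *	qed_chain_free(cdev, &chain);
 */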
Manish Chopracee4d262015-10-26 11:02:28 +02001806
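/* The qed_fw_* helpers below translate a per-PF relative resource index
 * (L2 queue / vport / RSS engine) into its absolute HW index by adding the
 * RESC_START() base assigned to this hwfn, after validating the index
 * against RESC_NUM(); e.g., RESC_START == 16 and src_id == 2 yield 18
 * (illustrative values).
 */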
1807int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
1808 u16 src_id, u16 *dst_id)
1809{
1810 if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
1811 u16 min, max;
1812
1813 min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
1814 max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
1815 DP_NOTICE(p_hwfn,
1816 "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
1817 src_id, min, max);
1818
1819 return -EINVAL;
1820 }
1821
1822 *dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;
1823
1824 return 0;
1825}
1826
1827int qed_fw_vport(struct qed_hwfn *p_hwfn,
1828 u8 src_id, u8 *dst_id)
1829{
1830 if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
1831 u8 min, max;
1832
1833 min = (u8)RESC_START(p_hwfn, QED_VPORT);
1834 max = min + RESC_NUM(p_hwfn, QED_VPORT);
1835 DP_NOTICE(p_hwfn,
1836 "vport id [%d] is not valid, available indices [%d - %d]\n",
1837 src_id, min, max);
1838
1839 return -EINVAL;
1840 }
1841
1842 *dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;
1843
1844 return 0;
1845}
1846
1847int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
1848 u8 src_id, u8 *dst_id)
1849{
1850 if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
1851 u8 min, max;
1852
1853 min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
1854 max = min + RESC_NUM(p_hwfn, QED_RSS_ENG);
1855 DP_NOTICE(p_hwfn,
1856 "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
1857 src_id, min, max);
1858
1859 return -EINVAL;
1860 }
1861
1862 *dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;
1863
1864 return 0;
1865}
Manish Choprabcd197c2016-04-26 10:56:08 -04001866
1867/* Calculate final WFQ values for all vports and configure them.
1868 * After this configuration each vport will have
1869 * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
1870 */
1871static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
1872 struct qed_ptt *p_ptt,
1873 u32 min_pf_rate)
1874{
1875 struct init_qm_vport_params *vport_params;
1876 int i;
1877
1878 vport_params = p_hwfn->qm_info.qm_vport_params;
1879
1880 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
1881 u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
1882
1883 vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
1884 min_pf_rate;
1885 qed_init_vport_wfq(p_hwfn, p_ptt,
1886 vport_params[i].first_tx_pq_id,
1887 vport_params[i].vport_wfq);
1888 }
1889}
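
/* Worked example (illustrative values, assuming QED_WFQ_UNIT == 100): with
 * min_pf_rate = 10000 Mbps and a vport whose min_speed is 2500 Mbps,
 * vport_wfq = (2500 * 100) / 10000 = 25, i.e. the vport is guaranteed
 * roughly a quarter of the PF minimum rate.
 */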
1890
1891static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
1892 u32 min_pf_rate)
1893
1894{
1895 int i;
1896
1897 for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
1898 p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
1899}
1900
1901static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
1902 struct qed_ptt *p_ptt,
1903 u32 min_pf_rate)
1904{
1905 struct init_qm_vport_params *vport_params;
1906 int i;
1907
1908 vport_params = p_hwfn->qm_info.qm_vport_params;
1909
1910 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
1911 qed_init_wfq_default_param(p_hwfn, min_pf_rate);
1912 qed_init_vport_wfq(p_hwfn, p_ptt,
1913 vport_params[i].first_tx_pq_id,
1914 vport_params[i].vport_wfq);
1915 }
1916}
1917
1918/* This function performs several validations for WFQ
1919 * configuration and the requested min rate for a given vport:
1920 * 1. req_rate must be at least one percent of min_pf_rate.
1921 * 2. req_rate should not cause other vports [not configured for WFQ explicitly]
1922 * rates to get less than one percent of min_pf_rate.
1923 * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
1924 */
1925static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
1926 u16 vport_id, u32 req_rate,
1927 u32 min_pf_rate)
1928{
1929 u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
1930 int non_requested_count = 0, req_count = 0, i, num_vports;
1931
1932 num_vports = p_hwfn->qm_info.num_vports;
1933
1934 /* Accounting for the vports which are configured for WFQ explicitly */
1935 for (i = 0; i < num_vports; i++) {
1936 u32 tmp_speed;
1937
1938 if ((i != vport_id) &&
1939 p_hwfn->qm_info.wfq_data[i].configured) {
1940 req_count++;
1941 tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
1942 total_req_min_rate += tmp_speed;
1943 }
1944 }
1945
1946 /* Include current vport data as well */
1947 req_count++;
1948 total_req_min_rate += req_rate;
1949 non_requested_count = num_vports - req_count;
1950
1951 if (req_rate < min_pf_rate / QED_WFQ_UNIT) {
1952 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1953 "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
1954 vport_id, req_rate, min_pf_rate);
1955 return -EINVAL;
1956 }
1957
1958 if (num_vports > QED_WFQ_UNIT) {
1959 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1960 "Number of vports is greater than %d\n",
1961 QED_WFQ_UNIT);
1962 return -EINVAL;
1963 }
1964
1965 if (total_req_min_rate > min_pf_rate) {
1966 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1967 "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
1968 total_req_min_rate, min_pf_rate);
1969 return -EINVAL;
1970 }
1971
1972 total_left_rate = min_pf_rate - total_req_min_rate;
1973
1974 left_rate_per_vp = total_left_rate / non_requested_count;
1975 if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) {
1976 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1977 "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
1978 left_rate_per_vp, min_pf_rate);
1979 return -EINVAL;
1980 }
1981
1982 p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
1983 p_hwfn->qm_info.wfq_data[vport_id].configured = true;
1984
1985 for (i = 0; i < num_vports; i++) {
1986 if (p_hwfn->qm_info.wfq_data[i].configured)
1987 continue;
1988
1989 p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
1990 }
1991
1992 return 0;
1993}
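
/* Worked example for the validation above (illustrative values):
 * min_pf_rate = 10000 Mbps, num_vports = 4, and vport 0 requests 6000 Mbps
 * while no other vport is explicitly configured. All three checks pass
 * (6000 >= 100, 4 <= 100, 6000 <= 10000), so vport 0 keeps 6000 Mbps and
 * the remaining 4000 Mbps is split evenly - 1333 Mbps per unconfigured vport.
 */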
1994
Yuval Mintz733def62016-05-11 16:36:22 +03001995static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn,
1996 struct qed_ptt *p_ptt, u16 vp_id, u32 rate)
1997{
1998 struct qed_mcp_link_state *p_link;
1999 int rc = 0;
2000
2001 p_link = &p_hwfn->cdev->hwfns[0].mcp_info->link_output;
2002
2003 if (!p_link->min_pf_rate) {
2004 p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
2005 p_hwfn->qm_info.wfq_data[vp_id].configured = true;
2006 return rc;
2007 }
2008
2009 rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);
2010
2011 if (rc == 0)
2012 qed_configure_wfq_for_all_vports(p_hwfn, p_ptt,
2013 p_link->min_pf_rate);
2014 else
2015 DP_NOTICE(p_hwfn,
2016 "Validation failed while configuring min rate\n");
2017
2018 return rc;
2019}
2020
Manish Choprabcd197c2016-04-26 10:56:08 -04002021static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
2022 struct qed_ptt *p_ptt,
2023 u32 min_pf_rate)
2024{
2025 bool use_wfq = false;
2026 int rc = 0;
2027 u16 i;
2028
2029 /* Validate all pre configured vports for wfq */
2030 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
2031 u32 rate;
2032
2033 if (!p_hwfn->qm_info.wfq_data[i].configured)
2034 continue;
2035
2036 rate = p_hwfn->qm_info.wfq_data[i].min_speed;
2037 use_wfq = true;
2038
2039 rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
2040 if (rc) {
2041 DP_NOTICE(p_hwfn,
2042 "WFQ validation failed while configuring min rate\n");
2043 break;
2044 }
2045 }
2046
2047 if (!rc && use_wfq)
2048 qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
2049 else
2050 qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
2051
2052 return rc;
2053}
2054
Yuval Mintz733def62016-05-11 16:36:22 +03002055/* Main API for qed clients to configure vport min rate.
2056 * vp_id - vport id within the PF, in the range [0 - (total_num_vports_per_pf - 1)]
2057 * rate - Speed in Mbps to be assigned to the given vport.
2058 */
2059int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
2060{
2061 int i, rc = -EINVAL;
2062
2063	/* Currently not supported; might change in the future */
2064 if (cdev->num_hwfns > 1) {
2065 DP_NOTICE(cdev,
2066 "WFQ configuration is not supported for this device\n");
2067 return rc;
2068 }
2069
2070 for_each_hwfn(cdev, i) {
2071 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2072 struct qed_ptt *p_ptt;
2073
2074 p_ptt = qed_ptt_acquire(p_hwfn);
2075 if (!p_ptt)
2076 return -EBUSY;
2077
2078 rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);
2079
2080 if (!rc) {
2081 qed_ptt_release(p_hwfn, p_ptt);
2082 return rc;
2083 }
2084
2085 qed_ptt_release(p_hwfn, p_ptt);
2086 }
2087
2088 return rc;
2089}
2090
Manish Choprabcd197c2016-04-26 10:56:08 -04002091/* API to configure WFQ from mcp link change */
2092void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
2093{
2094 int i;
2095
2096 for_each_hwfn(cdev, i) {
2097 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2098
2099 __qed_configure_vp_wfq_on_link_change(p_hwfn,
2100 p_hwfn->p_dpc_ptt,
2101 min_pf_rate);
2102 }
2103}
Manish Chopra4b01e512016-04-26 10:56:09 -04002104
2105int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
2106 struct qed_ptt *p_ptt,
2107 struct qed_mcp_link_state *p_link,
2108 u8 max_bw)
2109{
2110 int rc = 0;
2111
2112 p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;
2113
2114 if (!p_link->line_speed && (max_bw != 100))
2115 return rc;
2116
2117 p_link->speed = (p_link->line_speed * max_bw) / 100;
2118 p_hwfn->qm_info.pf_rl = p_link->speed;
2119
2120 /* Since the limiter also affects Tx-switched traffic, we don't want it
2121 * to limit such traffic in case there's no actual limit.
2122	 * In that case, set the limit to an artificially high value.
2123 */
2124 if (max_bw == 100)
2125 p_hwfn->qm_info.pf_rl = 100000;
2126
2127 rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
2128 p_hwfn->qm_info.pf_rl);
2129
2130 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
2131 "Configured MAX bandwidth to be %08x Mb/sec\n",
2132 p_link->speed);
2133
2134 return rc;
2135}
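
/* Example (illustrative values): line_speed = 25000 Mbps and max_bw = 40
 * program a 10000 Mbps PF rate limit; max_bw = 100 instead programs the
 * 100000 Mbps ceiling so Tx-switched traffic is effectively unlimited.
 */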
2136
2137/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
2138int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw)
2139{
2140 int i, rc = -EINVAL;
2141
2142 if (max_bw < 1 || max_bw > 100) {
2143 DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n");
2144 return rc;
2145 }
2146
2147 for_each_hwfn(cdev, i) {
2148 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2149 struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
2150 struct qed_mcp_link_state *p_link;
2151 struct qed_ptt *p_ptt;
2152
2153 p_link = &p_lead->mcp_info->link_output;
2154
2155 p_ptt = qed_ptt_acquire(p_hwfn);
2156 if (!p_ptt)
2157 return -EBUSY;
2158
2159 rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt,
2160 p_link, max_bw);
2161
2162 qed_ptt_release(p_hwfn, p_ptt);
2163
2164 if (rc)
2165 break;
2166 }
2167
2168 return rc;
2169}
Manish Chopraa64b02d2016-04-26 10:56:10 -04002170
2171int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
2172 struct qed_ptt *p_ptt,
2173 struct qed_mcp_link_state *p_link,
2174 u8 min_bw)
2175{
2176 int rc = 0;
2177
2178 p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
2179 p_hwfn->qm_info.pf_wfq = min_bw;
2180
2181 if (!p_link->line_speed)
2182 return rc;
2183
2184 p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;
2185
2186 rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);
2187
2188 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
2189 "Configured MIN bandwidth to be %d Mb/sec\n",
2190 p_link->min_pf_rate);
2191
2192 return rc;
2193}
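
/* Example (illustrative values): line_speed = 25000 Mbps and min_bw = 20
 * yield min_pf_rate = 5000 Mbps, and the PF WFQ weight is programmed to 20.
 */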
2194
2195/* Main API to configure PF min bandwidth where bw range is [1-100] */
2196int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
2197{
2198 int i, rc = -EINVAL;
2199
2200 if (min_bw < 1 || min_bw > 100) {
2201 DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n");
2202 return rc;
2203 }
2204
2205 for_each_hwfn(cdev, i) {
2206 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2207 struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
2208 struct qed_mcp_link_state *p_link;
2209 struct qed_ptt *p_ptt;
2210
2211 p_link = &p_lead->mcp_info->link_output;
2212
2213 p_ptt = qed_ptt_acquire(p_hwfn);
2214 if (!p_ptt)
2215 return -EBUSY;
2216
2217 rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt,
2218 p_link, min_bw);
2219 if (rc) {
2220 qed_ptt_release(p_hwfn, p_ptt);
2221 return rc;
2222 }
2223
2224 if (p_link->min_pf_rate) {
2225 u32 min_rate = p_link->min_pf_rate;
2226
2227 rc = __qed_configure_vp_wfq_on_link_change(p_hwfn,
2228 p_ptt,
2229 min_rate);
2230 }
2231
2232 qed_ptt_release(p_hwfn, p_ptt);
2233 }
2234
2235 return rc;
2236}
Yuval Mintz733def62016-05-11 16:36:22 +03002237
2238void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2239{
2240 struct qed_mcp_link_state *p_link;
2241
2242 p_link = &p_hwfn->mcp_info->link_output;
2243
2244 if (p_link->min_pf_rate)
2245 qed_disable_wfq_for_all_vports(p_hwfn, p_ptt,
2246 p_link->min_pf_rate);
2247
2248 memset(p_hwfn->qm_info.wfq_data, 0,
2249 sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
2250}