1/* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation
3 *
4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of
6 * this source tree.
7 */
8
9#include <linux/types.h>
10#include <asm/byteorder.h>
11#include <linux/io.h>
12#include <linux/delay.h>
13#include <linux/dma-mapping.h>
14#include <linux/errno.h>
15#include <linux/kernel.h>
16#include <linux/mutex.h>
17#include <linux/pci.h>
18#include <linux/slab.h>
19#include <linux/string.h>
20#include <linux/etherdevice.h>
21#include <linux/qed/qed_chain.h>
22#include <linux/qed/qed_if.h>
23#include "qed.h"
24#include "qed_cxt.h"
25#include "qed_dev_api.h"
26#include "qed_hsi.h"
27#include "qed_hw.h"
28#include "qed_init_ops.h"
29#include "qed_int.h"
30#include "qed_mcp.h"
31#include "qed_reg_addr.h"
32#include "qed_sp.h"
33
34/* API common to all protocols */
35void qed_init_dp(struct qed_dev *cdev,
36 u32 dp_module, u8 dp_level)
37{
38 u32 i;
39
40 cdev->dp_level = dp_level;
41 cdev->dp_module = dp_module;
42 for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
43 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
44
45 p_hwfn->dp_level = dp_level;
46 p_hwfn->dp_module = dp_module;
47 }
48}
49
50void qed_init_struct(struct qed_dev *cdev)
51{
52 u8 i;
53
54 for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
55 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
56
57 p_hwfn->cdev = cdev;
58 p_hwfn->my_id = i;
59 p_hwfn->b_active = false;
60
61 mutex_init(&p_hwfn->dmae_info.mutex);
62 }
63
64 /* hwfn 0 is always active */
65 cdev->hwfns[0].b_active = true;
66
67 /* set the default cache alignment to 128 */
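	/* (i.e. cache line size = 1 << cache_shift = 128 bytes) */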
68 cdev->cache_shift = 7;
69}
70
71static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
72{
73 struct qed_qm_info *qm_info = &p_hwfn->qm_info;
74
75 kfree(qm_info->qm_pq_params);
76 qm_info->qm_pq_params = NULL;
77 kfree(qm_info->qm_vport_params);
78 qm_info->qm_vport_params = NULL;
79 kfree(qm_info->qm_port_params);
80 qm_info->qm_port_params = NULL;
81}
82
83void qed_resc_free(struct qed_dev *cdev)
84{
85 int i;
86
87 kfree(cdev->fw_data);
88 cdev->fw_data = NULL;
89
90 kfree(cdev->reset_stats);
91
92 for_each_hwfn(cdev, i) {
93 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
94
95 qed_cxt_mngr_free(p_hwfn);
96 qed_qm_info_free(p_hwfn);
97 qed_spq_free(p_hwfn);
98 qed_eq_free(p_hwfn, p_hwfn->p_eq);
99 qed_consq_free(p_hwfn, p_hwfn->p_consq);
100 qed_int_free(p_hwfn);
101 qed_dmae_info_free(p_hwfn);
102 }
103}
104
105static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
106{
107 struct qed_qm_info *qm_info = &p_hwfn->qm_info;
108 struct init_qm_port_params *p_qm_port;
109 u8 num_vports, i, vport_id, num_ports;
110 u16 num_pqs, multi_cos_tcs = 1;
111
112 memset(qm_info, 0, sizeof(*qm_info));
113
114 num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */
115 num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
116
117	/* Sanity check that the setup requests a legal number of resources */
118 if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
119 DP_ERR(p_hwfn,
120 "Need too many Physical queues - 0x%04x when only %04x are available\n",
121 num_pqs, RESC_NUM(p_hwfn, QED_PQ));
122 return -EINVAL;
123 }
124
125	/* PQs will be arranged as follows: first the per-TC PQs, then the pure-LB queue */
127 qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
128 num_pqs, GFP_ATOMIC);
129 if (!qm_info->qm_pq_params)
130 goto alloc_err;
131
132 qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
133 num_vports, GFP_ATOMIC);
134 if (!qm_info->qm_vport_params)
135 goto alloc_err;
136
137 qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
138 MAX_NUM_PORTS, GFP_ATOMIC);
139 if (!qm_info->qm_port_params)
140 goto alloc_err;
141
142 vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
143
144 /* First init per-TC PQs */
145 for (i = 0; i < multi_cos_tcs; i++) {
146 struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];
147
148 params->vport_id = vport_id;
149 params->tc_id = p_hwfn->hw_info.non_offload_tc;
150 params->wrr_group = 1;
151 }
152
153 /* Then init pure-LB PQ */
154 qm_info->pure_lb_pq = i;
155 qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
156 qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
157 qm_info->qm_pq_params[i].wrr_group = 1;
158 i++;
159
160 qm_info->offload_pq = 0;
161 qm_info->num_pqs = num_pqs;
162 qm_info->num_vports = num_vports;
163
164 /* Initialize qm port parameters */
165 num_ports = p_hwfn->cdev->num_ports_in_engines;
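	/* Each active port gets an equal share of the PBF command lines and BTB blocks */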
166 for (i = 0; i < num_ports; i++) {
167 p_qm_port = &qm_info->qm_port_params[i];
168 p_qm_port->active = 1;
169 p_qm_port->num_active_phys_tcs = 4;
170 p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
171 p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
172 }
173
174 qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
175
176 qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
177
178 qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);
179
180 qm_info->pf_wfq = 0;
181 qm_info->pf_rl = 0;
182 qm_info->vport_rl_en = 1;
183
184 return 0;
185
186alloc_err:
187 DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
188 kfree(qm_info->qm_pq_params);
189 kfree(qm_info->qm_vport_params);
190 kfree(qm_info->qm_port_params);
191
192 return -ENOMEM;
193}
194
195int qed_resc_alloc(struct qed_dev *cdev)
196{
197 struct qed_consq *p_consq;
198 struct qed_eq *p_eq;
199 int i, rc = 0;
200
201 cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
202 if (!cdev->fw_data)
203 return -ENOMEM;
204
205 for_each_hwfn(cdev, i) {
206 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
207
208 /* First allocate the context manager structure */
209 rc = qed_cxt_mngr_alloc(p_hwfn);
210 if (rc)
211 goto alloc_err;
212
213		/* Set the HW cid/tid numbers (in the context manager)
214 * Must be done prior to any further computations.
215 */
216 rc = qed_cxt_set_pf_params(p_hwfn);
217 if (rc)
218 goto alloc_err;
219
220 /* Prepare and process QM requirements */
221 rc = qed_init_qm_info(p_hwfn);
222 if (rc)
223 goto alloc_err;
224
225 /* Compute the ILT client partition */
226 rc = qed_cxt_cfg_ilt_compute(p_hwfn);
227 if (rc)
228 goto alloc_err;
229
230 /* CID map / ILT shadow table / T2
231		 * The table sizes are determined by the computations above
232 */
233 rc = qed_cxt_tables_alloc(p_hwfn);
234 if (rc)
235 goto alloc_err;
236
237		/* SPQ, must follow ILT because it initializes the SPQ context */
238 rc = qed_spq_alloc(p_hwfn);
239 if (rc)
240 goto alloc_err;
241
242 /* SP status block allocation */
243 p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
244 RESERVED_PTT_DPC);
245
246 rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
247 if (rc)
248 goto alloc_err;
249
250 /* EQ */
251 p_eq = qed_eq_alloc(p_hwfn, 256);
252
253 if (!p_eq)
254 goto alloc_err;
255 p_hwfn->p_eq = p_eq;
256
257 p_consq = qed_consq_alloc(p_hwfn);
258 if (!p_consq)
259 goto alloc_err;
260 p_hwfn->p_consq = p_consq;
261
262 /* DMA info initialization */
263 rc = qed_dmae_info_alloc(p_hwfn);
264 if (rc) {
265 DP_NOTICE(p_hwfn,
266 "Failed to allocate memory for dmae_info structure\n");
267 goto alloc_err;
268 }
269 }
270
271 cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
272 if (!cdev->reset_stats) {
273 DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
274 goto alloc_err;
275 }
276
277 return 0;
278
279alloc_err:
280 qed_resc_free(cdev);
281 return rc;
282}
283
284void qed_resc_setup(struct qed_dev *cdev)
285{
286 int i;
287
288 for_each_hwfn(cdev, i) {
289 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
290
291 qed_cxt_mngr_setup(p_hwfn);
292 qed_spq_setup(p_hwfn);
293 qed_eq_setup(p_hwfn, p_hwfn->p_eq);
294 qed_consq_setup(p_hwfn, p_hwfn->p_consq);
295
296 /* Read shadow of current MFW mailbox */
297 qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
298 memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
299 p_hwfn->mcp_info->mfw_mb_cur,
300 p_hwfn->mcp_info->mfw_mb_length);
301
302 qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
303 }
304}
305
306#define FINAL_CLEANUP_CMD_OFFSET (0)
307#define FINAL_CLEANUP_CMD (0x1)
308#define FINAL_CLEANUP_VALID_OFFSET (6)
309#define FINAL_CLEANUP_VFPF_ID_SHIFT (7)
310#define FINAL_CLEANUP_COMP (0x2)
311#define FINAL_CLEANUP_POLL_CNT (100)
312#define FINAL_CLEANUP_POLL_TIME (10)
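/* The FW ack below is polled for up to ~1 second:
 * FINAL_CLEANUP_POLL_CNT iterations of FINAL_CLEANUP_POLL_TIME ms each.
 */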
313int qed_final_cleanup(struct qed_hwfn *p_hwfn,
314 struct qed_ptt *p_ptt,
315 u16 id)
316{
317 u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
318 int rc = -EBUSY;
319
320 addr = GTT_BAR0_MAP_REG_USDM_RAM + USTORM_FLR_FINAL_ACK_OFFSET;
321
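	/* Build the SDM operation-generator command: opcode, valid bit, the
	 * VF/PF id being cleaned and the completion type expected back (the
	 * driver polls the USTORM FLR final-ack location set up above).
	 * Field meanings are inferred from the define names.
	 */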
322 command |= FINAL_CLEANUP_CMD << FINAL_CLEANUP_CMD_OFFSET;
323 command |= 1 << FINAL_CLEANUP_VALID_OFFSET;
324 command |= id << FINAL_CLEANUP_VFPF_ID_SHIFT;
325 command |= FINAL_CLEANUP_COMP << SDM_OP_GEN_COMP_TYPE_SHIFT;
326
327 /* Make sure notification is not set before initiating final cleanup */
328 if (REG_RD(p_hwfn, addr)) {
329 DP_NOTICE(
330 p_hwfn,
331 "Unexpected; Found final cleanup notification before initiating final cleanup\n");
332 REG_WR(p_hwfn, addr, 0);
333 }
334
335 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
336 "Sending final cleanup for PFVF[%d] [Command %08x\n]",
337 id, command);
338
339 qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
340
341 /* Poll until completion */
342 while (!REG_RD(p_hwfn, addr) && count--)
343 msleep(FINAL_CLEANUP_POLL_TIME);
344
345 if (REG_RD(p_hwfn, addr))
346 rc = 0;
347 else
348 DP_NOTICE(p_hwfn,
349 "Failed to receive FW final cleanup notification\n");
350
351 /* Cleanup afterwards */
352 REG_WR(p_hwfn, addr, 0);
353
354 return rc;
355}
356
357static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
358{
359 int hw_mode = 0;
360
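	/* The chip-revision mode is hard-coded to BB A0 here */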
361 hw_mode = (1 << MODE_BB_A0);
362
363 switch (p_hwfn->cdev->num_ports_in_engines) {
364 case 1:
365 hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
366 break;
367 case 2:
368 hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
369 break;
370 case 4:
371 hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
372 break;
373 default:
374		DP_NOTICE(p_hwfn, "num_ports_in_engines = %d not supported\n",
375 p_hwfn->cdev->num_ports_in_engines);
376 return;
377 }
378
379 switch (p_hwfn->cdev->mf_mode) {
380 case SF:
381 hw_mode |= 1 << MODE_SF;
382 break;
383 case MF_OVLAN:
384 hw_mode |= 1 << MODE_MF_SD;
385 break;
386 case MF_NPAR:
387 hw_mode |= 1 << MODE_MF_SI;
388 break;
389 default:
390 DP_NOTICE(p_hwfn, "Unsupported MF mode, init as SF\n");
391 hw_mode |= 1 << MODE_SF;
392 }
393
394 hw_mode |= 1 << MODE_ASIC;
395
396 p_hwfn->hw_info.hw_mode = hw_mode;
397}
398
399/* Init run time data for all PFs on an engine. */
400static void qed_init_cau_rt_data(struct qed_dev *cdev)
401{
402 u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
403 int i, sb_id;
404
405 for_each_hwfn(cdev, i) {
406 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
407 struct qed_igu_info *p_igu_info;
408 struct qed_igu_block *p_block;
409 struct cau_sb_entry sb_entry;
410
411 p_igu_info = p_hwfn->hw_info.p_igu_info;
412
413 for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
414 sb_id++) {
415 p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
416 if (!p_block->is_pf)
417 continue;
418
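			/* Build a CAU entry for this PF-owned status block and
			 * stage it in the runtime-init array; each entry spans
			 * two 32-bit RT registers, hence the sb_id * 2 stride.
			 */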
419 qed_init_cau_sb_entry(p_hwfn, &sb_entry,
420 p_block->function_id,
421 0, 0);
422 STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
423 sb_entry);
424 }
425 }
426}
427
428static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
429 struct qed_ptt *p_ptt,
430 int hw_mode)
431{
432 struct qed_qm_info *qm_info = &p_hwfn->qm_info;
433 struct qed_qm_common_rt_init_params params;
434 struct qed_dev *cdev = p_hwfn->cdev;
435 int rc = 0;
436
437 qed_init_cau_rt_data(cdev);
438
439 /* Program GTT windows */
440 qed_gtt_init(p_hwfn);
441
442 if (p_hwfn->mcp_info) {
443 if (p_hwfn->mcp_info->func_info.bandwidth_max)
444 qm_info->pf_rl_en = 1;
445 if (p_hwfn->mcp_info->func_info.bandwidth_min)
446 qm_info->pf_wfq_en = 1;
447 }
448
449 memset(&params, 0, sizeof(params));
450 params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
451 params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
452 params.pf_rl_en = qm_info->pf_rl_en;
453 params.pf_wfq_en = qm_info->pf_wfq_en;
454 params.vport_rl_en = qm_info->vport_rl_en;
455 params.vport_wfq_en = qm_info->vport_wfq_en;
456 params.port_params = qm_info->qm_port_params;
457
458 qed_qm_common_rt_init(p_hwfn, &params);
459
460 qed_cxt_hw_init_common(p_hwfn);
461
462	/* Close the gates from NIG to BRB/Storm; by default they are open, but
463 * we close them to prevent NIG from passing data to reset blocks.
464 * Should have been done in the ENGINE phase, but init-tool lacks
465 * proper port-pretend capabilities.
466 */
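	/* The pretend/unpretend pair below repeats the same two writes for
	 * the sibling port (port_id ^ 1).
	 */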
467 qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
468 qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
469 qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
470 qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
471 qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
472 qed_port_unpretend(p_hwfn, p_ptt);
473
474 rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
475 if (rc != 0)
476 return rc;
477
478 qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
479 qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
480
481 /* Disable relaxed ordering in the PCI config space */
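	/* (0x20b4 presumably shadows the PCIe Device Control register;
	 * bit 4 is the Relaxed Ordering Enable bit)
	 */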
482 qed_wr(p_hwfn, p_ptt, 0x20b4,
483 qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);
484
485 return rc;
486}
487
488static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
489 struct qed_ptt *p_ptt,
490 int hw_mode)
491{
492 int rc = 0;
493
494 rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
495 hw_mode);
496 return rc;
497}
498
499static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
500 struct qed_ptt *p_ptt,
501 int hw_mode,
502 bool b_hw_start,
503 enum qed_int_mode int_mode,
504 bool allow_npar_tx_switch)
505{
506 u8 rel_pf_id = p_hwfn->rel_pf_id;
507 int rc = 0;
508
509 if (p_hwfn->mcp_info) {
510 struct qed_mcp_function_info *p_info;
511
512 p_info = &p_hwfn->mcp_info->func_info;
513 if (p_info->bandwidth_min)
514 p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
515
516		/* Update the rate limit once we actually have a link */
517 p_hwfn->qm_info.pf_rl = 100;
518 }
519
520 qed_cxt_hw_init_pf(p_hwfn);
521
522 qed_int_igu_init_rt(p_hwfn);
523
524 /* Set VLAN in NIG if needed */
525 if (hw_mode & (1 << MODE_MF_SD)) {
526 DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
527 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
528 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
529 p_hwfn->hw_info.ovlan);
530 }
531
532 /* Enable classification by MAC if needed */
533	if (hw_mode & (1 << MODE_MF_SI)) {
534 DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
535 "Configuring TAGMAC_CLS_TYPE\n");
536 STORE_RT_REG(p_hwfn,
537 NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
538 }
539
540	/* Protocol Configuration */
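	/* TCP, FCoE and RoCE searches stay disabled here - presumably only
	 * plain L2 traffic is handled at this stage.
	 */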
541 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
542 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
543 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
544
545	/* Clean the chip of any remains left by a previous driver, if such exist */
546 rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id);
547 if (rc != 0)
548 return rc;
549
550 /* PF Init sequence */
551 rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
552 if (rc)
553 return rc;
554
555 /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
556 rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
557 if (rc)
558 return rc;
559
560 /* Pure runtime initializations - directly to the HW */
561 qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
562
563 if (b_hw_start) {
564 /* enable interrupts */
565 qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
566
567 /* send function start command */
568 rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode);
569 if (rc)
570 DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
571 }
572 return rc;
573}
574
575static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
576 struct qed_ptt *p_ptt,
577 u8 enable)
578{
579 u32 delay_idx = 0, val, set_val = enable ? 1 : 0;
580
581 /* Change PF in PXP */
582 qed_wr(p_hwfn, p_ptt,
583 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
584
585	/* Wait until the value is set - poll every 50us for up to 1 second */
586 for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
587 val = qed_rd(p_hwfn, p_ptt,
588 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
589 if (val == set_val)
590 break;
591
592 usleep_range(50, 60);
593 }
594
595 if (val != set_val) {
596 DP_NOTICE(p_hwfn,
597 "PFID_ENABLE_MASTER wasn't changed after a second\n");
598 return -EAGAIN;
599 }
600
601 return 0;
602}
603
604static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
605 struct qed_ptt *p_main_ptt)
606{
607 /* Read shadow of current MFW mailbox */
608 qed_mcp_read_mb(p_hwfn, p_main_ptt);
609 memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
610 p_hwfn->mcp_info->mfw_mb_cur,
611 p_hwfn->mcp_info->mfw_mb_length);
612}
613
614int qed_hw_init(struct qed_dev *cdev,
615 bool b_hw_start,
616 enum qed_int_mode int_mode,
617 bool allow_npar_tx_switch,
618 const u8 *bin_fw_data)
619{
620 u32 load_code, param;
621 int rc, mfw_rc, i;
622
623 rc = qed_init_fw_data(cdev, bin_fw_data);
624 if (rc != 0)
625 return rc;
626
627 for_each_hwfn(cdev, i) {
628 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
629
630 /* Enable DMAE in PXP */
631 rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
632
633 qed_calc_hw_mode(p_hwfn);
634
635 rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
636 &load_code);
637 if (rc) {
638 DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
639 return rc;
640 }
641
642 qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
643
644 DP_VERBOSE(p_hwfn, QED_MSG_SP,
645 "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
646 rc, load_code);
647
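		/* A DRV_LOAD_ENGINE response means this is the first function
		 * to load on the engine, so it also runs the engine-wide
		 * (common) init phase below.
		 */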
648 p_hwfn->first_on_engine = (load_code ==
649 FW_MSG_CODE_DRV_LOAD_ENGINE);
650
651 switch (load_code) {
652 case FW_MSG_CODE_DRV_LOAD_ENGINE:
653 rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
654 p_hwfn->hw_info.hw_mode);
655 if (rc)
656 break;
657		/* Fall through */
658 case FW_MSG_CODE_DRV_LOAD_PORT:
659 rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
660 p_hwfn->hw_info.hw_mode);
661 if (rc)
662 break;
663
664		/* Fall through */
665 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
666 rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
667 p_hwfn->hw_info.hw_mode,
668 b_hw_start, int_mode,
669 allow_npar_tx_switch);
670 break;
671 default:
672 rc = -EINVAL;
673 break;
674 }
675
676 if (rc)
677 DP_NOTICE(p_hwfn,
678 "init phase failed for loadcode 0x%x (rc %d)\n",
679 load_code, rc);
680
681 /* ACK mfw regardless of success or failure of initialization */
682 mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
683 DRV_MSG_CODE_LOAD_DONE,
684 0, &load_code, &param);
685 if (rc)
686 return rc;
687 if (mfw_rc) {
688 DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
689 return mfw_rc;
690 }
691
692 p_hwfn->hw_init_done = true;
693 }
694
695 return 0;
696}
697
698#define QED_HW_STOP_RETRY_LIMIT (10)
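/* Together with the usleep_range(1000, 2000) in the scan-poll loop below,
 * this bounds the wait for the timers' linear scans to roughly 10-20 ms.
 */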
699int qed_hw_stop(struct qed_dev *cdev)
700{
701 int rc = 0, t_rc;
702 int i, j;
703
704 for_each_hwfn(cdev, j) {
705 struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
706 struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
707
708 DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
709
710 /* mark the hw as uninitialized... */
711 p_hwfn->hw_init_done = false;
712
713 rc = qed_sp_pf_stop(p_hwfn);
714 if (rc)
715 return rc;
716
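		/* Close the NIG LLH-to-BRB gate for this PF so received
		 * traffic is no longer forwarded while we tear down
		 * (inferred from the register name).
		 */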
717 qed_wr(p_hwfn, p_ptt,
718 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
719
720 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
721 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
722 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
723 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
724 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
725
726 qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
727 qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
728 for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
729 if ((!qed_rd(p_hwfn, p_ptt,
730 TM_REG_PF_SCAN_ACTIVE_CONN)) &&
731 (!qed_rd(p_hwfn, p_ptt,
732 TM_REG_PF_SCAN_ACTIVE_TASK)))
733 break;
734
735 usleep_range(1000, 2000);
736 }
737 if (i == QED_HW_STOP_RETRY_LIMIT)
738 DP_NOTICE(p_hwfn,
739 "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
740 (u8)qed_rd(p_hwfn, p_ptt,
741 TM_REG_PF_SCAN_ACTIVE_CONN),
742 (u8)qed_rd(p_hwfn, p_ptt,
743 TM_REG_PF_SCAN_ACTIVE_TASK));
744
745 /* Disable Attention Generation */
746 qed_int_igu_disable_int(p_hwfn, p_ptt);
747
748 qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
749 qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
750
751 qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
752
753 /* Need to wait 1ms to guarantee SBs are cleared */
754 usleep_range(1000, 2000);
755 }
756
757 /* Disable DMAE in PXP - in CMT, this should only be done for
758 * first hw-function, and only after all transactions have
759 * stopped for all active hw-functions.
760 */
761 t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
762 cdev->hwfns[0].p_main_ptt,
763 false);
764 if (t_rc != 0)
765 rc = t_rc;
766
767 return rc;
768}
769
770static int qed_reg_assert(struct qed_hwfn *hwfn,
771 struct qed_ptt *ptt, u32 reg,
772 bool expected)
773{
774 u32 assert_val = qed_rd(hwfn, ptt, reg);
775
776 if (assert_val != expected) {
777		DP_NOTICE(hwfn, "Value at address 0x%x is 0x%08x, expected 0x%08x\n",
778			  reg, assert_val, expected);
779 return -EINVAL;
780 }
781
782 return 0;
783}
784
785int qed_hw_reset(struct qed_dev *cdev)
786{
787 int rc = 0;
788 u32 unload_resp, unload_param;
789 int i;
790
791 for_each_hwfn(cdev, i) {
792 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
793
794 DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
795
796 /* Check for incorrect states */
797 qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
798 QM_REG_USG_CNT_PF_TX, 0);
799 qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
800 QM_REG_USG_CNT_PF_OTHER, 0);
801
802 /* Disable PF in HW blocks */
803 qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
804 qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
805 qed_wr(p_hwfn, p_hwfn->p_main_ptt,
806 TCFC_REG_STRONG_ENABLE_PF, 0);
807 qed_wr(p_hwfn, p_hwfn->p_main_ptt,
808 CCFC_REG_STRONG_ENABLE_PF, 0);
809
810 /* Send unload command to MCP */
811 rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
812 DRV_MSG_CODE_UNLOAD_REQ,
813 DRV_MB_PARAM_UNLOAD_WOL_MCP,
814 &unload_resp, &unload_param);
815 if (rc) {
816 DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
817 unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
818 }
819
820 rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
821 DRV_MSG_CODE_UNLOAD_DONE,
822 0, &unload_resp, &unload_param);
823 if (rc) {
824 DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
825 return rc;
826 }
827 }
828
829 return rc;
830}
831
832/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
833static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
834{
835 qed_ptt_pool_free(p_hwfn);
836 kfree(p_hwfn->hw_info.p_igu_info);
837}
838
839/* Setup bar access */
840static int qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
841{
842 int rc;
843
844 /* Allocate PTT pool */
845 rc = qed_ptt_pool_alloc(p_hwfn);
846 if (rc)
847 return rc;
848
849 /* Allocate the main PTT */
850 p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
851
852 /* clear indirect access */
853 qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
854 qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
855 qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
856 qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);
857
858 /* Clean Previous errors if such exist */
859 qed_wr(p_hwfn, p_hwfn->p_main_ptt,
860 PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
861 1 << p_hwfn->abs_pf_id);
862
863 /* enable internal target-read */
864 qed_wr(p_hwfn, p_hwfn->p_main_ptt,
865 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
866
867 return 0;
868}
869
870static void get_function_id(struct qed_hwfn *p_hwfn)
871{
872 /* ME Register */
873 p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);
874
875 p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
876
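	/* The absolute PF id sits in bits [19:16] of the concrete FID; the
	 * relative PF id and port come from the PXP_CONCRETE_FID_* fields.
	 */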
877 p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
878 p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
879 PXP_CONCRETE_FID_PFID);
880 p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
881 PXP_CONCRETE_FID_PORT);
882}
883
884static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
885{
886 u32 *resc_start = p_hwfn->hw_info.resc_start;
887 u32 *resc_num = p_hwfn->hw_info.resc_num;
888 int num_funcs, i;
889
890 num_funcs = IS_MF(p_hwfn) ? MAX_NUM_PFS_BB
891 : p_hwfn->cdev->num_ports_in_engines;
892
893 resc_num[QED_SB] = min_t(u32,
894 (MAX_SB_PER_PATH_BB / num_funcs),
895 qed_int_get_num_sbs(p_hwfn, NULL));
896 resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
897 resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
898 resc_num[QED_RL] = 8;
899 resc_num[QED_ILT] = 950;
900
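	/* Split every resource evenly: each PF gets an equal, contiguous
	 * slice starting at rel_pf_id * slice_size.
	 */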
901 for (i = 0; i < QED_MAX_RESC; i++)
902 resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;
903
904 DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
905 "The numbers for each resource are:\n"
906 "SB = %d start = %d\n"
907 "VPORT = %d start = %d\n"
908 "PQ = %d start = %d\n"
909 "RL = %d start = %d\n"
910 "ILT = %d start = %d\n",
911 p_hwfn->hw_info.resc_num[QED_SB],
912 p_hwfn->hw_info.resc_start[QED_SB],
913 p_hwfn->hw_info.resc_num[QED_VPORT],
914 p_hwfn->hw_info.resc_start[QED_VPORT],
915 p_hwfn->hw_info.resc_num[QED_PQ],
916 p_hwfn->hw_info.resc_start[QED_PQ],
917 p_hwfn->hw_info.resc_num[QED_RL],
918 p_hwfn->hw_info.resc_start[QED_RL],
919 p_hwfn->hw_info.resc_num[QED_ILT],
920 p_hwfn->hw_info.resc_start[QED_ILT]);
921}
922
923static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
924 struct qed_ptt *p_ptt)
925{
926 u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, nvm_cfg_addr;
927 u32 val;
928
929 /* Read global nvm_cfg address */
930 nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
931
932 /* Verify MCP has initialized it */
933 if (!nvm_cfg_addr) {
934 DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
935 return -EINVAL;
936 }
937
938	/* Read nvm_cfg1 (note this is just the offset, not the offsize - TBD) */
939 nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
940
941 /* Read Vendor Id / Device Id */
942 addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
943 offsetof(struct nvm_cfg1, glob) +
944 offsetof(struct nvm_cfg1_glob, pci_id);
945 p_hwfn->hw_info.vendor_id = qed_rd(p_hwfn, p_ptt, addr) &
946 NVM_CFG1_GLOB_VENDOR_ID_MASK;
947 addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
948 offsetof(struct nvm_cfg1, func[MCP_PF_ID(p_hwfn)]) +
949 offsetof(struct nvm_cfg1_func, device_id);
950 val = qed_rd(p_hwfn, p_ptt, addr);
951
952 if (IS_MF(p_hwfn)) {
953 p_hwfn->hw_info.device_id =
954 (val & NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK) >>
955 NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET;
956 } else {
957 p_hwfn->hw_info.device_id =
958 (val & NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK) >>
959 NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET;
960 }
961
962 /* Read Multi-function information from shmem */
963 addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
964 offsetof(struct nvm_cfg1, glob) +
965 offsetof(struct nvm_cfg1_glob, generic_cont0);
966
967 generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);
968
969 mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
970 NVM_CFG1_GLOB_MF_MODE_OFFSET;
971
972 switch (mf_mode) {
973 case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
974 p_hwfn->cdev->mf_mode = MF_OVLAN;
975 break;
976 case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
977 p_hwfn->cdev->mf_mode = MF_NPAR;
978 break;
979 case NVM_CFG1_GLOB_MF_MODE_FORCED_SF:
980 p_hwfn->cdev->mf_mode = SF;
981 break;
982 }
983 DP_INFO(p_hwfn, "Multi function mode is %08x\n",
984 p_hwfn->cdev->mf_mode);
985
986 return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
987}
988
989static int
990qed_get_hw_info(struct qed_hwfn *p_hwfn,
991 struct qed_ptt *p_ptt,
992 enum qed_pci_personality personality)
993{
994 u32 port_mode;
995 int rc;
996
997 /* Read the port mode */
998 port_mode = qed_rd(p_hwfn, p_ptt,
999 CNIG_REG_NW_PORT_MODE_BB_B0);
1000
1001 if (port_mode < 3) {
1002 p_hwfn->cdev->num_ports_in_engines = 1;
1003 } else if (port_mode <= 5) {
1004 p_hwfn->cdev->num_ports_in_engines = 2;
1005 } else {
1006		DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
1007			  port_mode);
1008
1009		/* Default num_ports_in_engines to 1 */
1010 p_hwfn->cdev->num_ports_in_engines = 1;
1011 }
1012
1013 qed_hw_get_nvm_info(p_hwfn, p_ptt);
1014
1015 rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
1016 if (rc)
1017 return rc;
1018
1019 if (qed_mcp_is_init(p_hwfn))
1020 ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
1021 p_hwfn->mcp_info->func_info.mac);
1022 else
1023 eth_random_addr(p_hwfn->hw_info.hw_mac_addr);
1024
1025 if (qed_mcp_is_init(p_hwfn)) {
1026 if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
1027 p_hwfn->hw_info.ovlan =
1028 p_hwfn->mcp_info->func_info.ovlan;
1029
1030 qed_mcp_cmd_port_init(p_hwfn, p_ptt);
1031 }
1032
1033 if (qed_mcp_is_init(p_hwfn)) {
1034 enum qed_pci_personality protocol;
1035
1036 protocol = p_hwfn->mcp_info->func_info.protocol;
1037 p_hwfn->hw_info.personality = protocol;
1038 }
1039
1040 qed_hw_get_resc(p_hwfn);
1041
1042 return rc;
1043}
1044
1045static void qed_get_dev_info(struct qed_dev *cdev)
1046{
1047 u32 tmp;
1048
1049 cdev->chip_num = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
1050 MISCS_REG_CHIP_NUM);
1051 cdev->chip_rev = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
1052 MISCS_REG_CHIP_REV);
1053 MASK_FIELD(CHIP_REV, cdev->chip_rev);
1054
1055 /* Learn number of HW-functions */
1056 tmp = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
1057 MISCS_REG_CMT_ENABLED_FOR_PAIR);
1058
1059 if (tmp & (1 << cdev->hwfns[0].rel_pf_id)) {
1060 DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
1061 cdev->num_hwfns = 2;
1062 } else {
1063 cdev->num_hwfns = 1;
1064 }
1065
1066 cdev->chip_bond_id = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
1067 MISCS_REG_CHIP_TEST_REG) >> 4;
1068 MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
1069 cdev->chip_metal = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
1070 MISCS_REG_CHIP_METAL);
1071 MASK_FIELD(CHIP_METAL, cdev->chip_metal);
1072
1073 DP_INFO(cdev->hwfns,
1074 "Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
1075 cdev->chip_num, cdev->chip_rev,
1076 cdev->chip_bond_id, cdev->chip_metal);
1077}
1078
1079static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
1080 void __iomem *p_regview,
1081 void __iomem *p_doorbells,
1082 enum qed_pci_personality personality)
1083{
1084 int rc = 0;
1085
1086 /* Split PCI bars evenly between hwfns */
1087 p_hwfn->regview = p_regview;
1088 p_hwfn->doorbells = p_doorbells;
1089
1090 /* Validate that chip access is feasible */
1091 if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
1092 DP_ERR(p_hwfn,
1093 "Reading the ME register returns all Fs; Preventing further chip access\n");
1094 return -EINVAL;
1095 }
1096
1097 get_function_id(p_hwfn);
1098
1099 rc = qed_hw_hwfn_prepare(p_hwfn);
1100 if (rc) {
1101 DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
1102 goto err0;
1103 }
1104
1105 /* First hwfn learns basic information, e.g., number of hwfns */
1106 if (!p_hwfn->my_id)
1107 qed_get_dev_info(p_hwfn->cdev);
1108
1109 /* Initialize MCP structure */
1110 rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
1111 if (rc) {
1112 DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
1113 goto err1;
1114 }
1115
1116 /* Read the device configuration information from the HW and SHMEM */
1117 rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
1118 if (rc) {
1119 DP_NOTICE(p_hwfn, "Failed to get HW information\n");
1120 goto err2;
1121 }
1122
1123 /* Allocate the init RT array and initialize the init-ops engine */
1124 rc = qed_init_alloc(p_hwfn);
1125 if (rc) {
1126 DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
1127 goto err2;
1128 }
1129
1130 return rc;
1131err2:
1132 qed_mcp_free(p_hwfn);
1133err1:
1134 qed_hw_hwfn_free(p_hwfn);
1135err0:
1136 return rc;
1137}
1138
1139static u32 qed_hw_bar_size(struct qed_dev *cdev,
1140 u8 bar_id)
1141{
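	/* bar_id 0 is the register BAR (BAR0); any other id is treated as the
	 * doorbell BAR (BAR2). Each BAR is split evenly between hw-functions.
	 */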
1142 u32 size = pci_resource_len(cdev->pdev, (bar_id > 0) ? 2 : 0);
1143
1144 return size / cdev->num_hwfns;
1145}
1146
1147int qed_hw_prepare(struct qed_dev *cdev,
1148 int personality)
1149{
1150 int rc, i;
1151
1152 /* Store the precompiled init data ptrs */
1153 qed_init_iro_array(cdev);
1154
1155 /* Initialize the first hwfn - will learn number of hwfns */
1156 rc = qed_hw_prepare_single(&cdev->hwfns[0], cdev->regview,
1157 cdev->doorbells, personality);
1158 if (rc)
1159 return rc;
1160
1161 personality = cdev->hwfns[0].hw_info.personality;
1162
1163 /* Initialize the rest of the hwfns */
1164 for (i = 1; i < cdev->num_hwfns; i++) {
1165 void __iomem *p_regview, *p_doorbell;
1166
1167 p_regview = cdev->regview +
1168 i * qed_hw_bar_size(cdev, 0);
1169 p_doorbell = cdev->doorbells +
1170 i * qed_hw_bar_size(cdev, 1);
1171 rc = qed_hw_prepare_single(&cdev->hwfns[i], p_regview,
1172 p_doorbell, personality);
1173 if (rc) {
1174 /* Cleanup previously initialized hwfns */
1175 while (--i >= 0) {
1176 qed_init_free(&cdev->hwfns[i]);
1177 qed_mcp_free(&cdev->hwfns[i]);
1178 qed_hw_hwfn_free(&cdev->hwfns[i]);
1179 }
1180 return rc;
1181 }
1182 }
1183
1184 return 0;
1185}
1186
1187void qed_hw_remove(struct qed_dev *cdev)
1188{
1189 int i;
1190
1191 for_each_hwfn(cdev, i) {
1192 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1193
1194 qed_init_free(p_hwfn);
1195 qed_hw_hwfn_free(p_hwfn);
1196 qed_mcp_free(p_hwfn);
1197 }
1198}
1199
1200int qed_chain_alloc(struct qed_dev *cdev,
1201 enum qed_chain_use_mode intended_use,
1202 enum qed_chain_mode mode,
1203 u16 num_elems,
1204 size_t elem_size,
1205 struct qed_chain *p_chain)
1206{
1207 dma_addr_t p_pbl_phys = 0;
1208 void *p_pbl_virt = NULL;
1209 dma_addr_t p_phys = 0;
1210 void *p_virt = NULL;
1211 u16 page_cnt = 0;
1212 size_t size;
1213
1214 if (mode == QED_CHAIN_MODE_SINGLE)
1215 page_cnt = 1;
1216 else
1217 page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
1218
1219 size = page_cnt * QED_CHAIN_PAGE_SIZE;
1220 p_virt = dma_alloc_coherent(&cdev->pdev->dev,
1221 size, &p_phys, GFP_KERNEL);
1222 if (!p_virt) {
1223 DP_NOTICE(cdev, "Failed to allocate chain mem\n");
1224 goto nomem;
1225 }
1226
1227 if (mode == QED_CHAIN_MODE_PBL) {
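		/* PBL mode also needs a page-base list with one entry per
		 * chain page.
		 */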
1228 size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
1229 p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
1230 size, &p_pbl_phys,
1231 GFP_KERNEL);
1232 if (!p_pbl_virt) {
1233 DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
1234 goto nomem;
1235 }
1236
1237 qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
1238 (u8)elem_size, intended_use,
1239 p_pbl_phys, p_pbl_virt);
1240 } else {
1241 qed_chain_init(p_chain, p_virt, p_phys, page_cnt,
1242 (u8)elem_size, intended_use, mode);
1243 }
1244
1245 return 0;
1246
1247nomem:
1248 dma_free_coherent(&cdev->pdev->dev,
1249 page_cnt * QED_CHAIN_PAGE_SIZE,
1250 p_virt, p_phys);
1251 dma_free_coherent(&cdev->pdev->dev,
1252 page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
1253 p_pbl_virt, p_pbl_phys);
1254
1255 return -ENOMEM;
1256}
1257
1258void qed_chain_free(struct qed_dev *cdev,
1259 struct qed_chain *p_chain)
1260{
1261 size_t size;
1262
1263 if (!p_chain->p_virt_addr)
1264 return;
1265
1266 if (p_chain->mode == QED_CHAIN_MODE_PBL) {
1267 size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
1268 dma_free_coherent(&cdev->pdev->dev, size,
1269 p_chain->pbl.p_virt_table,
1270 p_chain->pbl.p_phys_table);
1271 }
1272
1273 size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE;
1274 dma_free_coherent(&cdev->pdev->dev, size,
1275 p_chain->p_virt_addr,
1276 p_chain->p_phys_addr);
1277}