/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_iscsi.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_roce.h"

/***************************************************************************
* Structures & Definitions
***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)

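/* Bounds for polling on a blocking (EBLOCK/BLOCK) ramrod: a short busy-wait
 * poll first, then a longer poll that sleeps between iterations.
 */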
#define SPQ_BLOCK_DELAY_MAX_ITER        (10)
#define SPQ_BLOCK_DELAY_US              (10)
#define SPQ_BLOCK_SLEEP_MAX_ITER        (1000)
#define SPQ_BLOCK_SLEEP_MS              (5)

/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
                                void *cookie,
                                union event_ring_data *data, u8 fw_return_code)
{
        struct qed_spq_comp_done *comp_done;

        comp_done = (struct qed_spq_comp_done *)cookie;

        comp_done->fw_return_code = fw_return_code;

        /* Make sure completion done is visible on waiting thread */
        smp_store_release(&comp_done->done, 0x1);
}

static int __qed_spq_block(struct qed_hwfn *p_hwfn,
                           struct qed_spq_entry *p_ent,
                           u8 *p_fw_ret, bool sleep_between_iter)
{
        struct qed_spq_comp_done *comp_done;
        u32 iter_cnt;

        comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
        iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
                                      : SPQ_BLOCK_DELAY_MAX_ITER;

        while (iter_cnt--) {
                /* Validate we receive completion update */
                if (READ_ONCE(comp_done->done) == 1) {
                        /* Read updated FW return value */
                        smp_read_barrier_depends();
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return 0;
                }

                if (sleep_between_iter)
                        msleep(SPQ_BLOCK_SLEEP_MS);
                else
                        udelay(SPQ_BLOCK_DELAY_US);
        }

        return -EBUSY;
}

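/* Wait for a blocking ramrod to complete: optionally a quick busy-wait poll,
 * then a sleeping poll; if the ramrod is still stuck, request an MCP drain
 * and poll one last time before giving up with -EBUSY.
 */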
static int qed_spq_block(struct qed_hwfn *p_hwfn,
                         struct qed_spq_entry *p_ent,
                         u8 *p_fw_ret, bool skip_quick_poll)
{
        struct qed_spq_comp_done *comp_done;
        struct qed_ptt *p_ptt;
        int rc;

        /* A relatively short polling period w/o sleeping, to allow the FW to
         * complete the ramrod and thus possibly to avoid the following sleeps.
         */
        if (!skip_quick_poll) {
                rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
                if (!rc)
                        return 0;
        }

        /* Move to polling with a sleeping period between iterations */
        rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
        if (!rc)
                return 0;

        p_ptt = qed_ptt_acquire(p_hwfn);
        if (!p_ptt) {
                DP_NOTICE(p_hwfn, "ptt, failed to acquire\n");
                return -EAGAIN;
        }

        DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
        rc = qed_mcp_drain(p_hwfn, p_ptt);
        if (rc) {
                DP_NOTICE(p_hwfn, "MCP drain failed\n");
                goto err;
        }

        /* Retry after drain */
        rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
        if (!rc)
                goto out;

        comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
        if (comp_done->done == 1)
                if (p_fw_ret)
                        *p_fw_ret = comp_done->fw_return_code;
out:
        qed_ptt_release(p_hwfn, p_ptt);
        return 0;

err:
        qed_ptt_release(p_hwfn, p_ptt);
        DP_NOTICE(p_hwfn,
                  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
                  le32_to_cpu(p_ent->elem.hdr.cid),
                  p_ent->elem.hdr.cmd_id,
                  p_ent->elem.hdr.protocol_id,
                  le16_to_cpu(p_ent->elem.hdr.echo));

        return -EBUSY;
}

/***************************************************************************
* SPQ entries inner API
***************************************************************************/
static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
                              struct qed_spq_entry *p_ent)
{
        p_ent->flags = 0;

        switch (p_ent->comp_mode) {
        case QED_SPQ_MODE_EBLOCK:
        case QED_SPQ_MODE_BLOCK:
                p_ent->comp_cb.function = qed_spq_blocking_cb;
                break;
        case QED_SPQ_MODE_CB:
                break;
        default:
                DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
                          p_ent->comp_mode);
                return -EINVAL;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
                   p_ent->elem.hdr.cid,
                   p_ent->elem.hdr.cmd_id,
                   p_ent->elem.hdr.protocol_id,
                   p_ent->elem.data_ptr.hi,
                   p_ent->elem.data_ptr.lo,
                   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
                           QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
                           "MODE_CB"));

        return 0;
}

/***************************************************************************
* HSI access
***************************************************************************/
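/* Program the XSTORM context of the SPQ connection: enable the relevant
 * aggregation flags, select the LB physical queue, and set the SPQ and
 * consolidation queue base addresses.
 */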
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
                                  struct qed_spq *p_spq)
{
        struct core_conn_context *p_cxt;
        struct qed_cxt_info cxt_info;
        u16 physical_q;
        int rc;

        cxt_info.iid = p_spq->cid;

        rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

        if (rc < 0) {
                DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
                          p_spq->cid);
                return;
        }

        p_cxt = cxt_info.p_cxt;

        SET_FIELD(p_cxt->xstorm_ag_context.flags10,
                  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
        SET_FIELD(p_cxt->xstorm_ag_context.flags1,
                  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
        SET_FIELD(p_cxt->xstorm_ag_context.flags9,
                  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

        /* QM physical queue */
        physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
        p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q);

        p_cxt->xstorm_st_context.spq_base_lo =
                DMA_LO_LE(p_spq->chain.p_phys_addr);
        p_cxt->xstorm_st_context.spq_base_hi =
                DMA_HI_LE(p_spq->chain.p_phys_addr);

        DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
                       p_hwfn->p_consq->chain.p_phys_addr);
}

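/* Copy the entry onto the SPQ chain and ring the XCM doorbell with the new
 * producer value; the echo field carries the producer index so the eventual
 * EQE can be matched back to this entry.
 */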
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
                           struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
        struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
        u16 echo = qed_chain_get_prod_idx(p_chain);
        struct slow_path_element *elem;
        struct core_db_data db;

        p_ent->elem.hdr.echo = cpu_to_le16(echo);
        elem = qed_chain_produce(p_chain);
        if (!elem) {
                DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
                return -EINVAL;
        }

        *elem = p_ent->elem; /* struct assignment */

        /* send a doorbell on the slow hwfn session */
        memset(&db, 0, sizeof(db));
        SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
        SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
        SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
                  DQ_XCM_CORE_SPQ_PROD_CMD);
        db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
        db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

        /* make sure the SPQE is updated before the doorbell */
        wmb();

        DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

        /* make sure the doorbell was rung */
        wmb();

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
                   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
                   p_spq->cid, db.params, db.agg_flags,
                   qed_chain_get_prod_idx(p_chain));

        return 0;
}

/***************************************************************************
* Asynchronous events
***************************************************************************/
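/* Dispatch an asynchronous EQE to the protocol that owns it (RoCE, common
 * SR-IOV events or iSCSI); unknown protocols are logged and rejected.
 */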
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
                           struct event_ring_entry *p_eqe)
{
        switch (p_eqe->protocol_id) {
#if IS_ENABLED(CONFIG_QED_RDMA)
        case PROTOCOLID_ROCE:
                qed_roce_async_event(p_hwfn, p_eqe->opcode,
                                     &p_eqe->data.rdma_data);
                return 0;
#endif
        case PROTOCOLID_COMMON:
                return qed_sriov_eqe_event(p_hwfn,
                                           p_eqe->opcode,
                                           p_eqe->echo, &p_eqe->data);
        case PROTOCOLID_ISCSI:
                if (!IS_ENABLED(CONFIG_QED_ISCSI))
                        return -EINVAL;

                if (p_hwfn->p_iscsi_info->event_cb) {
                        struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;

                        return p_iscsi->event_cb(p_iscsi->event_context,
                                                 p_eqe->opcode, &p_eqe->data);
                } else {
                        DP_NOTICE(p_hwfn,
                                  "iSCSI async completion is not set\n");
                        return -EINVAL;
                }
        default:
                DP_NOTICE(p_hwfn,
                          "Unknown Async completion for protocol: %d\n",
                          p_eqe->protocol_id);
                return -EINVAL;
        }
}

/***************************************************************************
* EQ API
***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
        u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
                   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

        REG_WR16(p_hwfn, addr, prod);

        /* keep prod updates ordered */
        mmiowb();
}

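/* Slow-path status block callback: consume EQEs up to the FW consumer
 * snapshot, handing async events to qed_async_event_completion() and ramrod
 * completions to qed_spq_completion().
 */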
int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
        struct qed_eq *p_eq = cookie;
        struct qed_chain *p_chain = &p_eq->chain;
        int rc = 0;

        /* take a snapshot of the FW consumer */
        u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

        /* Need to guarantee the fw_cons index we use points to a usable
         * element (to comply with our chain), so our macros would comply
         */
        if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
            qed_chain_get_usable_per_page(p_chain))
                fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

        /* Complete current segment of eq entries */
        while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
                struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

                if (!p_eqe) {
                        rc = -EINVAL;
                        break;
                }

                DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                           "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
                           p_eqe->opcode,
                           p_eqe->protocol_id,
                           p_eqe->reserved0,
                           le16_to_cpu(p_eqe->echo),
                           p_eqe->fw_return_code,
                           p_eqe->flags);

                if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
                        if (qed_async_event_completion(p_hwfn, p_eqe))
                                rc = -EINVAL;
                } else if (qed_spq_completion(p_hwfn,
                                              p_eqe->echo,
                                              p_eqe->fw_return_code,
                                              &p_eqe->data)) {
                        rc = -EINVAL;
                }

                qed_chain_recycle_consumed(p_chain);
        }

        qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

        return rc;
}

int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
        struct qed_eq *p_eq;

        /* Allocate EQ struct */
        p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
        if (!p_eq)
                return -ENOMEM;

        /* Allocate and initialize EQ chain */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_PBL,
                            QED_CHAIN_CNT_TYPE_U16,
                            num_elem,
                            sizeof(union event_ring_element),
                            &p_eq->chain))
                goto eq_allocate_fail;

        /* register EQ completion on the SP SB */
        qed_int_register_cb(p_hwfn, qed_eq_completion,
                            p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

        p_hwfn->p_eq = p_eq;
        return 0;

eq_allocate_fail:
        kfree(p_eq);
        return -ENOMEM;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn)
{
        qed_chain_reset(&p_hwfn->p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn)
{
        if (!p_hwfn->p_eq)
                return;

        qed_chain_free(p_hwfn->cdev, &p_hwfn->p_eq->chain);

        kfree(p_hwfn->p_eq);
        p_hwfn->p_eq = NULL;
}

/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
                              struct eth_slow_path_rx_cqe *cqe,
                              enum protocol_type protocol)
{
        if (IS_VF(p_hwfn->cdev))
                return 0;

        /* @@@tmp - it's possible we'll eventually want to handle some
         * actual commands that can arrive here, but for now this is only
         * used to complete the ramrod using the echo value on the cqe
         */
        return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
                           struct eth_slow_path_rx_cqe *cqe)
{
        int rc;

        rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
        if (rc)
                DP_NOTICE(p_hwfn,
                          "Failed to handle RXQ CQE [cmd 0x%02x]\n",
                          cqe->ramrod_cmd_id);

        return rc;
}

/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
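/* Link the pre-allocated SPQ entries into the free pool, point each entry at
 * its ramrod's DMA address, acquire the SPQ CID and initialize the HW context.
 */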
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_virt = NULL;
        dma_addr_t p_phys = 0;
        u32 i, capacity;

        INIT_LIST_HEAD(&p_spq->pending);
        INIT_LIST_HEAD(&p_spq->completion_pending);
        INIT_LIST_HEAD(&p_spq->free_pool);
        INIT_LIST_HEAD(&p_spq->unlimited_pending);
        spin_lock_init(&p_spq->lock);

        /* SPQ empty pool */
        p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
        p_virt = p_spq->p_virt;

        capacity = qed_chain_get_capacity(&p_spq->chain);
        for (i = 0; i < capacity; i++) {
                DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

                list_add_tail(&p_virt->list, &p_spq->free_pool);

                p_virt++;
                p_phys += sizeof(struct qed_spq_entry);
        }

        /* Statistics */
        p_spq->normal_count = 0;
        p_spq->comp_count = 0;
        p_spq->comp_sent_count = 0;
        p_spq->unlimited_pending_count = 0;

        bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
        p_spq->comp_bitmap_idx = 0;

        /* SPQ cid, cannot fail */
        qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
        qed_spq_hw_initialize(p_hwfn, p_spq);

        /* reset the chain itself */
        qed_chain_reset(&p_spq->chain);
}

int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_spq_entry *p_virt = NULL;
        struct qed_spq *p_spq = NULL;
        dma_addr_t p_phys = 0;
        u32 capacity;

        /* SPQ struct */
        p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
        if (!p_spq)
                return -ENOMEM;

        /* SPQ ring */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_SINGLE,
                            QED_CHAIN_CNT_TYPE_U16,
                            0,   /* N/A when the mode is SINGLE */
                            sizeof(struct slow_path_element),
                            &p_spq->chain))
                goto spq_allocate_fail;

        /* allocate and fill the SPQ elements (incl. ramrod data list) */
        capacity = qed_chain_get_capacity(&p_spq->chain);
        p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                    capacity * sizeof(struct qed_spq_entry),
                                    &p_phys, GFP_KERNEL);
        if (!p_virt)
                goto spq_allocate_fail;

        p_spq->p_virt = p_virt;
        p_spq->p_phys = p_phys;
        p_hwfn->p_spq = p_spq;

        return 0;

spq_allocate_fail:
        qed_chain_free(p_hwfn->cdev, &p_spq->chain);
        kfree(p_spq);
        return -ENOMEM;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        u32 capacity;

        if (!p_spq)
                return;

        if (p_spq->p_virt) {
                capacity = qed_chain_get_capacity(&p_spq->chain);
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  capacity *
                                  sizeof(struct qed_spq_entry),
                                  p_spq->p_virt, p_spq->p_phys);
        }

        qed_chain_free(p_hwfn->cdev, &p_spq->chain);
        kfree(p_spq);
        p_hwfn->p_spq = NULL;
}

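/* Take an entry from the free pool; if the pool is exhausted, allocate one
 * atomically and mark it as destined for the unlimited_pending queue.
 */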
int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_ent = NULL;
        int rc = 0;

        spin_lock_bh(&p_spq->lock);

        if (list_empty(&p_spq->free_pool)) {
                p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
                if (!p_ent) {
                        DP_NOTICE(p_hwfn,
                                  "Failed to allocate an SPQ entry for a pending ramrod\n");
                        rc = -ENOMEM;
                        goto out_unlock;
                }
                p_ent->queue = &p_spq->unlimited_pending;
        } else {
                p_ent = list_first_entry(&p_spq->free_pool,
                                         struct qed_spq_entry, list);
                list_del(&p_ent->list);
                p_ent->queue = &p_spq->pending;
        }

        *pp_ent = p_ent;

out_unlock:
        spin_unlock_bh(&p_spq->lock);
        return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
                                   struct qed_spq_entry *p_ent)
{
        list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
{
        spin_lock_bh(&p_hwfn->p_spq->lock);
        __qed_spq_return_entry(p_hwfn, p_ent);
        spin_unlock_bh(&p_hwfn->p_spq->lock);
}

/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
                             struct qed_spq_entry *p_ent,
                             enum spq_priority priority)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;

        if (p_ent->queue == &p_spq->unlimited_pending) {

                if (list_empty(&p_spq->free_pool)) {
                        list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
                        p_spq->unlimited_pending_count++;

                        return 0;
                } else {
                        struct qed_spq_entry *p_en2;

                        p_en2 = list_first_entry(&p_spq->free_pool,
                                                 struct qed_spq_entry, list);
                        list_del(&p_en2->list);

                        /* Copy the ring element physical pointer to the new
                         * entry, since we are about to override the entire ring
                         * entry and don't want to lose the pointer.
                         */
                        p_ent->elem.data_ptr = p_en2->elem.data_ptr;

                        *p_en2 = *p_ent;

                        /* EBLOCK responsible to free the allocated p_ent */
                        if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
                                kfree(p_ent);

                        p_ent = p_en2;
                }
        }

        /* entry is to be placed in 'pending' queue */
        switch (priority) {
        case QED_SPQ_PRIORITY_NORMAL:
                list_add_tail(&p_ent->list, &p_spq->pending);
                p_spq->normal_count++;
                break;
        case QED_SPQ_PRIORITY_HIGH:
                list_add(&p_ent->list, &p_spq->pending);
                p_spq->high_count++;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

/***************************************************************************
* Accessor
***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
        if (!p_hwfn->p_spq)
                return 0xffffffff;      /* illegal */
        return p_hwfn->p_spq->cid;
}

/***************************************************************************
* Posting new Ramrods
***************************************************************************/
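/* Post entries from the given list onto the chain while keeping keep_reserve
 * ring elements free, presumably so a high-priority ramrod can still be
 * posted when the ring is nearly full.
 */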
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
                             struct list_head *head, u32 keep_reserve)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        int rc;

        while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
               !list_empty(head)) {
                struct qed_spq_entry *p_ent =
                        list_first_entry(head, struct qed_spq_entry, list);
                list_del(&p_ent->list);
                list_add_tail(&p_ent->list, &p_spq->completion_pending);
                p_spq->comp_sent_count++;

                rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
                if (rc) {
                        list_del(&p_ent->list);
                        __qed_spq_return_entry(p_hwfn, p_ent);
                        return rc;
                }
        }

        return 0;
}

static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_ent = NULL;

        while (!list_empty(&p_spq->free_pool)) {
                if (list_empty(&p_spq->unlimited_pending))
                        break;

                p_ent = list_first_entry(&p_spq->unlimited_pending,
                                         struct qed_spq_entry, list);
                if (!p_ent)
                        return -EINVAL;

                list_del(&p_ent->list);

                qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        }

        return qed_spq_post_list(p_hwfn, &p_spq->pending,
                                 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

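/* A minimal caller sketch (illustrative only, not lifted from a specific
 * caller; real users typically go through the qed_sp_* helpers):
 *
 *      struct qed_spq_entry *p_ent = NULL;
 *      u8 fw_return_code;
 *      int rc;
 *
 *      rc = qed_spq_get_entry(p_hwfn, &p_ent);
 *      if (rc)
 *              return rc;
 *      ... fill p_ent->elem.hdr and the ramrod data, set p_ent->comp_mode
 *      ... and p_ent->comp_cb as required ...
 *      rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
 */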
int qed_spq_post(struct qed_hwfn *p_hwfn,
                 struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
        int rc = 0;
        struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
        bool b_ret_ent = true;

        if (!p_hwfn)
                return -EINVAL;

        if (!p_ent) {
                DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
                return -EINVAL;
        }

        /* Complete the entry */
        rc = qed_spq_fill_entry(p_hwfn, p_ent);

        spin_lock_bh(&p_spq->lock);

        /* Check return value after LOCK is taken for cleaner error flow */
        if (rc)
                goto spq_post_fail;

        /* Add the request to the pending queue */
        rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        if (rc)
                goto spq_post_fail;

        rc = qed_spq_pend_post(p_hwfn);
        if (rc) {
                /* Since it's possible that pending failed for a different
                 * entry [although unlikely], the failed entry was already
                 * dealt with; No need to return it here.
                 */
                b_ret_ent = false;
                goto spq_post_fail;
        }

        spin_unlock_bh(&p_spq->lock);

        if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
                /* For entries in QED BLOCK mode, the completion code cannot
                 * perform the necessary cleanup - if it did, we couldn't
                 * access p_ent here to see whether it's successful or not.
                 * Thus, after gaining the answer perform the cleanup here.
                 */
                rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
                                   p_ent->queue == &p_spq->unlimited_pending);

                if (p_ent->queue == &p_spq->unlimited_pending) {
                        /* This is an allocated p_ent which does not need to
                         * return to pool.
                         */
                        kfree(p_ent);
                        return rc;
                }

                if (rc)
                        goto spq_post_fail2;

                /* return to pool */
                qed_spq_return_entry(p_hwfn, p_ent);
        }
        return rc;

spq_post_fail2:
        spin_lock_bh(&p_spq->lock);
        list_del(&p_ent->list);
        qed_chain_return_produced(&p_spq->chain);

spq_post_fail:
        /* return to the free pool */
        if (b_ret_ent)
                __qed_spq_return_entry(p_hwfn, p_ent);
        spin_unlock_bh(&p_spq->lock);

        return rc;
}

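/* Match an EQE back to its pending SPQ entry by the echo value; out-of-order
 * completions are recorded in a bitmap so the chain consumer only advances
 * over the first run of consecutively completed entries. The entry's
 * completion callback is then invoked and more pending ramrods are posted.
 */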
int qed_spq_completion(struct qed_hwfn *p_hwfn,
                       __le16 echo,
                       u8 fw_return_code,
                       union event_ring_data *p_data)
{
        struct qed_spq *p_spq;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_spq_entry *tmp;
        struct qed_spq_entry *found = NULL;
        int rc;

        if (!p_hwfn)
                return -EINVAL;

        p_spq = p_hwfn->p_spq;
        if (!p_spq)
                return -EINVAL;

        spin_lock_bh(&p_spq->lock);
        list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
                if (p_ent->elem.hdr.echo == echo) {
                        u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;

                        list_del(&p_ent->list);

                        /* Avoid overriding of SPQ entries when getting
                         * out-of-order completions, by marking the completions
                         * in a bitmap and increasing the chain consumer only
                         * for the first successive completed entries.
                         */
                        __set_bit(pos, p_spq->p_comp_bitmap);

                        while (test_bit(p_spq->comp_bitmap_idx,
                                        p_spq->p_comp_bitmap)) {
                                __clear_bit(p_spq->comp_bitmap_idx,
                                            p_spq->p_comp_bitmap);
                                p_spq->comp_bitmap_idx++;
                                qed_chain_return_produced(&p_spq->chain);
                        }

                        p_spq->comp_count++;
                        found = p_ent;
                        break;
                }

                /* This is relatively uncommon - depends on scenarios
                 * which have multiple per-PF sent ramrods.
                 */
                DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                           "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
                           le16_to_cpu(echo),
                           le16_to_cpu(p_ent->elem.hdr.echo));
        }

        /* Release lock before callback, as callback may post
         * an additional ramrod.
         */
        spin_unlock_bh(&p_spq->lock);

        if (!found) {
                DP_NOTICE(p_hwfn,
                          "Failed to find an entry this EQE [echo %04x] completes\n",
                          le16_to_cpu(echo));
                return -EEXIST;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Complete EQE [echo %04x]: func %p cookie %p)\n",
                   le16_to_cpu(echo),
                   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
        if (found->comp_cb.function)
                found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
                                        fw_return_code);
        else
                DP_VERBOSE(p_hwfn,
                           QED_MSG_SPQ,
                           "Got a completion without a callback function\n");

        if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
            (found->queue == &p_spq->unlimited_pending))
                /* EBLOCK is responsible for returning its own entry into the
                 * free list, unless it originally added the entry into the
                 * unlimited pending list.
                 */
                qed_spq_return_entry(p_hwfn, found);

        /* Attempt to post pending requests */
        spin_lock_bh(&p_spq->lock);
        rc = qed_spq_pend_post(p_hwfn);
        spin_unlock_bh(&p_spq->lock);

        return rc;
}

int qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_consq *p_consq;

        /* Allocate ConsQ struct */
        p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
        if (!p_consq)
                return -ENOMEM;

        /* Allocate and initialize ConsQ chain */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_PBL,
                            QED_CHAIN_CNT_TYPE_U16,
                            QED_CHAIN_PAGE_SIZE / 0x80,
                            0x80, &p_consq->chain))
                goto consq_allocate_fail;

        p_hwfn->p_consq = p_consq;
        return 0;

consq_allocate_fail:
        kfree(p_consq);
        return -ENOMEM;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn)
{
        qed_chain_reset(&p_hwfn->p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn)
{
        if (!p_hwfn->p_consq)
                return;

        qed_chain_free(p_hwfn->cdev, &p_hwfn->p_consq->chain);

        kfree(p_hwfn->p_consq);
        p_hwfn->p_consq = NULL;
}