/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"

/***************************************************************************
* Structures & Definitions
***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT	(1)
#define SPQ_BLOCK_SLEEP_LENGTH		(1000)

/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
				void *cookie,
				union event_ring_data *data, u8 fw_return_code)
{
	struct qed_spq_comp_done *comp_done;

	comp_done = (struct qed_spq_comp_done *)cookie;

	comp_done->done = 0x1;
	comp_done->fw_return_code = fw_return_code;

	/* make update visible to waiting thread */
	smp_wmb();
}

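/* Wait for a blocking ramrod to complete: poll the completion flag set by
 * qed_spq_blocking_cb(); if it is not set within the timeout, request an MCP
 * drain and poll for one more full period before giving up with -EBUSY.
 */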
static int qed_spq_block(struct qed_hwfn *p_hwfn,
			 struct qed_spq_entry *p_ent,
			 u8 *p_fw_ret)
{
	int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
	struct qed_spq_comp_done *comp_done;
	int rc;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	while (sleep_count) {
		/* validate we receive completion update */
		smp_rmb();
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}
		usleep_range(5000, 10000);
		sleep_count--;
	}

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
	if (rc != 0)
		DP_NOTICE(p_hwfn, "MCP drain failed\n");

	/* Retry after drain */
	sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
	while (sleep_count) {
		/* validate we receive completion update */
		smp_rmb();
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}
		usleep_range(5000, 10000);
		sleep_count--;
	}

	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return 0;
	}

	DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");

	return -EBUSY;
}

/***************************************************************************
* SPQ entries inner API
***************************************************************************/
static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
			      struct qed_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
	case QED_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = qed_spq_blocking_cb;
		break;
	case QED_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid,
		   p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi,
		   p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return 0;
}

/***************************************************************************
* HSI access
***************************************************************************/
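/* Program the XSTORM context of the SPQ connection: enable the relevant
 * aggregation flags, select the loopback-TC physical queue and point the
 * context at the SPQ ring and the ConsQ consolidation ring.
 */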
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
				  struct qed_spq *p_spq)
{
	u16 pq;
	struct qed_cxt_info cxt_info;
	struct core_conn_context *p_cxt;
	union qed_qm_pq_params pq_params;
	int rc;

	cxt_info.iid = p_spq->cid;

	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

	if (rc < 0) {
		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
		  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

	/* QM physical queue */
	memset(&pq_params, 0, sizeof(pq_params));
	pq_params.core.tc = LB_TC;
	pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);

	p_cxt->xstorm_st_context.spq_base_lo =
		DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
		DMA_HI_LE(p_spq->chain.p_phys_addr);

	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
		       p_hwfn->p_consq->chain.p_phys_addr);
}

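/* Produce a single slow-path element from the SPQ chain and ring the XCM
 * doorbell for it. The memory barriers guarantee the element is written
 * before the doorbell, and that the doorbell write is not reordered past
 * whatever follows.
 */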
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
			   struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
	u16 echo = qed_chain_get_prod_idx(p_chain);
	struct slow_path_element *elem;
	struct core_db_data db;

	p_ent->elem.hdr.echo = cpu_to_le16(echo);
	elem = qed_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
		return -EINVAL;
	}

	*elem = p_ent->elem; /* struct assignment */

	/* send a doorbell on the slow hwfn session */
	memset(&db, 0, sizeof(db));
	SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
	db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

	/* make sure the SPQE is updated before the doorbell */
	wmb();

	DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

	/* make sure the doorbell was rung */
	wmb();

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
		   p_spq->cid, db.params, db.agg_flags,
		   qed_chain_get_prod_idx(p_chain));

	return 0;
}

/***************************************************************************
* Asynchronous events
***************************************************************************/
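/* Dispatch an asynchronous event-ring entry by protocol. Only
 * PROTOCOLID_COMMON is handled here, and it is forwarded to the SR-IOV code.
 */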
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
			   struct event_ring_entry *p_eqe)
{
	switch (p_eqe->protocol_id) {
	case PROTOCOLID_COMMON:
		return qed_sriov_eqe_event(p_hwfn,
					   p_eqe->opcode,
					   p_eqe->echo, &p_eqe->data);
	default:
		DP_NOTICE(p_hwfn,
			  "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return -EINVAL;
	}
}

/***************************************************************************
* EQ API
***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	mmiowb();
}

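/* EQ handler registered on the slow-path status block: walk the event ring
 * up to the firmware consumer index, dispatching async events and SPQ
 * completions, then report the new index back to the firmware via
 * qed_eq_prod_update().
 */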
int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_eq *p_eq = cookie;
	struct qed_chain *p_chain = &p_eq->chain;
	int rc = 0;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (to comply with our chain), so our macros would comply
	 */
	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
	    qed_chain_get_usable_per_page(p_chain))
		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

	/* Complete current segment of eq entries */
	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

		if (!p_eqe) {
			rc = -EINVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,
			   p_eqe->protocol_id,
			   p_eqe->reserved0,
			   le16_to_cpu(p_eqe->echo),
			   p_eqe->fw_return_code,
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (qed_async_event_completion(p_hwfn, p_eqe))
				rc = -EINVAL;
		} else if (qed_spq_completion(p_hwfn,
					      p_eqe->echo,
					      p_eqe->fw_return_code,
					      &p_eqe->data)) {
			rc = -EINVAL;
		}

		qed_chain_recycle_consumed(p_chain);
	}

	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

	return rc;
}

struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
	struct qed_eq *p_eq;

	/* Allocate EQ struct */
	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
	if (!p_eq) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n");
		return NULL;
	}

	/* Allocate and initialize EQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    num_elem,
			    sizeof(union event_ring_element),
			    &p_eq->chain)) {
		DP_NOTICE(p_hwfn, "Failed to allocate eq chain\n");
		goto eq_allocate_fail;
	}

	/* register EQ completion on the SP SB */
	qed_int_register_cb(p_hwfn, qed_eq_completion,
			    p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

	return p_eq;

eq_allocate_fail:
	qed_eq_free(p_hwfn, p_eq);
	return NULL;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
	qed_chain_reset(&p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
	if (!p_eq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_eq->chain);
	kfree(p_eq);
}

/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
			      struct eth_slow_path_rx_cqe *cqe,
			      enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe)
{
	int rc;

	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);

	return rc;
}

/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
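/* Initialize an already-allocated SPQ: reset the pending lists, build the
 * free pool from the DMA-coherent entry array, acquire a CORE CID for the
 * queue and program its HW context.
 */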
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_virt = NULL;
	dma_addr_t p_phys = 0;
	u32 i, capacity;

	INIT_LIST_HEAD(&p_spq->pending);
	INIT_LIST_HEAD(&p_spq->completion_pending);
	INIT_LIST_HEAD(&p_spq->free_pool);
	INIT_LIST_HEAD(&p_spq->unlimited_pending);
	spin_lock_init(&p_spq->lock);

	/* SPQ empty pool */
	p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
	p_virt = p_spq->p_virt;

	capacity = qed_chain_get_capacity(&p_spq->chain);
	for (i = 0; i < capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		list_add_tail(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct qed_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count = 0;
	p_spq->comp_count = 0;
	p_spq->comp_sent_count = 0;
	p_spq->unlimited_pending_count = 0;

	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	qed_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	qed_chain_reset(&p_spq->chain);
}

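/* Allocate the SPQ container, its single-page ring and the DMA-coherent
 * array of SPQ entries (one per ring element, including the ramrod data).
 */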
int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_virt = NULL;
	struct qed_spq *p_spq = NULL;
	dma_addr_t p_phys = 0;
	u32 capacity;

	/* SPQ struct */
	p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
	if (!p_spq) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
		return -ENOMEM;
	}

	/* SPQ ring */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_SINGLE,
			    QED_CHAIN_CNT_TYPE_U16,
			    0,   /* N/A when the mode is SINGLE */
			    sizeof(struct slow_path_element),
			    &p_spq->chain)) {
		DP_NOTICE(p_hwfn, "Failed to allocate spq chain\n");
		goto spq_allocate_fail;
	}

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	capacity = qed_chain_get_capacity(&p_spq->chain);
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    capacity *
				    sizeof(struct qed_spq_entry),
				    &p_phys, GFP_KERNEL);

	if (!p_virt)
		goto spq_allocate_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;
	p_hwfn->p_spq = p_spq;

	return 0;

spq_allocate_fail:
	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	return -ENOMEM;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	u32 capacity;

	if (!p_spq)
		return;

	if (p_spq->p_virt) {
		capacity = qed_chain_get_capacity(&p_spq->chain);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  capacity *
				  sizeof(struct qed_spq_entry),
				  p_spq->p_virt, p_spq->p_phys);
	}

	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
}

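/* Hand out an SPQ entry: take one from the free pool if available, otherwise
 * allocate a new one that will be queued on the unlimited_pending list.
 */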
int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;
	int rc = 0;

	spin_lock_bh(&p_spq->lock);

	if (list_empty(&p_spq->free_pool)) {
		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
		if (!p_ent) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate an SPQ entry for a pending ramrod\n");
			rc = -ENOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = list_first_entry(&p_spq->free_pool,
					 struct qed_spq_entry, list);
		list_del(&p_ent->list);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	spin_unlock_bh(&p_spq->lock);
	return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
				   struct qed_spq_entry *p_ent)
{
	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
{
	spin_lock_bh(&p_hwfn->p_spq->lock);
	__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_hwfn->p_spq->lock);
}

/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while the SPQ lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
			     struct qed_spq_entry *p_ent,
			     enum spq_priority priority)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		if (list_empty(&p_spq->free_pool)) {
			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return 0;
		} else {
			struct qed_spq_entry *p_en2;

			p_en2 = list_first_entry(&p_spq->free_pool,
						 struct qed_spq_entry, list);
			list_del(&p_en2->list);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire ring
			 * entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			/* EBLOCK responsible to free the allocated p_ent */
			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
				kfree(p_ent);

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case QED_SPQ_PRIORITY_NORMAL:
		list_add_tail(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case QED_SPQ_PRIORITY_HIGH:
		list_add(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/***************************************************************************
* Accessor
***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;	/* illegal */
	return p_hwfn->p_spq->cid;
}

/***************************************************************************
* Posting new Ramrods
***************************************************************************/
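/* Post entries from @head to the HW ring as long as more than @keep_reserve
 * ring elements remain free; the reserve leaves room for high-priority
 * ramrods even when the ring is nearly full.
 */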
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
			     struct list_head *head, u32 keep_reserve)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	int rc;

	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !list_empty(head)) {
		struct qed_spq_entry *p_ent =
			list_first_entry(head, struct qed_spq_entry, list);
		list_del(&p_ent->list);
		list_add_tail(&p_ent->list, &p_spq->completion_pending);
		p_spq->comp_sent_count++;

		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
		if (rc) {
			list_del(&p_ent->list);
			__qed_spq_return_entry(p_hwfn, p_ent);
			return rc;
		}
	}

	return 0;
}

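/* Move entries from the unlimited_pending list into the pending list while
 * free-pool entries are available, then post the pending list while keeping
 * the default high-priority reserve.
 */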
static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;

	while (!list_empty(&p_spq->free_pool)) {
		if (list_empty(&p_spq->unlimited_pending))
			break;

		p_ent = list_first_entry(&p_spq->unlimited_pending,
					 struct qed_spq_entry, list);
		if (!p_ent)
			return -EINVAL;

		list_del(&p_ent->list);

		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return qed_spq_post_list(p_hwfn, &p_spq->pending,
				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

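/* Post a ramrod: fill the entry, queue it under the SPQ lock and flush the
 * pending list to HW. For EBLOCK entries this also blocks until the
 * completion arrives and then returns the entry to the pool (or frees it,
 * if it was an unlimited-pending allocation).
 */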
int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
	int rc = 0;
	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
	bool b_ret_ent = true;

	if (!p_hwfn)
		return -EINVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
		return -EINVAL;
	}

	/* Complete the entry */
	rc = qed_spq_fill_entry(p_hwfn, p_ent);

	spin_lock_bh(&p_spq->lock);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Add the request to the pending queue */
	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = qed_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	spin_unlock_bh(&p_spq->lock);

	if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
		/* For entries in QED BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);

		if (p_ent->queue == &p_spq->unlimited_pending) {
			/* This is an allocated p_ent which does not need to
			 * return to pool.
			 */
			kfree(p_ent);
			return rc;
		}

		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		qed_spq_return_entry(p_hwfn, p_ent);
	}
	return rc;

spq_post_fail2:
	spin_lock_bh(&p_spq->lock);
	list_del(&p_ent->list);
	qed_chain_return_produced(&p_spq->chain);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

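/* Completion handler invoked from the EQ: match the EQE echo against the
 * completion_pending list, mark completions in a bitmap so that out-of-order
 * completions only advance the chain for the first consecutive entries, run
 * the entry's callback and try to post more pending ramrods.
 */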
int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data)
{
	struct qed_spq *p_spq;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_spq_entry *tmp;
	struct qed_spq_entry *found = NULL;
	int rc;

	if (!p_hwfn)
		return -EINVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return -EINVAL;

	spin_lock_bh(&p_spq->lock);
	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
		if (p_ent->elem.hdr.echo == echo) {
			u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;

			list_del(&p_ent->list);

			/* Avoid overriding of SPQ entries when getting
			 * out-of-order completions, by marking the completions
			 * in a bitmap and increasing the chain consumer only
			 * for the first successive completed entries.
			 */
			__set_bit(pos, p_spq->p_comp_bitmap);

			while (test_bit(p_spq->comp_bitmap_idx,
					p_spq->p_comp_bitmap)) {
				__clear_bit(p_spq->comp_bitmap_idx,
					    p_spq->p_comp_bitmap);
				p_spq->comp_bitmap_idx++;
				qed_chain_return_produced(&p_spq->chain);
			}

			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is relatively uncommon - depends on scenarios
		 * which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
			   le16_to_cpu(echo),
			   le16_to_cpu(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	spin_unlock_bh(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an entry this EQE [echo %04x] completes\n",
			  le16_to_cpu(echo));
		return -EEXIST;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Complete EQE [echo %04x]: func %p cookie %p)\n",
		   le16_to_cpu(echo),
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);
	else
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SPQ,
			   "Got a completion without a callback function\n");

	if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
	    (found->queue == &p_spq->unlimited_pending))
		/* EBLOCK is responsible for returning its own entry into the
		 * free list, unless it originally added the entry into the
		 * unlimited pending list.
		 */
		qed_spq_return_entry(p_hwfn, found);

	/* Attempt to post pending requests */
	spin_lock_bh(&p_spq->lock);
	rc = qed_spq_pend_post(p_hwfn);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
	if (!p_consq) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n");
		return NULL;
	}

	/* Allocate and initialize ConsQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    QED_CHAIN_PAGE_SIZE / 0x80,
			    0x80, &p_consq->chain)) {
		DP_NOTICE(p_hwfn, "Failed to allocate consq chain\n");
		goto consq_allocate_fail;
	}

	return p_consq;

consq_allocate_fail:
	qed_consq_free(p_hwfn, p_consq);
	return NULL;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
	qed_chain_reset(&p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
	if (!p_consq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_consq->chain);
	kfree(p_consq);
}