/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
#include "qed_roce.h"
#endif

/***************************************************************************
* Structures & Definitions
***************************************************************************/

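/* SPQ_HIGH_PRI_RESERVE_DEFAULT - number of SPQ ring elements kept in reserve
 * for high-priority ramrods when posting the pending list.
 * SPQ_BLOCK_SLEEP_LENGTH - number of 5-10ms polling iterations qed_spq_block()
 * performs per attempt while waiting for a ramrod completion.
 */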
#define SPQ_HIGH_PRI_RESERVE_DEFAULT	(1)
#define SPQ_BLOCK_SLEEP_LENGTH		(1000)

/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
				void *cookie,
				union event_ring_data *data, u8 fw_return_code)
{
	struct qed_spq_comp_done *comp_done;

	comp_done = (struct qed_spq_comp_done *)cookie;

	comp_done->done = 0x1;
	comp_done->fw_return_code = fw_return_code;

	/* make update visible to waiting thread */
	smp_wmb();
}

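/* Busy-wait for the blocking callback above to signal completion. If the
 * first polling pass times out, request an MCP drain of the slowpath queue
 * and poll once more before giving up with -EBUSY.
 */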
static int qed_spq_block(struct qed_hwfn *p_hwfn,
			 struct qed_spq_entry *p_ent,
			 u8 *p_fw_ret)
{
	int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
	struct qed_spq_comp_done *comp_done;
	int rc;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	while (sleep_count) {
		/* validate we receive completion update */
		smp_rmb();
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}
		usleep_range(5000, 10000);
		sleep_count--;
	}

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
	if (rc != 0)
		DP_NOTICE(p_hwfn, "MCP drain failed\n");

	/* Retry after drain */
	sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
	while (sleep_count) {
		/* validate we receive completion update */
		smp_rmb();
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}
		usleep_range(5000, 10000);
		sleep_count--;
	}

	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return 0;
	}

	DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");

	return -EBUSY;
}

/***************************************************************************
* SPQ entries inner API
***************************************************************************/
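/* Prepare an SPQ entry for posting: BLOCK/EBLOCK entries get the internal
 * blocking callback wired in, while CB entries keep the caller-supplied
 * completion callback.
 */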
static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
			      struct qed_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
	case QED_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = qed_spq_blocking_cb;
		break;
	case QED_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid,
		   p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi,
		   p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return 0;
}

/***************************************************************************
* HSI access
***************************************************************************/
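/* Program the CORE connection context backing the SPQ CID: enable the
 * relevant XSTORM aggregation flags, select the loopback-TC physical queue,
 * and write the SPQ and ConsQ chain base addresses for firmware use.
 */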
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
				  struct qed_spq *p_spq)
{
	u16 pq;
	struct qed_cxt_info cxt_info;
	struct core_conn_context *p_cxt;
	union qed_qm_pq_params pq_params;
	int rc;

	cxt_info.iid = p_spq->cid;

	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

	if (rc < 0) {
		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
		  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

	/* QM physical queue */
	memset(&pq_params, 0, sizeof(pq_params));
	pq_params.core.tc = LB_TC;
	pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);

	p_cxt->xstorm_st_context.spq_base_lo =
		DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
		DMA_HI_LE(p_spq->chain.p_phys_addr);

	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
		       p_hwfn->p_consq->chain.p_phys_addr);
}

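/* Post a single SPQ entry to hardware: stamp it with an echo value (the
 * current chain producer), copy it into the next ring element and ring the
 * XCM doorbell with the new producer value.
 */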
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
			   struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
	u16 echo = qed_chain_get_prod_idx(p_chain);
	struct slow_path_element *elem;
	struct core_db_data db;

	p_ent->elem.hdr.echo = cpu_to_le16(echo);
	elem = qed_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
		return -EINVAL;
	}

	*elem = p_ent->elem; /* struct assignment */

	/* send a doorbell on the slow hwfn session */
	memset(&db, 0, sizeof(db));
	SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
	db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

	/* make sure the SPQE is updated before the doorbell */
	wmb();

	DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

	/* make sure the doorbell is rung */
	wmb();

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
		   p_spq->cid, db.params, db.agg_flags,
		   qed_chain_get_prod_idx(p_chain));

	return 0;
}

/***************************************************************************
* Asynchronous events
***************************************************************************/
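/* Dispatch asynchronous EQEs by protocol: RoCE events go to the RDMA handler
 * (when qedr is enabled), common-protocol events to the SR-IOV handler.
 */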
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
			   struct event_ring_entry *p_eqe)
{
	switch (p_eqe->protocol_id) {
#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
	case PROTOCOLID_ROCE:
		qed_async_roce_event(p_hwfn, p_eqe);
		return 0;
#endif
	case PROTOCOLID_COMMON:
		return qed_sriov_eqe_event(p_hwfn,
					   p_eqe->opcode,
					   p_eqe->echo, &p_eqe->data);
	default:
		DP_NOTICE(p_hwfn,
			  "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return -EINVAL;
	}
}

/***************************************************************************
* EQ API
***************************************************************************/
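/* Write the EQE consumer value for this PF into USTORM RAM, telling firmware
 * how far the driver has processed the event ring.
 */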
void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	mmiowb();
}

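/* EQ slowpath handler: walk the event ring up to the firmware consumer
 * snapshot, dispatching async events and SPQ (ramrod) completions, then
 * publish the new position back to firmware.
 */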
int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_eq *p_eq = cookie;
	struct qed_chain *p_chain = &p_eq->chain;
	int rc = 0;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (to comply with our chain), so the chain macros behave
	 * correctly.
	 */
	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
	    qed_chain_get_usable_per_page(p_chain))
		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

	/* Complete current segment of eq entries */
	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

		if (!p_eqe) {
			rc = -EINVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,
			   p_eqe->protocol_id,
			   p_eqe->reserved0,
			   le16_to_cpu(p_eqe->echo),
			   p_eqe->fw_return_code,
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (qed_async_event_completion(p_hwfn, p_eqe))
				rc = -EINVAL;
		} else if (qed_spq_completion(p_hwfn,
					      p_eqe->echo,
					      p_eqe->fw_return_code,
					      &p_eqe->data)) {
			rc = -EINVAL;
		}

		qed_chain_recycle_consumed(p_chain);
	}

	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

	return rc;
}

struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
	struct qed_eq *p_eq;

	/* Allocate EQ struct */
	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
	if (!p_eq)
		return NULL;

	/* Allocate and initialize EQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    num_elem,
			    sizeof(union event_ring_element),
			    &p_eq->chain))
		goto eq_allocate_fail;

	/* register EQ completion on the SP SB */
	qed_int_register_cb(p_hwfn, qed_eq_completion,
			    p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

	return p_eq;

eq_allocate_fail:
	qed_eq_free(p_hwfn, p_eq);
	return NULL;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
	qed_chain_reset(&p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
	if (!p_eq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_eq->chain);
	kfree(p_eq);
}

/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
			      struct eth_slow_path_rx_cqe *cqe,
			      enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe)
{
	int rc;

	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);

	return rc;
}

/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
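/* Carve the preallocated SPQ entry array into the free pool, pointing each
 * element's data_ptr at its entry's ramrod data, then acquire a CORE CID and
 * program the HW context for it.
 */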
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_virt = NULL;
	dma_addr_t p_phys = 0;
	u32 i, capacity;

	INIT_LIST_HEAD(&p_spq->pending);
	INIT_LIST_HEAD(&p_spq->completion_pending);
	INIT_LIST_HEAD(&p_spq->free_pool);
	INIT_LIST_HEAD(&p_spq->unlimited_pending);
	spin_lock_init(&p_spq->lock);

	/* SPQ empty pool */
	p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
	p_virt = p_spq->p_virt;

	capacity = qed_chain_get_capacity(&p_spq->chain);
	for (i = 0; i < capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		list_add_tail(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct qed_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count = 0;
	p_spq->comp_count = 0;
	p_spq->comp_sent_count = 0;
	p_spq->unlimited_pending_count = 0;

	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	qed_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	qed_chain_reset(&p_spq->chain);
}

int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_virt = NULL;
	struct qed_spq *p_spq = NULL;
	dma_addr_t p_phys = 0;
	u32 capacity;

	/* SPQ struct */
	p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
	if (!p_spq)
		return -ENOMEM;

	/* SPQ ring */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_SINGLE,
			    QED_CHAIN_CNT_TYPE_U16,
			    0, /* N/A when the mode is SINGLE */
			    sizeof(struct slow_path_element),
			    &p_spq->chain))
		goto spq_allocate_fail;

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	capacity = qed_chain_get_capacity(&p_spq->chain);
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    capacity * sizeof(struct qed_spq_entry),
				    &p_phys, GFP_KERNEL);
	if (!p_virt)
		goto spq_allocate_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;
	p_hwfn->p_spq = p_spq;

	return 0;

spq_allocate_fail:
	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	return -ENOMEM;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	u32 capacity;

	if (!p_spq)
		return;

	if (p_spq->p_virt) {
		capacity = qed_chain_get_capacity(&p_spq->chain);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  capacity *
				  sizeof(struct qed_spq_entry),
				  p_spq->p_virt, p_spq->p_phys);
	}

	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
}

int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;
	int rc = 0;

	spin_lock_bh(&p_spq->lock);

	if (list_empty(&p_spq->free_pool)) {
		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
		if (!p_ent) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate an SPQ entry for a pending ramrod\n");
			rc = -ENOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = list_first_entry(&p_spq->free_pool,
					 struct qed_spq_entry, list);
		list_del(&p_ent->list);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	spin_unlock_bh(&p_spq->lock);
	return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
				   struct qed_spq_entry *p_ent)
{
	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
{
	spin_lock_bh(&p_hwfn->p_spq->lock);
	__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_hwfn->p_spq->lock);
}

/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be called with the SPQ lock held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
			     struct qed_spq_entry *p_ent,
			     enum spq_priority priority)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		if (list_empty(&p_spq->free_pool)) {
			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return 0;
		} else {
			struct qed_spq_entry *p_en2;

			p_en2 = list_first_entry(&p_spq->free_pool,
						 struct qed_spq_entry, list);
			list_del(&p_en2->list);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire ring
			 * entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			/* EBLOCK is responsible for freeing the allocated p_ent */
			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
				kfree(p_ent);

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case QED_SPQ_PRIORITY_NORMAL:
		list_add_tail(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case QED_SPQ_PRIORITY_HIGH:
		list_add(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/***************************************************************************
* Accessor
***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;	/* illegal */
	return p_hwfn->p_spq->cid;
}

/***************************************************************************
* Posting new Ramrods
***************************************************************************/
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
			     struct list_head *head, u32 keep_reserve)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	int rc;

	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !list_empty(head)) {
		struct qed_spq_entry *p_ent =
			list_first_entry(head, struct qed_spq_entry, list);
		list_del(&p_ent->list);
		list_add_tail(&p_ent->list, &p_spq->completion_pending);
		p_spq->comp_sent_count++;

		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
		if (rc) {
			list_del(&p_ent->list);
			__qed_spq_return_entry(p_hwfn, p_ent);
			return rc;
		}
	}

	return 0;
}

static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;

	while (!list_empty(&p_spq->free_pool)) {
		if (list_empty(&p_spq->unlimited_pending))
			break;

		p_ent = list_first_entry(&p_spq->unlimited_pending,
					 struct qed_spq_entry, list);
		if (!p_ent)
			return -EINVAL;

		list_del(&p_ent->list);

		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return qed_spq_post_list(p_hwfn, &p_spq->pending,
				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

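/* qed_spq_post() is the entry point used by the ramrod helpers. A typical
 * caller (e.g. the helpers in qed_sp_commands.c) roughly does:
 *
 *	rc = qed_sp_init_request(p_hwfn, &p_ent, cmd, protocol, &init_data);
 *	if (rc)
 *		return rc;
 *	... fill p_ent->ramrod.<specific> ...
 *	return qed_spq_post(p_hwfn, p_ent, NULL);
 *
 * This is only an illustrative sketch of the expected call pattern; see the
 * actual callers for the exact helper names and arguments.
 */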
int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
	int rc = 0;
	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
	bool b_ret_ent = true;

	if (!p_hwfn)
		return -EINVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
		return -EINVAL;
	}

	/* Complete the entry */
	rc = qed_spq_fill_entry(p_hwfn, p_ent);

	spin_lock_bh(&p_spq->lock);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Add the request to the pending queue */
	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = qed_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	spin_unlock_bh(&p_spq->lock);

	if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
		/* For entries in QED EBLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);

		if (p_ent->queue == &p_spq->unlimited_pending) {
			/* This is an allocated p_ent which does not need to
			 * return to pool.
			 */
			kfree(p_ent);
			return rc;
		}

		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		qed_spq_return_entry(p_hwfn, p_ent);
	}
	return rc;

spq_post_fail2:
	spin_lock_bh(&p_spq->lock);
	list_del(&p_ent->list);
	qed_chain_return_produced(&p_spq->chain);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

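/* Handle a ramrod completion EQE: locate the matching entry by its echo
 * value, use the completion bitmap so chain elements are returned in order
 * even when completions arrive out of order, invoke the completion callback
 * and then try to post more pending ramrods.
 */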
int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data)
{
	struct qed_spq *p_spq;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_spq_entry *tmp;
	struct qed_spq_entry *found = NULL;
	int rc;

	if (!p_hwfn)
		return -EINVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return -EINVAL;

	spin_lock_bh(&p_spq->lock);
	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
		if (p_ent->elem.hdr.echo == echo) {
			u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;

			list_del(&p_ent->list);

			/* Avoid overriding of SPQ entries when getting
			 * out-of-order completions, by marking the completions
			 * in a bitmap and increasing the chain consumer only
			 * for the first successive completed entries.
			 */
			__set_bit(pos, p_spq->p_comp_bitmap);

			while (test_bit(p_spq->comp_bitmap_idx,
					p_spq->p_comp_bitmap)) {
				__clear_bit(p_spq->comp_bitmap_idx,
					    p_spq->p_comp_bitmap);
				p_spq->comp_bitmap_idx++;
				qed_chain_return_produced(&p_spq->chain);
			}

			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is relatively uncommon - depends on scenarios
		 * which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
			   le16_to_cpu(echo),
			   le16_to_cpu(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	spin_unlock_bh(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an entry this EQE [echo %04x] completes\n",
			  le16_to_cpu(echo));
		return -EEXIST;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Complete EQE [echo %04x]: func %p cookie %p\n",
		   le16_to_cpu(echo),
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);
	else
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SPQ,
			   "Got a completion without a callback function\n");

	if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
	    (found->queue == &p_spq->unlimited_pending))
		/* EBLOCK is responsible for returning its own entry into the
		 * free list, unless it originally added the entry into the
		 * unlimited pending list.
		 */
		qed_spq_return_entry(p_hwfn, found);

	/* Attempt to post pending requests */
	spin_lock_bh(&p_spq->lock);
	rc = qed_spq_pend_post(p_hwfn);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

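/* ConsQ - the consolidation queue chain whose base address is handed to
 * firmware via the SPQ context (see qed_spq_hw_initialize()); the driver
 * only allocates, resets and frees it.
 */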
struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
	if (!p_consq)
		return NULL;

	/* Allocate and initialize ConsQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    QED_CHAIN_PAGE_SIZE / 0x80,
			    0x80, &p_consq->chain))
		goto consq_allocate_fail;

	return p_consq;

consq_allocate_fail:
	qed_consq_free(p_hwfn, p_consq);
	return NULL;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
	qed_chain_reset(&p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
	if (!p_consq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_consq->chain);
	kfree(p_consq);
}