/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"

/***************************************************************************
* Structures & Definitions
***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT	(1)
#define SPQ_BLOCK_SLEEP_LENGTH		(1000)
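
/* Note: SPQ_BLOCK_SLEEP_LENGTH counts polling iterations rather than
 * milliseconds; each iteration in qed_spq_block() sleeps for 5-10ms, so a
 * full pass waits roughly 5-10 seconds before an MCP drain is attempted.
 */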

/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
				void *cookie,
				union event_ring_data *data,
				u8 fw_return_code)
{
	struct qed_spq_comp_done *comp_done;

	comp_done = (struct qed_spq_comp_done *)cookie;

	comp_done->done = 0x1;
	comp_done->fw_return_code = fw_return_code;

	/* make update visible to waiting thread */
	smp_wmb();
}

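/* Poll for a blocking ramrod's completion: wait for the completion flag for
 * up to SPQ_BLOCK_SLEEP_LENGTH iterations; if the ramrod is still stuck,
 * request an MCP drain and poll once more before giving up.
 */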
static int qed_spq_block(struct qed_hwfn *p_hwfn,
			 struct qed_spq_entry *p_ent,
			 u8 *p_fw_ret)
{
	int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
	struct qed_spq_comp_done *comp_done;
	int rc;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	while (sleep_count) {
		/* validate we receive completion update */
		smp_rmb();
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}
		usleep_range(5000, 10000);
		sleep_count--;
	}

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
	if (rc != 0)
		DP_NOTICE(p_hwfn, "MCP drain failed\n");

	/* Retry after drain */
	sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
	while (sleep_count) {
		/* validate we receive completion update */
		smp_rmb();
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}
		usleep_range(5000, 10000);
		sleep_count--;
	}

	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return 0;
	}

	DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");

	return -EBUSY;
}

/***************************************************************************
* SPQ entries inner API
***************************************************************************/
static int
qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
		   struct qed_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
	case QED_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = qed_spq_blocking_cb;
		break;
	case QED_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid,
		   p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi,
		   p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return 0;
}

/***************************************************************************
* HSI access
***************************************************************************/
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
				  struct qed_spq *p_spq)
{
	u16 pq;
	struct qed_cxt_info cxt_info;
	struct core_conn_context *p_cxt;
	union qed_qm_pq_params pq_params;
	int rc;

	cxt_info.iid = p_spq->cid;

	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

	if (rc < 0) {
		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
		  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

	/* QM physical queue */
	memset(&pq_params, 0, sizeof(pq_params));
	pq_params.core.tc = LB_TC;
	pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);

	p_cxt->xstorm_st_context.spq_base_lo =
		DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
		DMA_HI_LE(p_spq->chain.p_phys_addr);

	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
		       p_hwfn->p_consq->chain.p_phys_addr);
}

static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
			   struct qed_spq *p_spq,
			   struct qed_spq_entry *p_ent)
{
	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
	u16 echo = qed_chain_get_prod_idx(p_chain);
	struct slow_path_element *elem;
	struct core_db_data db;

	p_ent->elem.hdr.echo = cpu_to_le16(echo);
	elem = qed_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
		return -EINVAL;
	}

	*elem = p_ent->elem; /* struct assignment */

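	/* Ordering matters from here on: the element copy above must be
	 * observable before the producer index is sampled (rmb), the
	 * doorbell data must be fully built before it is written (barrier),
	 * and the MMIO write itself is ordered by mmiowb() below.
	 */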
	/* send a doorbell on the slow hwfn session */
	memset(&db, 0, sizeof(db));
	SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;

	/* validate producer is up-to-date */
	rmb();

	db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

	/* do not reorder */
	barrier();

	DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

	/* make sure the doorbell is rung */
	mmiowb();

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
		   p_spq->cid, db.params, db.agg_flags,
		   qed_chain_get_prod_idx(p_chain));

	return 0;
}

/***************************************************************************
* Asynchronous events
***************************************************************************/
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
			   struct event_ring_entry *p_eqe)
{
	switch (p_eqe->protocol_id) {
	case PROTOCOLID_COMMON:
		return qed_sriov_eqe_event(p_hwfn,
					   p_eqe->opcode,
					   p_eqe->echo, &p_eqe->data);
	default:
		DP_NOTICE(p_hwfn,
			  "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return -EINVAL;
	}
}

/***************************************************************************
* EQ API
***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
			u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	mmiowb();
}

int qed_eq_completion(struct qed_hwfn *p_hwfn,
		      void *cookie)
{
	struct qed_eq *p_eq = cookie;
	struct qed_chain *p_chain = &p_eq->chain;
	int rc = 0;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (to comply with our chain), so our macros would comply
	 */
	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
	    qed_chain_get_usable_per_page(p_chain))
		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

	/* Complete current segment of eq entries */
	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

		if (!p_eqe) {
			rc = -EINVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,
			   p_eqe->protocol_id,
			   p_eqe->reserved0,
			   le16_to_cpu(p_eqe->echo),
			   p_eqe->fw_return_code,
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (qed_async_event_completion(p_hwfn, p_eqe))
				rc = -EINVAL;
		} else if (qed_spq_completion(p_hwfn,
					      p_eqe->echo,
					      p_eqe->fw_return_code,
					      &p_eqe->data)) {
			rc = -EINVAL;
		}

		qed_chain_recycle_consumed(p_chain);
	}

	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

	return rc;
}

struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
			    u16 num_elem)
{
	struct qed_eq *p_eq;

	/* Allocate EQ struct */
	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
	if (!p_eq) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n");
		return NULL;
	}

	/* Allocate and initialize EQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    num_elem,
			    sizeof(union event_ring_element),
			    &p_eq->chain)) {
		DP_NOTICE(p_hwfn, "Failed to allocate eq chain\n");
		goto eq_allocate_fail;
	}

	/* register EQ completion on the SP SB */
	qed_int_register_cb(p_hwfn,
			    qed_eq_completion,
			    p_eq,
			    &p_eq->eq_sb_index,
			    &p_eq->p_fw_cons);

	return p_eq;

eq_allocate_fail:
	qed_eq_free(p_hwfn, p_eq);
	return NULL;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn,
		  struct qed_eq *p_eq)
{
	qed_chain_reset(&p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn,
		 struct qed_eq *p_eq)
{
	if (!p_eq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_eq->chain);
	kfree(p_eq);
}

/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static int qed_cqe_completion(
	struct qed_hwfn *p_hwfn,
	struct eth_slow_path_rx_cqe *cqe,
	enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe)
{
	int rc;

	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);

	return rc;
}

/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_virt = NULL;
	dma_addr_t p_phys = 0;
	u32 i, capacity;

	INIT_LIST_HEAD(&p_spq->pending);
	INIT_LIST_HEAD(&p_spq->completion_pending);
	INIT_LIST_HEAD(&p_spq->free_pool);
	INIT_LIST_HEAD(&p_spq->unlimited_pending);
	spin_lock_init(&p_spq->lock);

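	/* Each SPQ entry embeds its own ramrod data buffer; pre-program every
	 * entry's data_ptr with the DMA address of that embedded ramrod
	 * (hence the offsetof() below), so posting an entry never requires a
	 * separate data allocation.
	 */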
	/* SPQ empty pool */
	p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
	p_virt = p_spq->p_virt;

	capacity = qed_chain_get_capacity(&p_spq->chain);
	for (i = 0; i < capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		list_add_tail(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct qed_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count = 0;
	p_spq->comp_count = 0;
	p_spq->comp_sent_count = 0;
	p_spq->unlimited_pending_count = 0;

	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	qed_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	qed_chain_reset(&p_spq->chain);
}

int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_virt = NULL;
	struct qed_spq *p_spq = NULL;
	dma_addr_t p_phys = 0;
	u32 capacity;

	/* SPQ struct */
	p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
	if (!p_spq) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
		return -ENOMEM;
	}

	/* SPQ ring */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_SINGLE,
			    QED_CHAIN_CNT_TYPE_U16,
			    0,	/* N/A when the mode is SINGLE */
			    sizeof(struct slow_path_element),
			    &p_spq->chain)) {
		DP_NOTICE(p_hwfn, "Failed to allocate spq chain\n");
		goto spq_allocate_fail;
	}

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	capacity = qed_chain_get_capacity(&p_spq->chain);
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    capacity * sizeof(struct qed_spq_entry),
				    &p_phys, GFP_KERNEL);

	if (!p_virt)
		goto spq_allocate_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;
	p_hwfn->p_spq = p_spq;

	return 0;

spq_allocate_fail:
	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	return -ENOMEM;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	u32 capacity;

	if (!p_spq)
		return;

	if (p_spq->p_virt) {
		capacity = qed_chain_get_capacity(&p_spq->chain);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  capacity * sizeof(struct qed_spq_entry),
				  p_spq->p_virt, p_spq->p_phys);
	}

	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
}

int
qed_spq_get_entry(struct qed_hwfn *p_hwfn,
		  struct qed_spq_entry **pp_ent)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;
	int rc = 0;

	spin_lock_bh(&p_spq->lock);

	if (list_empty(&p_spq->free_pool)) {
		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
		if (!p_ent) {
			rc = -ENOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = list_first_entry(&p_spq->free_pool,
					 struct qed_spq_entry,
					 list);
		list_del(&p_ent->list);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	spin_unlock_bh(&p_spq->lock);
	return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
				   struct qed_spq_entry *p_ent)
{
	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
			  struct qed_spq_entry *p_ent)
{
	spin_lock_bh(&p_hwfn->p_spq->lock);
	__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_hwfn->p_spq->lock);
}

/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int
qed_spq_add_entry(struct qed_hwfn *p_hwfn,
		  struct qed_spq_entry *p_ent,
		  enum spq_priority priority)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		if (list_empty(&p_spq->free_pool)) {
			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return 0;
		} else {
			struct qed_spq_entry *p_en2;

			p_en2 = list_first_entry(&p_spq->free_pool,
						 struct qed_spq_entry,
						 list);
			list_del(&p_en2->list);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire ring
			 * entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			kfree(p_ent);

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case QED_SPQ_PRIORITY_NORMAL:
		list_add_tail(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case QED_SPQ_PRIORITY_HIGH:
		list_add(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/***************************************************************************
* Accessor
***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;	/* illegal */
	return p_hwfn->p_spq->cid;
}

/***************************************************************************
* Posting new Ramrods
***************************************************************************/
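/* Drain 'head' into completion_pending, ringing the doorbell for each entry,
 * but leave 'keep_reserve' ring elements unused; the reserve guarantees that
 * high-priority ramrods can still be posted ahead of a long backlog.
 */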
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
			     struct list_head *head,
			     u32 keep_reserve)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	int rc;

	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !list_empty(head)) {
		struct qed_spq_entry *p_ent =
			list_first_entry(head, struct qed_spq_entry, list);
		list_del(&p_ent->list);
		list_add_tail(&p_ent->list, &p_spq->completion_pending);
		p_spq->comp_sent_count++;

		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
		if (rc) {
			list_del(&p_ent->list);
			__qed_spq_return_entry(p_hwfn, p_ent);
			return rc;
		}
	}

	return 0;
}

static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;

	while (!list_empty(&p_spq->free_pool)) {
		if (list_empty(&p_spq->unlimited_pending))
			break;

		p_ent = list_first_entry(&p_spq->unlimited_pending,
					 struct qed_spq_entry,
					 list);
		if (!p_ent)
			return -EINVAL;

		list_del(&p_ent->list);

		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return qed_spq_post_list(p_hwfn, &p_spq->pending,
				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent,
		 u8 *fw_return_code)
{
	int rc = 0;
	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
	bool b_ret_ent = true;

	if (!p_hwfn)
		return -EINVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
		return -EINVAL;
	}

	/* Complete the entry */
	rc = qed_spq_fill_entry(p_hwfn, p_ent);

	spin_lock_bh(&p_spq->lock);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Add the request to the pending queue */
	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = qed_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	spin_unlock_bh(&p_spq->lock);

	if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
		/* For entries in QED BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		qed_spq_return_entry(p_hwfn, p_ent);
	}
	return rc;

spq_post_fail2:
	spin_lock_bh(&p_spq->lock);
	list_del(&p_ent->list);
	qed_chain_return_produced(&p_spq->chain);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

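/* Illustrative sketch (not an actual caller in this file): a blocking ramrod
 * posted through this API would look roughly as follows --
 *
 *	struct qed_spq_entry *p_ent = NULL;
 *	u8 fw_ret = 0;
 *	int rc;
 *
 *	rc = qed_spq_get_entry(p_hwfn, &p_ent);
 *	if (!rc) {
 *		p_ent->comp_mode = QED_SPQ_MODE_EBLOCK;
 *		p_ent->priority = QED_SPQ_PRIORITY_NORMAL;
 *		rc = qed_spq_post(p_hwfn, p_ent, &fw_ret);
 *	}
 *
 * Real callers additionally fill the ramrod header (CID, command and
 * protocol IDs) via the qed_sp_* helpers before posting.
 */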
int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data)
{
	struct qed_spq *p_spq;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_spq_entry *tmp;
	struct qed_spq_entry *found = NULL;
	int rc;

	if (!p_hwfn)
		return -EINVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return -EINVAL;

	spin_lock_bh(&p_spq->lock);
	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
				 list) {
		if (p_ent->elem.hdr.echo == echo) {
			u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;

			list_del(&p_ent->list);

			/* Avoid overriding of SPQ entries when getting
			 * out-of-order completions, by marking the completions
			 * in a bitmap and increasing the chain consumer only
			 * for the first successive completed entries.
			 */
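			/* Example: with comp_bitmap_idx == 0, a completion
			 * for echo 1 arriving before echo 0 only sets bit 1
			 * and leaves the consumer untouched; once echo 0
			 * completes, the loop below returns both elements.
			 */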
			__set_bit(pos, p_spq->p_comp_bitmap);

			while (test_bit(p_spq->comp_bitmap_idx,
					p_spq->p_comp_bitmap)) {
				__clear_bit(p_spq->comp_bitmap_idx,
					    p_spq->p_comp_bitmap);
				p_spq->comp_bitmap_idx++;
				qed_chain_return_produced(&p_spq->chain);
			}

			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is relatively uncommon - depends on scenarios
		 * which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
			   le16_to_cpu(echo),
			   le16_to_cpu(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	spin_unlock_bh(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an entry this EQE completes\n");
		return -EEXIST;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p\n",
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);

	if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
		/* EBLOCK is responsible for freeing its own entry */
		qed_spq_return_entry(p_hwfn, found);

	/* Attempt to post pending requests */
	spin_lock_bh(&p_spq->lock);
	rc = qed_spq_pend_post(p_hwfn);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
	if (!p_consq) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n");
		return NULL;
	}

	/* Allocate and initialize ConsQ chain; each element is 0x80 bytes */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    QED_CHAIN_PAGE_SIZE / 0x80,
			    0x80, &p_consq->chain)) {
		DP_NOTICE(p_hwfn, "Failed to allocate consq chain\n");
		goto consq_allocate_fail;
	}

	return p_consq;

consq_allocate_fail:
	qed_consq_free(p_hwfn, p_consq);
	return NULL;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn,
		     struct qed_consq *p_consq)
{
	qed_chain_reset(&p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn,
		    struct qed_consq *p_consq)
{
	if (!p_consq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_consq->chain);
	kfree(p_consq);
}