/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_roce.h"

/***************************************************************************
* Structures & Definitions
***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT	(1)

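/* Worst-case blocking budget: a quick poll of 10 x 10us busy-wait iterations
 * (~100us), then up to 1000 x 5ms sleeping iterations (~5 seconds) before a
 * ramrod is declared stuck.
 */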
#define SPQ_BLOCK_DELAY_MAX_ITER	(10)
#define SPQ_BLOCK_DELAY_US		(10)
#define SPQ_BLOCK_SLEEP_MAX_ITER	(1000)
#define SPQ_BLOCK_SLEEP_MS		(5)

/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
				void *cookie,
				union event_ring_data *data, u8 fw_return_code)
{
	struct qed_spq_comp_done *comp_done;

	comp_done = (struct qed_spq_comp_done *)cookie;

	comp_done->fw_return_code = fw_return_code;

	/* Make sure completion done is visible on waiting thread */
	smp_store_release(&comp_done->done, 0x1);
}

static int __qed_spq_block(struct qed_hwfn *p_hwfn,
			   struct qed_spq_entry *p_ent,
			   u8 *p_fw_ret, bool sleep_between_iter)
{
	struct qed_spq_comp_done *comp_done;
	u32 iter_cnt;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
				      : SPQ_BLOCK_DELAY_MAX_ITER;

	while (iter_cnt--) {
		/* Validate we receive completion update */
		if (READ_ONCE(comp_done->done) == 1) {
			/* Read updated FW return value */
			smp_read_barrier_depends();
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}

		if (sleep_between_iter)
			msleep(SPQ_BLOCK_SLEEP_MS);
		else
			udelay(SPQ_BLOCK_DELAY_US);
	}

	return -EBUSY;
}

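/* Wait for a ramrod completion in BLOCK/EBLOCK mode: optionally start with a
 * short busy-wait poll, fall back to a sleeping poll, and as a last resort
 * request an MCP drain and poll once more before declaring the ramrod stuck.
 */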
static int qed_spq_block(struct qed_hwfn *p_hwfn,
			 struct qed_spq_entry *p_ent,
			 u8 *p_fw_ret, bool skip_quick_poll)
{
	struct qed_spq_comp_done *comp_done;
	int rc;

	/* A relatively short polling period w/o sleeping, to allow the FW to
	 * complete the ramrod and thus possibly to avoid the following sleeps.
	 */
	if (!skip_quick_poll) {
		rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
		if (!rc)
			return 0;
	}

	/* Move to polling with a sleeping period between iterations */
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (!rc)
		return 0;

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
	if (rc) {
		DP_NOTICE(p_hwfn, "MCP drain failed\n");
		goto err;
	}

	/* Retry after drain */
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (!rc)
		return 0;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return 0;
	}
err:
	DP_NOTICE(p_hwfn,
		  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
		  le32_to_cpu(p_ent->elem.hdr.cid),
		  p_ent->elem.hdr.cmd_id,
		  p_ent->elem.hdr.protocol_id,
		  le16_to_cpu(p_ent->elem.hdr.echo));

	return -EBUSY;
}

/***************************************************************************
* SPQ entries inner API
***************************************************************************/
static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
			      struct qed_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
	case QED_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = qed_spq_blocking_cb;
		break;
	case QED_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid,
		   p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi,
		   p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return 0;
}

/***************************************************************************
* HSI access
***************************************************************************/
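/* Program the SPQ connection context: enable the relevant XSTORM completion
 * flags, assign the LB-TC physical queue and write the SPQ and ConsQ chain
 * base addresses into the context.
 */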
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
				  struct qed_spq *p_spq)
{
	u16 pq;
	struct qed_cxt_info cxt_info;
	struct core_conn_context *p_cxt;
	union qed_qm_pq_params pq_params;
	int rc;

	cxt_info.iid = p_spq->cid;

	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

	if (rc < 0) {
		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
		  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

	/* QM physical queue */
	memset(&pq_params, 0, sizeof(pq_params));
	pq_params.core.tc = LB_TC;
	pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);

	p_cxt->xstorm_st_context.spq_base_lo =
		DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
		DMA_HI_LE(p_spq->chain.p_phys_addr);

	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
		       p_hwfn->p_consq->chain.p_phys_addr);
}

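/* Copy a single SPQ element onto the chain, stamp it with the current
 * producer index as its 'echo', and ring the XCM doorbell so the FW
 * processes the new producer value.
 */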
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
			   struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
	u16 echo = qed_chain_get_prod_idx(p_chain);
	struct slow_path_element *elem;
	struct core_db_data db;

	p_ent->elem.hdr.echo = cpu_to_le16(echo);
	elem = qed_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
		return -EINVAL;
	}

	*elem = p_ent->elem; /* struct assignment */

	/* send a doorbell on the slow hwfn session */
	memset(&db, 0, sizeof(db));
	SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
	db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

	/* make sure the SPQE is updated before the doorbell */
	wmb();

	DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

	/* make sure the doorbell is rung */
	wmb();

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
		   p_spq->cid, db.params, db.agg_flags,
		   qed_chain_get_prod_idx(p_chain));

	return 0;
}

/***************************************************************************
* Asynchronous events
***************************************************************************/
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
			   struct event_ring_entry *p_eqe)
{
	switch (p_eqe->protocol_id) {
	case PROTOCOLID_ROCE:
		qed_async_roce_event(p_hwfn, p_eqe);
		return 0;
	case PROTOCOLID_COMMON:
		return qed_sriov_eqe_event(p_hwfn,
					   p_eqe->opcode,
					   p_eqe->echo, &p_eqe->data);
	default:
		DP_NOTICE(p_hwfn,
			  "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return -EINVAL;
	}
}

/***************************************************************************
* EQ API
***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	mmiowb();
}

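/* EQ slowpath callback: consume event-ring entries up to the FW consumer
 * snapshot, dispatching each entry either to the async-event handler or to
 * the SPQ completion flow, then report the new chain position back to FW
 * via qed_eq_prod_update().
 */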
int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_eq *p_eq = cookie;
	struct qed_chain *p_chain = &p_eq->chain;
	int rc = 0;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (to comply with our chain), so our macros would comply
	 */
	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
	    qed_chain_get_usable_per_page(p_chain))
		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

	/* Complete current segment of eq entries */
	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

		if (!p_eqe) {
			rc = -EINVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,
			   p_eqe->protocol_id,
			   p_eqe->reserved0,
			   le16_to_cpu(p_eqe->echo),
			   p_eqe->fw_return_code,
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (qed_async_event_completion(p_hwfn, p_eqe))
				rc = -EINVAL;
		} else if (qed_spq_completion(p_hwfn,
					      p_eqe->echo,
					      p_eqe->fw_return_code,
					      &p_eqe->data)) {
			rc = -EINVAL;
		}

		qed_chain_recycle_consumed(p_chain);
	}

	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

	return rc;
}

struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
	struct qed_eq *p_eq;

	/* Allocate EQ struct */
	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
	if (!p_eq)
		return NULL;

	/* Allocate and initialize EQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    num_elem,
			    sizeof(union event_ring_element),
			    &p_eq->chain))
		goto eq_allocate_fail;

	/* register EQ completion on the SP SB */
	qed_int_register_cb(p_hwfn, qed_eq_completion,
			    p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

	return p_eq;

eq_allocate_fail:
	qed_eq_free(p_hwfn, p_eq);
	return NULL;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
	qed_chain_reset(&p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
	if (!p_eq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_eq->chain);
	kfree(p_eq);
}

/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
			      struct eth_slow_path_rx_cqe *cqe,
			      enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe)
{
	int rc;

	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);

	return rc;
}

/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
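/* (Re)initialize the SPQ lists and lock, build the free pool by pointing
 * each pre-allocated entry's data_ptr at the DMA address of its own ramrod
 * data, acquire the SPQ CID and program the HW context.
 */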
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_virt = NULL;
	dma_addr_t p_phys = 0;
	u32 i, capacity;

	INIT_LIST_HEAD(&p_spq->pending);
	INIT_LIST_HEAD(&p_spq->completion_pending);
	INIT_LIST_HEAD(&p_spq->free_pool);
	INIT_LIST_HEAD(&p_spq->unlimited_pending);
	spin_lock_init(&p_spq->lock);

	/* SPQ empty pool */
	p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
	p_virt = p_spq->p_virt;

	capacity = qed_chain_get_capacity(&p_spq->chain);
	for (i = 0; i < capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		list_add_tail(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct qed_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count = 0;
	p_spq->comp_count = 0;
	p_spq->comp_sent_count = 0;
	p_spq->unlimited_pending_count = 0;

	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	qed_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	qed_chain_reset(&p_spq->chain);
}

int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_virt = NULL;
	struct qed_spq *p_spq = NULL;
	dma_addr_t p_phys = 0;
	u32 capacity;

	/* SPQ struct */
	p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
	if (!p_spq)
		return -ENOMEM;

	/* SPQ ring */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_SINGLE,
			    QED_CHAIN_CNT_TYPE_U16,
			    0,	/* N/A when the mode is SINGLE */
			    sizeof(struct slow_path_element),
			    &p_spq->chain))
		goto spq_allocate_fail;

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	capacity = qed_chain_get_capacity(&p_spq->chain);
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    capacity * sizeof(struct qed_spq_entry),
				    &p_phys, GFP_KERNEL);
	if (!p_virt)
		goto spq_allocate_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;
	p_hwfn->p_spq = p_spq;

	return 0;

spq_allocate_fail:
	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	return -ENOMEM;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	u32 capacity;

	if (!p_spq)
		return;

	if (p_spq->p_virt) {
		capacity = qed_chain_get_capacity(&p_spq->chain);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  capacity *
				  sizeof(struct qed_spq_entry),
				  p_spq->p_virt, p_spq->p_phys);
	}

	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
}

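/* Hand out an SPQ entry to work with: prefer one from the pre-allocated
 * free pool; if the pool is empty, allocate one atomically and mark it as
 * destined for the unlimited_pending queue.
 */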
int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;
	int rc = 0;

	spin_lock_bh(&p_spq->lock);

	if (list_empty(&p_spq->free_pool)) {
		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
		if (!p_ent) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate an SPQ entry for a pending ramrod\n");
			rc = -ENOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = list_first_entry(&p_spq->free_pool,
					 struct qed_spq_entry, list);
		list_del(&p_ent->list);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	spin_unlock_bh(&p_spq->lock);
	return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
				   struct qed_spq_entry *p_ent)
{
	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
{
	spin_lock_bh(&p_hwfn->p_spq->lock);
	__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_hwfn->p_spq->lock);
}

/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
			     struct qed_spq_entry *p_ent,
			     enum spq_priority priority)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		if (list_empty(&p_spq->free_pool)) {
			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return 0;
		} else {
			struct qed_spq_entry *p_en2;

			p_en2 = list_first_entry(&p_spq->free_pool,
						 struct qed_spq_entry, list);
			list_del(&p_en2->list);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire ring
			 * entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			/* EBLOCK responsible to free the allocated p_ent */
			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
				kfree(p_ent);

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case QED_SPQ_PRIORITY_NORMAL:
		list_add_tail(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case QED_SPQ_PRIORITY_HIGH:
		list_add(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/***************************************************************************
* Accessor
***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;	/* illegal */
	return p_hwfn->p_spq->cid;
}

/***************************************************************************
* Posting new Ramrods
***************************************************************************/
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
			     struct list_head *head, u32 keep_reserve)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	int rc;

	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !list_empty(head)) {
		struct qed_spq_entry *p_ent =
			list_first_entry(head, struct qed_spq_entry, list);
		list_del(&p_ent->list);
		list_add_tail(&p_ent->list, &p_spq->completion_pending);
		p_spq->comp_sent_count++;

		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
		if (rc) {
			list_del(&p_ent->list);
			__qed_spq_return_entry(p_hwfn, p_ent);
			return rc;
		}
	}

	return 0;
}

static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;

	while (!list_empty(&p_spq->free_pool)) {
		if (list_empty(&p_spq->unlimited_pending))
			break;

		p_ent = list_first_entry(&p_spq->unlimited_pending,
					 struct qed_spq_entry, list);
		if (!p_ent)
			return -EINVAL;

		list_del(&p_ent->list);

		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return qed_spq_post_list(p_hwfn, &p_spq->pending,
				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

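/* Post a ramrod: fill in the completion callback, queue the entry according
 * to its priority, flush as much of the pending list to HW as the chain
 * allows, and for EBLOCK mode block here until the completion (or a timeout)
 * is reported.
 */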
int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
	int rc = 0;
	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
	bool b_ret_ent = true;

	if (!p_hwfn)
		return -EINVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
		return -EINVAL;
	}

	/* Complete the entry */
	rc = qed_spq_fill_entry(p_hwfn, p_ent);

	spin_lock_bh(&p_spq->lock);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Add the request to the pending queue */
	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = qed_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	spin_unlock_bh(&p_spq->lock);

	if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
		/* For entries in QED BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
				   p_ent->queue == &p_spq->unlimited_pending);

		if (p_ent->queue == &p_spq->unlimited_pending) {
			/* This is an allocated p_ent which does not need to
			 * return to pool.
			 */
			kfree(p_ent);
			return rc;
		}

		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		qed_spq_return_entry(p_hwfn, p_ent);
	}
	return rc;

spq_post_fail2:
	spin_lock_bh(&p_spq->lock);
	list_del(&p_ent->list);
	qed_chain_return_produced(&p_spq->chain);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

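/* Handle a slowpath completion reported by the EQ: match the EQE echo
 * against the completion_pending list, use the completion bitmap so chain
 * elements are returned in order even when completions arrive out of order,
 * invoke the entry's callback and then try to post further pending ramrods.
 */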
int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data)
{
	struct qed_spq *p_spq;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_spq_entry *tmp;
	struct qed_spq_entry *found = NULL;
	int rc;

	if (!p_hwfn)
		return -EINVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return -EINVAL;

	spin_lock_bh(&p_spq->lock);
	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
		if (p_ent->elem.hdr.echo == echo) {
			u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;

			list_del(&p_ent->list);

			/* Avoid overriding of SPQ entries when getting
			 * out-of-order completions, by marking the completions
			 * in a bitmap and increasing the chain consumer only
			 * for the first successive completed entries.
			 */
			__set_bit(pos, p_spq->p_comp_bitmap);

			while (test_bit(p_spq->comp_bitmap_idx,
					p_spq->p_comp_bitmap)) {
				__clear_bit(p_spq->comp_bitmap_idx,
					    p_spq->p_comp_bitmap);
				p_spq->comp_bitmap_idx++;
				qed_chain_return_produced(&p_spq->chain);
			}

			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is relatively uncommon - depends on scenarios
		 * which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
			   le16_to_cpu(echo),
			   le16_to_cpu(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	spin_unlock_bh(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an entry this EQE [echo %04x] completes\n",
			  le16_to_cpu(echo));
		return -EEXIST;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Complete EQE [echo %04x]: func %p cookie %p\n",
		   le16_to_cpu(echo),
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);
	else
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SPQ,
			   "Got a completion without a callback function\n");

	if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
	    (found->queue == &p_spq->unlimited_pending))
		/* EBLOCK is responsible for returning its own entry into the
		 * free list, unless it originally added the entry into the
		 * unlimited pending list.
		 */
		qed_spq_return_entry(p_hwfn, found);

	/* Attempt to post pending requests */
	spin_lock_bh(&p_spq->lock);
	rc = qed_spq_pend_post(p_hwfn);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
	if (!p_consq)
		return NULL;

	/* Allocate and initialize ConsQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    QED_CHAIN_PAGE_SIZE / 0x80,
			    0x80, &p_consq->chain))
		goto consq_allocate_fail;

	return p_consq;

consq_allocate_fail:
	qed_consq_free(p_hwfn, p_consq);
	return NULL;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
	qed_chain_reset(&p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
	if (!p_consq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_consq->chain);
	kfree(p_consq);
}