/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_iscsi.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_roce.h"

/***************************************************************************
* Structures & Definitions
***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT	(1)

#define SPQ_BLOCK_DELAY_MAX_ITER	(10)
#define SPQ_BLOCK_DELAY_US		(10)
#define SPQ_BLOCK_SLEEP_MAX_ITER	(1000)
#define SPQ_BLOCK_SLEEP_MS		(5)

/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
***************************************************************************/
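/* Completion callback used by BLOCK/EBLOCK ramrods: record the FW return
 * code and then publish 'done', so that a thread polling in
 * __qed_spq_block() can observe the completion.
 */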
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
				void *cookie,
				union event_ring_data *data, u8 fw_return_code)
{
	struct qed_spq_comp_done *comp_done;

	comp_done = (struct qed_spq_comp_done *)cookie;

	comp_done->fw_return_code = fw_return_code;

	/* Make sure completion done is visible on waiting thread */
	smp_store_release(&comp_done->done, 0x1);
}

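/* Poll for the completion flag set by qed_spq_blocking_cb(). Either busy-wait
 * (udelay) or sleep (msleep) between iterations, depending on the caller's
 * request; returns -EBUSY if the FW did not complete the ramrod in time.
 */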
static int __qed_spq_block(struct qed_hwfn *p_hwfn,
			   struct qed_spq_entry *p_ent,
			   u8 *p_fw_ret, bool sleep_between_iter)
{
	struct qed_spq_comp_done *comp_done;
	u32 iter_cnt;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
				      : SPQ_BLOCK_DELAY_MAX_ITER;

	while (iter_cnt--) {
		/* Validate we received a completion update */
		if (READ_ONCE(comp_done->done) == 1) {
			/* Read updated FW return value */
			smp_read_barrier_depends();
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}

		if (sleep_between_iter)
			msleep(SPQ_BLOCK_SLEEP_MS);
		else
			udelay(SPQ_BLOCK_DELAY_US);
	}

	return -EBUSY;
}

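/* Wait for a blocking ramrod to complete: first a quick busy-wait poll
 * (unless skip_quick_poll), then a sleeping poll; if the ramrod is still
 * stuck, request an MCP drain and retry once more before giving up.
 */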
static int qed_spq_block(struct qed_hwfn *p_hwfn,
			 struct qed_spq_entry *p_ent,
			 u8 *p_fw_ret, bool skip_quick_poll)
{
	struct qed_spq_comp_done *comp_done;
	int rc;

	/* A relatively short polling period w/o sleeping, to allow the FW to
	 * complete the ramrod and thus possibly to avoid the following sleeps.
	 */
	if (!skip_quick_poll) {
		rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
		if (!rc)
			return 0;
	}

	/* Move to polling with a sleeping period between iterations */
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (!rc)
		return 0;

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
	if (rc) {
		DP_NOTICE(p_hwfn, "MCP drain failed\n");
		goto err;
	}

	/* Retry after drain */
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (!rc)
		return 0;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return 0;
	}
err:
	DP_NOTICE(p_hwfn,
		  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
		  le32_to_cpu(p_ent->elem.hdr.cid),
		  p_ent->elem.hdr.cmd_id,
		  p_ent->elem.hdr.protocol_id,
		  le16_to_cpu(p_ent->elem.hdr.echo));

	return -EBUSY;
}

/***************************************************************************
* SPQ entries inner API
***************************************************************************/
static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
			      struct qed_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
	case QED_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = qed_spq_blocking_cb;
		break;
	case QED_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid,
		   p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi,
		   p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return 0;
}

/***************************************************************************
* HSI access
***************************************************************************/
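/* Initialize the CORE connection context that backs the SPQ: enable the
 * relevant XSTORM aggregation flags, select the LB_TC physical queue and
 * program the SPQ ring and ConsQ chain base addresses.
 */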
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
				  struct qed_spq *p_spq)
{
	u16 pq;
	struct qed_cxt_info cxt_info;
	struct core_conn_context *p_cxt;
	union qed_qm_pq_params pq_params;
	int rc;

	cxt_info.iid = p_spq->cid;

	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

	if (rc < 0) {
		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
		  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

	/* QM physical queue */
	memset(&pq_params, 0, sizeof(pq_params));
	pq_params.core.tc = LB_TC;
	pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);

	p_cxt->xstorm_st_context.spq_base_lo =
		DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
		DMA_HI_LE(p_spq->chain.p_phys_addr);

	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
		       p_hwfn->p_consq->chain.p_phys_addr);
}

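/* Copy a prepared SPQ entry into the next ring element and ring the XCM
 * doorbell with the new producer value; the memory barriers ensure the
 * element is written before the doorbell and that the doorbell write itself
 * has gone out.
 */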
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
			   struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
	u16 echo = qed_chain_get_prod_idx(p_chain);
	struct slow_path_element *elem;
	struct core_db_data db;

	p_ent->elem.hdr.echo = cpu_to_le16(echo);
	elem = qed_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
		return -EINVAL;
	}

	*elem = p_ent->elem; /* struct assignment */

	/* send a doorbell on the slow hwfn session */
	memset(&db, 0, sizeof(db));
	SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
	db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

	/* make sure the SPQE is updated before the doorbell */
	wmb();

	DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

	/* make sure the doorbell was rung */
	wmb();

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
		   p_spq->cid, db.params, db.agg_flags,
		   qed_chain_get_prod_idx(p_chain));

	return 0;
}

/***************************************************************************
* Asynchronous events
***************************************************************************/
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
			   struct event_ring_entry *p_eqe)
{
	switch (p_eqe->protocol_id) {
	case PROTOCOLID_ROCE:
		qed_async_roce_event(p_hwfn, p_eqe);
		return 0;
	case PROTOCOLID_COMMON:
		return qed_sriov_eqe_event(p_hwfn,
					   p_eqe->opcode,
					   p_eqe->echo, &p_eqe->data);
	case PROTOCOLID_ISCSI:
		if (!IS_ENABLED(CONFIG_QED_ISCSI))
			return -EINVAL;
		if (p_eqe->opcode == ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES) {
			u32 cid = le32_to_cpu(p_eqe->data.iscsi_info.cid);

			qed_ooo_release_connection_isles(p_hwfn,
							 p_hwfn->p_ooo_info,
							 cid);
			return 0;
		}

		if (p_hwfn->p_iscsi_info->event_cb) {
			struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;

			return p_iscsi->event_cb(p_iscsi->event_context,
						 p_eqe->opcode, &p_eqe->data);
		} else {
			DP_NOTICE(p_hwfn,
				  "iSCSI async completion is not set\n");
			return -EINVAL;
		}
	default:
		DP_NOTICE(p_hwfn,
			  "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return -EINVAL;
	}
}

/***************************************************************************
* EQ API
***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	mmiowb();
}

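/* EQ interrupt handler: walk the event ring from the current consumer up to
 * the FW consumer snapshot, dispatch async events to the per-protocol
 * handlers and regular completions to qed_spq_completion(), then report the
 * new consumer value back to FW via qed_eq_prod_update().
 */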
int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_eq *p_eq = cookie;
	struct qed_chain *p_chain = &p_eq->chain;
	int rc = 0;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (to comply with our chain), so our macros would comply
	 */
	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
	    qed_chain_get_usable_per_page(p_chain))
		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

	/* Complete current segment of eq entries */
	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

		if (!p_eqe) {
			rc = -EINVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,
			   p_eqe->protocol_id,
			   p_eqe->reserved0,
			   le16_to_cpu(p_eqe->echo),
			   p_eqe->fw_return_code,
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (qed_async_event_completion(p_hwfn, p_eqe))
				rc = -EINVAL;
		} else if (qed_spq_completion(p_hwfn,
					      p_eqe->echo,
					      p_eqe->fw_return_code,
					      &p_eqe->data)) {
			rc = -EINVAL;
		}

		qed_chain_recycle_consumed(p_chain);
	}

	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

	return rc;
}

struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
	struct qed_eq *p_eq;

	/* Allocate EQ struct */
	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
	if (!p_eq)
		return NULL;

	/* Allocate and initialize EQ chain*/
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    num_elem,
			    sizeof(union event_ring_element),
			    &p_eq->chain))
		goto eq_allocate_fail;

	/* register EQ completion on the SP SB */
	qed_int_register_cb(p_hwfn, qed_eq_completion,
			    p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

	return p_eq;

eq_allocate_fail:
	qed_eq_free(p_hwfn, p_eq);
	return NULL;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
	qed_chain_reset(&p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
	if (!p_eq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_eq->chain);
	kfree(p_eq);
}

/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
			      struct eth_slow_path_rx_cqe *cqe,
			      enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe)
{
	int rc;

	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);

	return rc;
}

/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
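/* Bring the SPQ to its initial state: rebuild the free pool from the
 * pre-allocated entries, clear the statistics and the completion bitmap,
 * acquire the CORE CID and program the HW context for it.
 */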
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_virt = NULL;
	dma_addr_t p_phys = 0;
	u32 i, capacity;

	INIT_LIST_HEAD(&p_spq->pending);
	INIT_LIST_HEAD(&p_spq->completion_pending);
	INIT_LIST_HEAD(&p_spq->free_pool);
	INIT_LIST_HEAD(&p_spq->unlimited_pending);
	spin_lock_init(&p_spq->lock);

	/* SPQ empty pool */
	p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
	p_virt = p_spq->p_virt;

	capacity = qed_chain_get_capacity(&p_spq->chain);
	for (i = 0; i < capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		list_add_tail(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct qed_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count = 0;
	p_spq->comp_count = 0;
	p_spq->comp_sent_count = 0;
	p_spq->unlimited_pending_count = 0;

	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	qed_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	qed_chain_reset(&p_spq->chain);
}

int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_virt = NULL;
	struct qed_spq *p_spq = NULL;
	dma_addr_t p_phys = 0;
	u32 capacity;

	/* SPQ struct */
	p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
	if (!p_spq)
		return -ENOMEM;

	/* SPQ ring */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_SINGLE,
			    QED_CHAIN_CNT_TYPE_U16,
			    0,   /* N/A when the mode is SINGLE */
			    sizeof(struct slow_path_element),
			    &p_spq->chain))
		goto spq_allocate_fail;

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	capacity = qed_chain_get_capacity(&p_spq->chain);
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    capacity * sizeof(struct qed_spq_entry),
				    &p_phys, GFP_KERNEL);
	if (!p_virt)
		goto spq_allocate_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;
	p_hwfn->p_spq = p_spq;

	return 0;

spq_allocate_fail:
	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	return -ENOMEM;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	u32 capacity;

	if (!p_spq)
		return;

	if (p_spq->p_virt) {
		capacity = qed_chain_get_capacity(&p_spq->chain);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  capacity *
				  sizeof(struct qed_spq_entry),
				  p_spq->p_virt, p_spq->p_phys);
	}

	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
}

int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;
	int rc = 0;

	spin_lock_bh(&p_spq->lock);

	if (list_empty(&p_spq->free_pool)) {
		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
		if (!p_ent) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate an SPQ entry for a pending ramrod\n");
			rc = -ENOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = list_first_entry(&p_spq->free_pool,
					 struct qed_spq_entry, list);
		list_del(&p_ent->list);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	spin_unlock_bh(&p_spq->lock);
	return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
				   struct qed_spq_entry *p_ent)
{
	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
{
	spin_lock_bh(&p_hwfn->p_spq->lock);
	__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_hwfn->p_spq->lock);
}

/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
			     struct qed_spq_entry *p_ent,
			     enum spq_priority priority)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		if (list_empty(&p_spq->free_pool)) {
			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return 0;
		} else {
			struct qed_spq_entry *p_en2;

			p_en2 = list_first_entry(&p_spq->free_pool,
						 struct qed_spq_entry, list);
			list_del(&p_en2->list);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire ring
			 * entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			/* EBLOCK responsible to free the allocated p_ent */
			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
				kfree(p_ent);

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case QED_SPQ_PRIORITY_NORMAL:
		list_add_tail(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case QED_SPQ_PRIORITY_HIGH:
		list_add(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/***************************************************************************
* Accessor
***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;	/* illegal */
	return p_hwfn->p_spq->cid;
}

/***************************************************************************
* Posting new Ramrods
***************************************************************************/
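/* Move entries from a pending list onto the chain and doorbell them, as long
 * as the chain has more than 'keep_reserve' free elements; on a HW post
 * failure the entry is returned to the free pool.
 */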
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
			     struct list_head *head, u32 keep_reserve)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	int rc;

	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !list_empty(head)) {
		struct qed_spq_entry *p_ent =
			list_first_entry(head, struct qed_spq_entry, list);
		list_del(&p_ent->list);
		list_add_tail(&p_ent->list, &p_spq->completion_pending);
		p_spq->comp_sent_count++;

		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
		if (rc) {
			list_del(&p_ent->list);
			__qed_spq_return_entry(p_hwfn, p_ent);
			return rc;
		}
	}

	return 0;
}

static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;

	while (!list_empty(&p_spq->free_pool)) {
		if (list_empty(&p_spq->unlimited_pending))
			break;

		p_ent = list_first_entry(&p_spq->unlimited_pending,
					 struct qed_spq_entry, list);
		if (!p_ent)
			return -EINVAL;

		list_del(&p_ent->list);

		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return qed_spq_post_list(p_hwfn, &p_spq->pending,
				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

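/* Post a single ramrod: fill the entry, add it to the pending queue under the
 * SPQ lock and flush pending entries to HW. For EBLOCK mode, also wait here
 * for the completion and release the entry, since the completion path cannot
 * free it (the caller still needs p_ent to see whether it succeeded).
 */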
int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
	int rc = 0;
	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
	bool b_ret_ent = true;

	if (!p_hwfn)
		return -EINVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
		return -EINVAL;
	}

	/* Complete the entry */
	rc = qed_spq_fill_entry(p_hwfn, p_ent);

	spin_lock_bh(&p_spq->lock);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Add the request to the pending queue */
	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = qed_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	spin_unlock_bh(&p_spq->lock);

	if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
		/* For entries in QED BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
				   p_ent->queue == &p_spq->unlimited_pending);

		if (p_ent->queue == &p_spq->unlimited_pending) {
			/* This is an allocated p_ent which does not need to
			 * return to pool.
			 */
			kfree(p_ent);
			return rc;
		}

		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		qed_spq_return_entry(p_hwfn, p_ent);
	}
	return rc;

spq_post_fail2:
	spin_lock_bh(&p_spq->lock);
	list_del(&p_ent->list);
	qed_chain_return_produced(&p_spq->chain);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

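/* Called from EQ processing when a ramrod completes: find the matching entry
 * on completion_pending by its echo value, handle out-of-order completions
 * through the completion bitmap, invoke the entry's callback and then try to
 * post any pending ramrods.
 */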
int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data)
{
	struct qed_spq *p_spq;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_spq_entry *tmp;
	struct qed_spq_entry *found = NULL;
	int rc;

	if (!p_hwfn)
		return -EINVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return -EINVAL;

	spin_lock_bh(&p_spq->lock);
	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
		if (p_ent->elem.hdr.echo == echo) {
			u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;

			list_del(&p_ent->list);

			/* Avoid overriding of SPQ entries when getting
			 * out-of-order completions, by marking the completions
			 * in a bitmap and increasing the chain consumer only
			 * for the first successive completed entries.
			 */
			__set_bit(pos, p_spq->p_comp_bitmap);

			while (test_bit(p_spq->comp_bitmap_idx,
					p_spq->p_comp_bitmap)) {
				__clear_bit(p_spq->comp_bitmap_idx,
					    p_spq->p_comp_bitmap);
				p_spq->comp_bitmap_idx++;
				qed_chain_return_produced(&p_spq->chain);
			}

			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is relatively uncommon - depends on scenarios
		 * which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
			   le16_to_cpu(echo),
			   le16_to_cpu(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	spin_unlock_bh(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an entry this EQE [echo %04x] completes\n",
			  le16_to_cpu(echo));
		return -EEXIST;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Complete EQE [echo %04x]: func %p cookie %p\n",
		   le16_to_cpu(echo),
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);
	else
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SPQ,
			   "Got a completion without a callback function\n");

	if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
	    (found->queue == &p_spq->unlimited_pending))
		/* EBLOCK is responsible for returning its own entry into the
		 * free list, unless it originally added the entry into the
		 * unlimited pending list.
		 */
		qed_spq_return_entry(p_hwfn, found);

	/* Attempt to post pending requests */
	spin_lock_bh(&p_spq->lock);
	rc = qed_spq_pend_post(p_hwfn);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
	if (!p_consq)
		return NULL;

	/* Allocate and initialize ConsQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    QED_CHAIN_PAGE_SIZE / 0x80,
			    0x80, &p_consq->chain))
		goto consq_allocate_fail;

	return p_consq;

consq_allocate_fail:
	qed_consq_free(p_hwfn, p_consq);
	return NULL;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
	qed_chain_reset(&p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
	if (!p_consq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_consq->chain);
	kfree(p_consq);
}