/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "iw_cxgb4.h"

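/*
 * Post a FW_RI_RES_WR RESET work request for the CQ, wait for the
 * firmware completion, then free the host-side resources (SW queue,
 * DMA ring, and CQID).
 */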
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                      struct c4iw_dev_ucontext *uctx, struct sk_buff *skb,
                      struct c4iw_wr_wait *wr_waitp)
{
        struct fw_ri_res_wr *res_wr;
        struct fw_ri_res *res;
        int wr_len;
        int ret;

        wr_len = sizeof(*res_wr) + sizeof(*res);
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

        res_wr = __skb_put_zero(skb, wr_len);
        res_wr->op_nres = cpu_to_be32(
                        FW_WR_OP_V(FW_RI_RES_WR) |
                        FW_RI_RES_WR_NRES_V(1) |
                        FW_WR_COMPL_F);
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (uintptr_t)wr_waitp;
        res = res_wr->res;
        res->u.cq.restype = FW_RI_RES_TYPE_CQ;
        res->u.cq.op = FW_RI_RES_OP_RESET;
        res->u.cq.iqid = cpu_to_be32(cq->cqid);

        c4iw_init_wr_wait(wr_waitp);
        ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);

        kfree(cq->sw_queue);
        dma_free_coherent(&rdev->lldi.pdev->dev,
                          cq->memsize, cq->queue,
                          dma_unmap_addr(cq, mapping));
        c4iw_put_cqid(rdev, cq->cqid, uctx);
        return ret;
}

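/*
 * Allocate a CQID plus the memory backing the CQ (a kernel SW queue and
 * a DMA-coherent HW ring), then post a FW_RI_RES_WR WRITE work request
 * describing the ingress queue to the firmware.
 */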
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                     struct c4iw_dev_ucontext *uctx,
                     struct c4iw_wr_wait *wr_waitp)
{
        struct fw_ri_res_wr *res_wr;
        struct fw_ri_res *res;
        int wr_len;
        int user = (uctx != &rdev->uctx);
        int ret;
        struct sk_buff *skb;

        cq->cqid = c4iw_get_cqid(rdev, uctx);
        if (!cq->cqid) {
                ret = -ENOMEM;
                goto err1;
        }

        if (!user) {
                cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
                if (!cq->sw_queue) {
                        ret = -ENOMEM;
                        goto err2;
                }
        }
        cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
                                       &cq->dma_addr, GFP_KERNEL);
        if (!cq->queue) {
                ret = -ENOMEM;
                goto err3;
        }
        dma_unmap_addr_set(cq, mapping, cq->dma_addr);
        memset(cq->queue, 0, cq->memsize);

        /* build fw_ri_res_wr */
        wr_len = sizeof(*res_wr) + sizeof(*res);

        skb = alloc_skb(wr_len, GFP_KERNEL);
        if (!skb) {
                ret = -ENOMEM;
                goto err4;
        }
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

        res_wr = __skb_put_zero(skb, wr_len);
        res_wr->op_nres = cpu_to_be32(
                        FW_WR_OP_V(FW_RI_RES_WR) |
                        FW_RI_RES_WR_NRES_V(1) |
                        FW_WR_COMPL_F);
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (uintptr_t)wr_waitp;
        res = res_wr->res;
        res->u.cq.restype = FW_RI_RES_TYPE_CQ;
        res->u.cq.op = FW_RI_RES_OP_WRITE;
        res->u.cq.iqid = cpu_to_be32(cq->cqid);
        res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
                        FW_RI_RES_WR_IQANUS_V(0) |
                        FW_RI_RES_WR_IQANUD_V(1) |
                        FW_RI_RES_WR_IQANDST_F |
                        FW_RI_RES_WR_IQANDSTINDEX_V(
                                rdev->lldi.ciq_ids[cq->vector]));
        res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
                        FW_RI_RES_WR_IQDROPRSS_F |
                        FW_RI_RES_WR_IQPCIECH_V(2) |
                        FW_RI_RES_WR_IQINTCNTTHRESH_V(0) |
                        FW_RI_RES_WR_IQO_F |
                        FW_RI_RES_WR_IQESIZE_V(1));
        res->u.cq.iqsize = cpu_to_be16(cq->size);
        res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

        c4iw_init_wr_wait(wr_waitp);
        ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
        if (ret)
                goto err4;

        cq->gen = 1;
        cq->gts = rdev->lldi.gts_reg;
        cq->rdev = rdev;

        cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
                                      &cq->bar2_qid,
                                      user ? &cq->bar2_pa : NULL);
        if (user && !cq->bar2_pa) {
                pr_warn("%s: cqid %u not in BAR2 range\n",
                        pci_name(rdev->lldi.pdev), cq->cqid);
                ret = -EINVAL;
                goto err4;
        }
        return 0;
err4:
        dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
                          dma_unmap_addr(cq, mapping));
err3:
        kfree(cq->sw_queue);
err2:
        c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
        return ret;
}

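/*
 * Synthesize a T4_ERR_SWFLUSH completion for one outstanding RQ WR and
 * place it on the software CQ.
 */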
static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
{
        struct t4_cqe cqe;

        pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
                 wq, cq, cq->sw_cidx, cq->sw_pidx);
        memset(&cqe, 0, sizeof(cqe));
        cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
                                 CQE_OPCODE_V(FW_RI_SEND) |
                                 CQE_TYPE_V(0) |
                                 CQE_SWCQE_V(1) |
                                 CQE_QPID_V(wq->sq.qid));
        cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
        cq->sw_queue[cq->sw_pidx] = cqe;
        t4_swcq_produce(cq);
}

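/*
 * Insert flush CQEs for every RQ WR still in use, less the @count
 * entries the caller already found pending in the CQ (see
 * c4iw_count_rcqes()).  Returns the number of CQEs inserted.
 */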
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
        int flushed = 0;
        int in_use = wq->rq.in_use - count;

        pr_debug("wq %p cq %p rq.in_use %u skip count %u\n",
                 wq, cq, wq->rq.in_use, count);
        while (in_use--) {
                insert_recv_cqe(wq, cq);
                flushed++;
        }
        return flushed;
}

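/*
 * Synthesize a T4_ERR_SWFLUSH completion for the given SQ WR and place
 * it on the software CQ.
 */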
static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
                          struct t4_swsqe *swcqe)
{
        struct t4_cqe cqe;

        pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
                 wq, cq, cq->sw_cidx, cq->sw_pidx);
        memset(&cqe, 0, sizeof(cqe));
        cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
                                 CQE_OPCODE_V(swcqe->opcode) |
                                 CQE_TYPE_V(1) |
                                 CQE_SWCQE_V(1) |
                                 CQE_QPID_V(wq->sq.qid));
        CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
        cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
        cq->sw_queue[cq->sw_pidx] = cqe;
        t4_swcq_produce(cq);
}

static void advance_oldest_read(struct t4_wq *wq);

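/*
 * Flush every SQ WR from flush_cidx up to pidx, advancing the
 * oldest-read pointer past any flushed read WR.  Returns the number of
 * WRs flushed.
 */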
int c4iw_flush_sq(struct c4iw_qp *qhp)
{
        int flushed = 0;
        struct t4_wq *wq = &qhp->wq;
        struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq);
        struct t4_cq *cq = &chp->cq;
        int idx;
        struct t4_swsqe *swsqe;

        if (wq->sq.flush_cidx == -1)
                wq->sq.flush_cidx = wq->sq.cidx;
        idx = wq->sq.flush_cidx;
        while (idx != wq->sq.pidx) {
                swsqe = &wq->sq.sw_sq[idx];
                swsqe->flushed = 1;
                insert_sq_cqe(wq, cq, swsqe);
                if (wq->sq.oldest_read == swsqe)
                        advance_oldest_read(wq);
                flushed++;
                if (++idx == wq->sq.size)
                        idx = 0;
        }
        wq->sq.flush_cidx += flushed;
        if (wq->sq.flush_cidx >= wq->sq.size)
                wq->sq.flush_cidx -= wq->sq.size;
        return flushed;
}

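/*
 * Move completions for signaled WRs that completed out of order from
 * the SW SQ into the SW CQ, skipping over unsignaled WRs and stopping
 * at the first signaled WR that has not yet completed.
 */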
static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
        struct t4_swsqe *swsqe;
        int cidx;

        if (wq->sq.flush_cidx == -1)
                wq->sq.flush_cidx = wq->sq.cidx;
        cidx = wq->sq.flush_cidx;

        while (cidx != wq->sq.pidx) {
                swsqe = &wq->sq.sw_sq[cidx];
                if (!swsqe->signaled) {
                        if (++cidx == wq->sq.size)
                                cidx = 0;
                } else if (swsqe->complete) {
                        /*
                         * Insert this completed cqe into the swcq.
                         */
                        pr_debug("moving cqe into swcq sq idx %u cq idx %u\n",
                                 cidx, cq->sw_pidx);
                        swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
                        cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
                        t4_swcq_produce(cq);
                        swsqe->flushed = 1;
                        if (++cidx == wq->sq.size)
                                cidx = 0;
                        wq->sq.flush_cidx = cidx;
                } else {
                        break;
                }
        }
}

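/*
 * Rebuild an SQ-type read-request CQE from the RQ-type read-response
 * CQE the HW delivered, using the index and length saved in the oldest
 * outstanding read WR.
 */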
static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
                                struct t4_cqe *read_cqe)
{
        read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
        read_cqe->len = htonl(wq->sq.oldest_read->read_len);
        read_cqe->header = htonl(CQE_QPID_V(CQE_QPID(hw_cqe)) |
                                 CQE_SWCQE_V(SW_CQE(hw_cqe)) |
                                 CQE_OPCODE_V(FW_RI_READ_REQ) |
                                 CQE_TYPE_V(1));
        read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}

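/*
 * Advance oldest_read to the next outstanding read WR in the SQ, or to
 * NULL if none remain.
 */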
static void advance_oldest_read(struct t4_wq *wq)
{
        u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;

        if (rptr == wq->sq.size)
                rptr = 0;
        while (rptr != wq->sq.pidx) {
                wq->sq.oldest_read = &wq->sq.sw_sq[rptr];

                if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
                        return;
                if (++rptr == wq->sq.size)
                        rptr = 0;
        }
        wq->sq.oldest_read = NULL;
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 * Deal with out-of-order completions and with completions that
 * complete previously unsignaled WRs.
 */
void c4iw_flush_hw_cq(struct c4iw_cq *chp)
{
        struct t4_cqe *hw_cqe, *swcqe, read_cqe;
        struct c4iw_qp *qhp;
        struct t4_swsqe *swsqe;
        int ret;

        pr_debug("cqid 0x%x\n", chp->cq.cqid);
        ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);

        /*
         * This logic is similar to poll_cq(), but not quite the same
         * unfortunately.  Need to move pertinent HW CQEs to the SW CQ but
         * also do any translation magic that poll_cq() normally does.
         */
        while (!ret) {
                qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));

                /*
                 * drop CQEs with no associated QP
                 */
                if (qhp == NULL)
                        goto next_cqe;

                if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
                        goto next_cqe;

                if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {

                        /* If we have reached here because of async
                         * event or other error, and have egress error
                         * then drop
                         */
                        if (CQE_TYPE(hw_cqe) == 1)
                                goto next_cqe;

                        /* drop peer2peer RTR reads.
                         */
                        if (CQE_WRID_STAG(hw_cqe) == 1)
                                goto next_cqe;

                        /*
                         * Eat completions for unsignaled read WRs.
                         */
                        if (!qhp->wq.sq.oldest_read->signaled) {
                                advance_oldest_read(&qhp->wq);
                                goto next_cqe;
                        }

                        /*
                         * Don't write to the HWCQ, create a new read req CQE
                         * in local memory and move it into the swcq.
                         */
                        create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe);
                        hw_cqe = &read_cqe;
                        advance_oldest_read(&qhp->wq);
                }

                /* if it's an SQ completion, then do the magic to move all the
                 * unsignaled and now in-order completions into the swcq.
                 */
                if (SQ_TYPE(hw_cqe)) {
                        swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
                        swsqe->cqe = *hw_cqe;
                        swsqe->complete = 1;
                        flush_completed_wrs(&qhp->wq, &chp->cq);
                } else {
                        swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
                        *swcqe = *hw_cqe;
                        swcqe->header |= cpu_to_be32(CQE_SWCQE_V(1));
                        t4_swcq_produce(&chp->cq);
                }
next_cqe:
                t4_hwcq_consume(&chp->cq);
                ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
        }
}

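/*
 * Return 1 if polling this CQE would complete an RQ WR; return 0 for
 * CQEs that poll_cq() drops or translates instead.
 */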
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
        if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
                return 0;

        if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
                return 0;

        if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
                return 0;

        if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
                return 0;
        return 1;
}

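/*
 * Count the SW CQEs destined for this wq's RQ so the flush code knows
 * how many RQ WRs already have completions pending.
 */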
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
        struct t4_cqe *cqe;
        u32 ptr;

        *count = 0;
        pr_debug("count zero %d\n", *count);
        ptr = cq->sw_cidx;
        while (ptr != cq->sw_pidx) {
                cqe = &cq->sw_queue[ptr];
                if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
                    (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
                        (*count)++;
                if (++ptr == cq->size)
                        ptr = 0;
        }
        pr_debug("cq %p count %d\n", cq, *count);
}

/*
 * poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *     0             CQE returned ok.
 *     -EAGAIN       CQE skipped, try again.
 *     -EOVERFLOW    CQ overflow detected.
 */
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
                   u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
        int ret = 0;
        struct t4_cqe *hw_cqe, read_cqe;

        *cqe_flushed = 0;
        *credit = 0;
        ret = t4_next_cqe(cq, &hw_cqe);
        if (ret)
                return ret;

        pr_debug("CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
                 CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
                 CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
                 CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
                 CQE_WRID_LOW(hw_cqe));

        /*
         * skip cqe's not affiliated with a QP.
         */
        if (wq == NULL) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * skip hw cqe's if the wq is flushed.
         */
        if (wq->flushed && !SW_CQE(hw_cqe)) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * skip TERMINATE cqes...
         */
        if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * Special cqe for drain WR completions...
         */
        if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
                *cookie = CQE_DRAIN_COOKIE(hw_cqe);
                *cqe = *hw_cqe;
                goto skip_cqe;
        }

        /*
         * Gotta tweak READ completions:
         *      1) the cqe doesn't contain the sq_wptr from the wr.
         *      2) opcode not reflected from the wr.
         *      3) read_len not reflected from the wr.
         *      4) cq_type is RQ_TYPE not SQ_TYPE.
         */
        if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {

                /* If we have reached here because of async
                 * event or other error, and have egress error
                 * then drop
                 */
                if (CQE_TYPE(hw_cqe) == 1) {
                        if (CQE_STATUS(hw_cqe))
                                t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }

                /* If this is an unsolicited read response, then the read
                 * was generated by the kernel driver as part of peer-2-peer
                 * connection setup. So ignore the completion.
                 */
                if (CQE_WRID_STAG(hw_cqe) == 1) {
                        if (CQE_STATUS(hw_cqe))
                                t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }

                /*
                 * Eat completions for unsignaled read WRs.
                 */
                if (!wq->sq.oldest_read->signaled) {
                        advance_oldest_read(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }

                /*
                 * Don't write to the HWCQ, so create a new read req CQE
                 * in local memory.
                 */
                create_read_req_cqe(wq, hw_cqe, &read_cqe);
                hw_cqe = &read_cqe;
                advance_oldest_read(wq);
        }

        if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
                *cqe_flushed = (CQE_STATUS(hw_cqe) == T4_ERR_SWFLUSH);
                t4_set_wq_in_error(wq);
        }

        /*
         * RECV completion.
         */
        if (RQ_TYPE(hw_cqe)) {

                /*
                 * HW only validates 4 bits of MSN.  So we must validate that
                 * the MSN in the SEND is the next expected MSN.  If it's not,
                 * then we complete this with T4_ERR_MSN and mark the wq in
                 * error.
                 */

                if (t4_rq_empty(wq)) {
                        t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }
                if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
                        t4_set_wq_in_error(wq);
                        hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
                        goto proc_cqe;
                }
                goto proc_cqe;
        }

        /*
         * If we get here it's a send completion.
         *
         * Handle out of order completion. These get stuffed
         * in the SW SQ. Then the SW SQ is walked to move any
         * now in-order completions into the SW CQ.  This handles
         * 2 cases:
         *      1) reaping unsignaled WRs when the first subsequent
         *         signaled WR is completed.
         *      2) out of order read completions.
         */
        if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
                struct t4_swsqe *swsqe;

                pr_debug("out of order completion going in sw_sq at idx %u\n",
                         CQE_WRID_SQ_IDX(hw_cqe));
                swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
                swsqe->cqe = *hw_cqe;
                swsqe->complete = 1;
                ret = -EAGAIN;
                goto flush_wq;
        }

proc_cqe:
        *cqe = *hw_cqe;

        /*
         * Reap the associated WR(s) that are freed up with this
         * completion.
         */
        if (SQ_TYPE(hw_cqe)) {
                int idx = CQE_WRID_SQ_IDX(hw_cqe);

                /*
                 * Account for any unsignaled completions completed by
                 * this signaled completion.  In this case, cidx points
                 * to the first unsignaled one, and idx points to the
                 * signaled one.  So adjust in_use based on this delta.
                 * if this is not completing any unsigned wrs, then the
                 * delta will be 0. Handle wrapping also!
                 */
                if (idx < wq->sq.cidx)
                        wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
                else
                        wq->sq.in_use -= idx - wq->sq.cidx;

                wq->sq.cidx = (uint16_t)idx;
                pr_debug("completing sq idx %u\n", wq->sq.cidx);
                *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
                if (c4iw_wr_log)
                        c4iw_log_wr_stats(wq, hw_cqe);
                t4_sq_consume(wq);
        } else {
                pr_debug("completing rq idx %u\n", wq->rq.cidx);
                *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
                if (c4iw_wr_log)
                        c4iw_log_wr_stats(wq, hw_cqe);
                t4_rq_consume(wq);
                goto skip_cqe;
        }

flush_wq:
        /*
         * Flush any completed cqes that are now in-order.
         */
        flush_completed_wrs(wq, cq);

skip_cqe:
        if (SW_CQE(hw_cqe)) {
                pr_debug("cq %p cqid 0x%x skip sw cqe cidx %u\n",
                         cq, cq->cqid, cq->sw_cidx);
                t4_swcq_consume(cq);
        } else {
                pr_debug("cq %p cqid 0x%x skip hw cqe cidx %u\n",
                         cq, cq->cqid, cq->cidx);
                t4_hwcq_consume(cq);
        }
        return ret;
}

/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *     0                    cqe returned
 *     -ENODATA             EMPTY
 *     -EAGAIN              caller must try again
 *     any other -errno     fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
        struct c4iw_qp *qhp = NULL;
        struct t4_cqe uninitialized_var(cqe), *rd_cqe;
        struct t4_wq *wq;
        u32 credit = 0;
        u8 cqe_flushed;
        u64 cookie = 0;
        int ret;

        ret = t4_next_cqe(&chp->cq, &rd_cqe);

        if (ret)
                return ret;

        qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
        if (!qhp) {
                wq = NULL;
        } else {
                spin_lock(&qhp->lock);
                wq = &(qhp->wq);
        }
        ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
        if (ret)
                goto out;

        wc->wr_id = cookie;
        wc->qp = &qhp->ibqp;
        wc->vendor_err = CQE_STATUS(&cqe);
        wc->wc_flags = 0;

        pr_debug("qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
                 CQE_QPID(&cqe),
                 CQE_TYPE(&cqe), CQE_OPCODE(&cqe),
                 CQE_STATUS(&cqe), CQE_LEN(&cqe),
                 CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe),
                 (unsigned long long)cookie);

        if (CQE_TYPE(&cqe) == 0) {
                if (!CQE_STATUS(&cqe))
                        wc->byte_len = CQE_LEN(&cqe);
                else
                        wc->byte_len = 0;
                wc->opcode = IB_WC_RECV;
                if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
                    CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
                        wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
                        wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                        c4iw_invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey);
                }
        } else {
                switch (CQE_OPCODE(&cqe)) {
                case FW_RI_RDMA_WRITE:
                        wc->opcode = IB_WC_RDMA_WRITE;
                        break;
                case FW_RI_READ_REQ:
                        wc->opcode = IB_WC_RDMA_READ;
                        wc->byte_len = CQE_LEN(&cqe);
                        break;
                case FW_RI_SEND_WITH_INV:
                case FW_RI_SEND_WITH_SE_INV:
                        wc->opcode = IB_WC_SEND;
                        wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                        break;
                case FW_RI_SEND:
                case FW_RI_SEND_WITH_SE:
                        wc->opcode = IB_WC_SEND;
                        break;
                case FW_RI_LOCAL_INV:
                        wc->opcode = IB_WC_LOCAL_INV;
                        break;
                case FW_RI_FAST_REGISTER:
                        wc->opcode = IB_WC_REG_MR;

                        /* Invalidate the MR if the fastreg failed */
                        if (CQE_STATUS(&cqe) != T4_ERR_SUCCESS)
                                c4iw_invalidate_mr(qhp->rhp,
                                                   CQE_WRID_FR_STAG(&cqe));
                        break;
                case C4IW_DRAIN_OPCODE:
                        wc->opcode = IB_WC_SEND;
                        break;
                default:
                        pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
                               CQE_OPCODE(&cqe), CQE_QPID(&cqe));
                        ret = -EINVAL;
                        goto out;
                }
        }

        if (cqe_flushed) {
                wc->status = IB_WC_WR_FLUSH_ERR;
        } else {
                switch (CQE_STATUS(&cqe)) {
                case T4_ERR_SUCCESS:
                        wc->status = IB_WC_SUCCESS;
                        break;
                case T4_ERR_STAG:
                        wc->status = IB_WC_LOC_ACCESS_ERR;
                        break;
                case T4_ERR_PDID:
                        wc->status = IB_WC_LOC_PROT_ERR;
                        break;
                case T4_ERR_QPID:
                case T4_ERR_ACCESS:
                        wc->status = IB_WC_LOC_ACCESS_ERR;
                        break;
                case T4_ERR_WRAP:
                        wc->status = IB_WC_GENERAL_ERR;
                        break;
                case T4_ERR_BOUND:
                        wc->status = IB_WC_LOC_LEN_ERR;
                        break;
                case T4_ERR_INVALIDATE_SHARED_MR:
                case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
                        wc->status = IB_WC_MW_BIND_ERR;
                        break;
                case T4_ERR_CRC:
                case T4_ERR_MARKER:
                case T4_ERR_PDU_LEN_ERR:
                case T4_ERR_OUT_OF_RQE:
                case T4_ERR_DDP_VERSION:
                case T4_ERR_RDMA_VERSION:
                case T4_ERR_DDP_QUEUE_NUM:
                case T4_ERR_MSN:
                case T4_ERR_TBIT:
                case T4_ERR_MO:
                case T4_ERR_MSN_RANGE:
                case T4_ERR_IRD_OVERFLOW:
                case T4_ERR_OPCODE:
                case T4_ERR_INTERNAL_ERR:
                        wc->status = IB_WC_FATAL_ERR;
                        break;
                case T4_ERR_SWFLUSH:
                        wc->status = IB_WC_WR_FLUSH_ERR;
                        break;
                default:
                        pr_err("Unexpected cqe_status 0x%x for QPID=0x%0x\n",
                               CQE_STATUS(&cqe), CQE_QPID(&cqe));
                        wc->status = IB_WC_FATAL_ERR;
                }
        }
out:
        if (wq)
                spin_unlock(&qhp->lock);
        return ret;
}

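/*
 * Poll up to @num_entries completions under the CQ lock, retrying
 * whenever poll_cq() asks for a CQE to be skipped.
 */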
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct c4iw_cq *chp;
        unsigned long flags;
        int npolled;
        int err = 0;

        chp = to_c4iw_cq(ibcq);

        spin_lock_irqsave(&chp->lock, flags);
        for (npolled = 0; npolled < num_entries; ++npolled) {
                do {
                        err = c4iw_poll_cq_one(chp, wc + npolled);
                } while (err == -EAGAIN);
                if (err)
                        break;
        }
        spin_unlock_irqrestore(&chp->lock, flags);
        return !err || err == -ENODATA ? npolled : err;
}

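/*
 * Remove the CQID from the idr, wait for all references to drop, then
 * reset the HW CQ and free the host resources.
 */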
int c4iw_destroy_cq(struct ib_cq *ib_cq)
{
        struct c4iw_cq *chp;
        struct c4iw_ucontext *ucontext;

        pr_debug("ib_cq %p\n", ib_cq);
        chp = to_c4iw_cq(ib_cq);

        remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
        atomic_dec(&chp->refcnt);
        wait_event(chp->wait, !atomic_read(&chp->refcnt));

        ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
                                  : NULL;
        destroy_cq(&chp->rhp->rdev, &chp->cq,
                   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
                   chp->destroy_skb, chp->wr_waitp);
        c4iw_put_wr_wait(chp->wr_waitp);
        kfree(chp);
        return 0;
}

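/*
 * Create a CQ sized for attr->cqe entries plus the status page and
 * full-vs-empty slack.  For user CQs, also hand back mmap keys for the
 * queue memory and the BAR2 doorbell page.
 */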
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
                             const struct ib_cq_init_attr *attr,
                             struct ib_ucontext *ib_context,
                             struct ib_udata *udata)
{
        int entries = attr->cqe;
        int vector = attr->comp_vector;
        struct c4iw_dev *rhp;
        struct c4iw_cq *chp;
        struct c4iw_create_cq_resp uresp;
        struct c4iw_ucontext *ucontext = NULL;
        int ret, wr_len;
        size_t memsize, hwentries;
        struct c4iw_mm_entry *mm, *mm2;

        pr_debug("ib_dev %p entries %d\n", ibdev, entries);
        if (attr->flags)
                return ERR_PTR(-EINVAL);

        rhp = to_c4iw_dev(ibdev);

        if (vector >= rhp->rdev.lldi.nciq)
                return ERR_PTR(-EINVAL);

        chp = kzalloc(sizeof(*chp), GFP_KERNEL);
        if (!chp)
                return ERR_PTR(-ENOMEM);
        chp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
        if (!chp->wr_waitp) {
                ret = -ENOMEM;
                goto err_free_chp;
        }
        c4iw_init_wr_wait(chp->wr_waitp);

        wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
        chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
        if (!chp->destroy_skb) {
                ret = -ENOMEM;
                goto err_free_wr_wait;
        }

        if (ib_context)
                ucontext = to_c4iw_ucontext(ib_context);

        /* account for the status page. */
        entries++;

        /* IQ needs one extra entry to differentiate full vs empty. */
        entries++;

        /*
         * entries must be multiple of 16 for HW.
         */
        entries = roundup(entries, 16);

        /*
         * Make actual HW queue 2x to avoid cidx_inc overflows.
         */
        hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);

        /*
         * Make HW queue at least 64 entries so GTS updates aren't too
         * frequent.
         */
        if (hwentries < 64)
                hwentries = 64;

        memsize = hwentries * sizeof(*chp->cq.queue);

        /*
         * memsize must be a multiple of the page size if it's a user cq.
         */
        if (ucontext)
                memsize = roundup(memsize, PAGE_SIZE);
        chp->cq.size = hwentries;
        chp->cq.memsize = memsize;
        chp->cq.vector = vector;

        ret = create_cq(&rhp->rdev, &chp->cq,
                        ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
                        chp->wr_waitp);
        if (ret)
                goto err_free_skb;

        chp->rhp = rhp;
        chp->cq.size--;                         /* status page */
        chp->ibcq.cqe = entries - 2;
        spin_lock_init(&chp->lock);
        spin_lock_init(&chp->comp_handler_lock);
        atomic_set(&chp->refcnt, 1);
        init_waitqueue_head(&chp->wait);
        ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
        if (ret)
                goto err_destroy_cq;

        if (ucontext) {
                ret = -ENOMEM;
                mm = kmalloc(sizeof(*mm), GFP_KERNEL);
                if (!mm)
                        goto err_remove_handle;
                mm2 = kmalloc(sizeof(*mm2), GFP_KERNEL);
                if (!mm2)
                        goto err_free_mm;

                uresp.qid_mask = rhp->rdev.cqmask;
                uresp.cqid = chp->cq.cqid;
                uresp.size = chp->cq.size;
                uresp.memsize = chp->cq.memsize;
                spin_lock(&ucontext->mmap_lock);
                uresp.key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                uresp.gts_key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                spin_unlock(&ucontext->mmap_lock);
                ret = ib_copy_to_udata(udata, &uresp,
                                       sizeof(uresp) - sizeof(uresp.reserved));
                if (ret)
                        goto err_free_mm2;

                mm->key = uresp.key;
                mm->addr = virt_to_phys(chp->cq.queue);
                mm->len = chp->cq.memsize;
                insert_mmap(ucontext, mm);

                mm2->key = uresp.gts_key;
                mm2->addr = chp->cq.bar2_pa;
                mm2->len = PAGE_SIZE;
                insert_mmap(ucontext, mm2);
        }
        pr_debug("cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
                 chp->cq.cqid, chp, chp->cq.size,
                 chp->cq.memsize, (unsigned long long)chp->cq.dma_addr);
        return &chp->ibcq;
err_free_mm2:
        kfree(mm2);
err_free_mm:
        kfree(mm);
err_remove_handle:
        remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err_destroy_cq:
        destroy_cq(&chp->rhp->rdev, &chp->cq,
                   ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
                   chp->destroy_skb, chp->wr_waitp);
err_free_skb:
        kfree_skb(chp->destroy_skb);
err_free_wr_wait:
        c4iw_put_wr_wait(chp->wr_waitp);
err_free_chp:
        kfree(chp);
        return ERR_PTR(ret);
}

int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
        return -ENOSYS;
}

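/*
 * Arm the CQ for the next notification event.  With
 * IB_CQ_REPORT_MISSED_EVENTS, returns 1 if CQEs are already pending.
 */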
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
        struct c4iw_cq *chp;
        int ret = 0;
        unsigned long flag;

        chp = to_c4iw_cq(ibcq);
        spin_lock_irqsave(&chp->lock, flag);
        t4_arm_cq(&chp->cq,
                  (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
        if (flags & IB_CQ_REPORT_MISSED_EVENTS)
                ret = t4_cq_notempty(&chp->cq);
        spin_unlock_irqrestore(&chp->lock, flag);
        return ret;
}