/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "iw_cxgb4.h"

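/*
 * Free a hardware CQ: post a FW_RI_RES_WR RESET work request on the
 * control txq and wait for the firmware reply, then release the
 * software queue, the DMA-coherent ring and the cqid.
 */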
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		      struct c4iw_dev_ucontext *uctx)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret;

	wr_len = sizeof *res_wr + sizeof *res;
	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			FW_RI_RES_WR_NRES_V(1) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)&wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_RESET;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);

	c4iw_init_wr_wait(&wr_wait);
	ret = c4iw_ofld_send(rdev, skb);
	if (!ret)
		ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);

	kfree(cq->sw_queue);
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
	c4iw_put_cqid(rdev, cq->cqid, uctx);
	return ret;
}

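/*
 * Allocate and initialize a hardware CQ: reserve a cqid, allocate the
 * software shadow queue (kernel CQs only) and the DMA-coherent ring,
 * post a FW_RI_RES_WR WRITE work request so the firmware programs the
 * ingress queue, and look up the BAR2 address used for doorbells.
 */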
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		     struct c4iw_dev_ucontext *uctx)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	int user = (uctx != &rdev->uctx);
	struct c4iw_wr_wait wr_wait;
	int ret;
	struct sk_buff *skb;

	cq->cqid = c4iw_get_cqid(rdev, uctx);
	if (!cq->cqid) {
		ret = -ENOMEM;
		goto err1;
	}

	if (!user) {
		cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
		if (!cq->sw_queue) {
			ret = -ENOMEM;
			goto err2;
		}
	}
	cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
				       &cq->dma_addr, GFP_KERNEL);
	if (!cq->queue) {
		ret = -ENOMEM;
		goto err3;
	}
	dma_unmap_addr_set(cq, mapping, cq->dma_addr);
	memset(cq->queue, 0, cq->memsize);

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto err4;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			FW_RI_RES_WR_NRES_V(1) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)&wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_WRITE;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);
	res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
			FW_RI_RES_WR_IQANUS_V(0) |
			FW_RI_RES_WR_IQANUD_V(1) |
			FW_RI_RES_WR_IQANDST_F |
			FW_RI_RES_WR_IQANDSTINDEX_V(
				rdev->lldi.ciq_ids[cq->vector]));
	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
			FW_RI_RES_WR_IQDROPRSS_F |
			FW_RI_RES_WR_IQPCIECH_V(2) |
			FW_RI_RES_WR_IQINTCNTTHRESH_V(0) |
			FW_RI_RES_WR_IQO_F |
			FW_RI_RES_WR_IQESIZE_V(1));
	res->u.cq.iqsize = cpu_to_be16(cq->size);
	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto err4;
	PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	if (ret)
		goto err4;

	cq->gen = 1;
	cq->gts = rdev->lldi.gts_reg;
	cq->rdev = rdev;

	cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
				      &cq->bar2_qid,
				      user ? &cq->bar2_pa : NULL);
	if (user && !cq->bar2_va) {
		pr_warn(MOD "%s: cqid %u not in BAR2 range.\n",
			pci_name(rdev->lldi.pdev), cq->cqid);
		ret = -EINVAL;
		goto err4;
	}
	return 0;
err4:
	dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
err3:
	kfree(cq->sw_queue);
err2:
	c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
	return ret;
}

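/*
 * Synthesize a T4_ERR_SWFLUSH receive CQE for the QP and produce it
 * onto the software CQ.
 */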
static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
{
	struct t4_cqe cqe;

	PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
	     wq, cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
				 CQE_OPCODE_V(FW_RI_SEND) |
				 CQE_TYPE_V(0) |
				 CQE_SWCQE_V(1) |
				 CQE_QPID_V(wq->sq.qid));
	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}

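/*
 * Insert a flush CQE for every RQ entry still in use, skipping the
 * first @count entries that the caller has already accounted for.
 * Returns the number of entries flushed.
 */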
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
	int flushed = 0;
	int in_use = wq->rq.in_use - count;

	BUG_ON(in_use < 0);
	PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
	     wq, cq, wq->rq.in_use, count);
	while (in_use--) {
		insert_recv_cqe(wq, cq);
		flushed++;
	}
	return flushed;
}

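/*
 * Synthesize a T4_ERR_SWFLUSH send CQE for the given software SQ entry
 * and produce it onto the software CQ.
 */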
static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
			  struct t4_swsqe *swcqe)
{
	struct t4_cqe cqe;

	PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
	     wq, cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
				 CQE_OPCODE_V(swcqe->opcode) |
				 CQE_TYPE_V(1) |
				 CQE_SWCQE_V(1) |
				 CQE_QPID_V(wq->sq.qid));
	CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}

static void advance_oldest_read(struct t4_wq *wq);

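/*
 * Flush every SQ entry between flush_cidx and pidx into the software
 * CQ, advancing the oldest-read pointer past any flushed read request.
 * Returns the number of entries flushed.
 */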
int c4iw_flush_sq(struct c4iw_qp *qhp)
{
	int flushed = 0;
	struct t4_wq *wq = &qhp->wq;
	struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq);
	struct t4_cq *cq = &chp->cq;
	int idx;
	struct t4_swsqe *swsqe;

	if (wq->sq.flush_cidx == -1)
		wq->sq.flush_cidx = wq->sq.cidx;
	idx = wq->sq.flush_cidx;
	BUG_ON(idx >= wq->sq.size);
	while (idx != wq->sq.pidx) {
		swsqe = &wq->sq.sw_sq[idx];
		BUG_ON(swsqe->flushed);
		swsqe->flushed = 1;
		insert_sq_cqe(wq, cq, swsqe);
		if (wq->sq.oldest_read == swsqe) {
			BUG_ON(swsqe->opcode != FW_RI_READ_REQ);
			advance_oldest_read(wq);
		}
		flushed++;
		if (++idx == wq->sq.size)
			idx = 0;
	}
	wq->sq.flush_cidx += flushed;
	if (wq->sq.flush_cidx >= wq->sq.size)
		wq->sq.flush_cidx -= wq->sq.size;
	return flushed;
}

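/*
 * Move any signaled, completed (and not yet flushed) SQ entries that
 * are now in order into the software CQ, stepping over unsignaled
 * entries and stopping at the first incomplete one.
 */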
static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
	struct t4_swsqe *swsqe;
	int cidx;

	if (wq->sq.flush_cidx == -1)
		wq->sq.flush_cidx = wq->sq.cidx;
	cidx = wq->sq.flush_cidx;
	BUG_ON(cidx > wq->sq.size);

	while (cidx != wq->sq.pidx) {
		swsqe = &wq->sq.sw_sq[cidx];
		if (!swsqe->signaled) {
			if (++cidx == wq->sq.size)
				cidx = 0;
		} else if (swsqe->complete) {

			BUG_ON(swsqe->flushed);

			/*
			 * Insert this completed cqe into the swcq.
			 */
			PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
			     __func__, cidx, cq->sw_pidx);
			swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
			cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
			t4_swcq_produce(cq);
			swsqe->flushed = 1;
			if (++cidx == wq->sq.size)
				cidx = 0;
			wq->sq.flush_cidx = cidx;
		} else
			break;
	}
}

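/*
 * Build a software SQ CQE for the oldest outstanding read request,
 * copying the QPID, generation and timestamp bits from the hardware
 * CQE that carried the read response.
 */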
static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
				struct t4_cqe *read_cqe)
{
	read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
	read_cqe->len = htonl(wq->sq.oldest_read->read_len);
	read_cqe->header = htonl(CQE_QPID_V(CQE_QPID(hw_cqe)) |
				 CQE_SWCQE_V(SW_CQE(hw_cqe)) |
				 CQE_OPCODE_V(FW_RI_READ_REQ) |
				 CQE_TYPE_V(1));
	read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}

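/*
 * Advance oldest_read to the next outstanding read request in the SQ,
 * or to NULL if there is none.
 */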
static void advance_oldest_read(struct t4_wq *wq)
{
	u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;

	if (rptr == wq->sq.size)
		rptr = 0;
	while (rptr != wq->sq.pidx) {
		wq->sq.oldest_read = &wq->sq.sw_sq[rptr];

		if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
			return;
		if (++rptr == wq->sq.size)
			rptr = 0;
	}
	wq->sq.oldest_read = NULL;
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 * Deal with out-of-order completions and with completions that
 * complete prior unsignaled WRs.
 */
void c4iw_flush_hw_cq(struct c4iw_cq *chp)
{
	struct t4_cqe *hw_cqe, *swcqe, read_cqe;
	struct c4iw_qp *qhp;
	struct t4_swsqe *swsqe;
	int ret;

	PDBG("%s cqid 0x%x\n", __func__, chp->cq.cqid);
	ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);

	/*
	 * This logic is similar to poll_cq(), but not quite the same
	 * unfortunately.  Need to move pertinent HW CQEs to the SW CQ but
	 * also do any translation magic that poll_cq() normally does.
	 */
	while (!ret) {
		qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));

		/*
		 * drop CQEs with no associated QP
		 */
		if (qhp == NULL)
			goto next_cqe;

		if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
			goto next_cqe;

		if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {

			/*
			 * If we reached here because of an async event or
			 * other error, and the CQE carries an egress error,
			 * then drop it.
			 */
			if (CQE_TYPE(hw_cqe) == 1)
				goto next_cqe;

			/*
			 * drop peer2peer RTR reads.
			 */
			if (CQE_WRID_STAG(hw_cqe) == 1)
				goto next_cqe;

			/*
			 * Eat completions for unsignaled read WRs.
			 */
			if (!qhp->wq.sq.oldest_read->signaled) {
				advance_oldest_read(&qhp->wq);
				goto next_cqe;
			}

			/*
			 * Don't write to the HWCQ, create a new read req CQE
			 * in local memory and move it into the swcq.
			 */
			create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe);
			hw_cqe = &read_cqe;
			advance_oldest_read(&qhp->wq);
		}

		/*
		 * If it's an SQ completion, then do the magic to move all
		 * the unsignaled and now in-order completions into the swcq.
		 */
		if (SQ_TYPE(hw_cqe)) {
			swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
			swsqe->cqe = *hw_cqe;
			swsqe->complete = 1;
			flush_completed_wrs(&qhp->wq, &chp->cq);
		} else {
			swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
			*swcqe = *hw_cqe;
			swcqe->header |= cpu_to_be32(CQE_SWCQE_V(1));
			t4_swcq_produce(&chp->cq);
		}
next_cqe:
		t4_hwcq_consume(&chp->cq);
		ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
	}
}

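/*
 * Return 1 if this CQE completes a WR actually posted on @wq; return 0
 * for terminates, peer-2-peer artifacts, and send completions with no
 * matching RQ entry.
 */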
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
	if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
		return 0;

	if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
		return 0;

	if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
		return 0;

	if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
		return 0;
	return 1;
}

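/*
 * Count the software CQEs on @cq that are RQ completions for the QP
 * that owns @wq.
 */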
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
	struct t4_cqe *cqe;
	u32 ptr;

	*count = 0;
	PDBG("%s count zero %d\n", __func__, *count);
	ptr = cq->sw_cidx;
	while (ptr != cq->sw_pidx) {
		cqe = &cq->sw_queue[ptr];
		if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
		    (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
			(*count)++;
		if (++ptr == cq->size)
			ptr = 0;
	}
	PDBG("%s cq %p count %d\n", __func__, cq, *count);
}

/*
 * poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *     0		CQE returned ok.
 *     -EAGAIN		CQE skipped, try again.
 *     -EOVERFLOW	CQ overflow detected.
 */
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
		   u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
	int ret = 0;
	struct t4_cqe *hw_cqe, read_cqe;

	*cqe_flushed = 0;
	*credit = 0;
	ret = t4_next_cqe(cq, &hw_cqe);
	if (ret)
		return ret;

	PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
	     " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
	     __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
	     CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
	     CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
	     CQE_WRID_LOW(hw_cqe));

	/*
	 * skip cqe's not affiliated with a QP.
	 */
	if (wq == NULL) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * skip hw cqe's if the wq is flushed.
	 */
	if (wq->flushed && !SW_CQE(hw_cqe)) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * skip TERMINATE cqes...
	 */
	if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * Gotta tweak READ completions:
	 *	1) the cqe doesn't contain the sq_wptr from the wr.
	 *	2) opcode not reflected from the wr.
	 *	3) read_len not reflected from the wr.
	 *	4) cq_type is RQ_TYPE not SQ_TYPE.
	 */
	if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {

		/*
		 * If we reached here because of an async event or other
		 * error, and the CQE carries an egress error, then drop it.
		 */
		if (CQE_TYPE(hw_cqe) == 1) {
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * If this is an unsolicited read response, then the read
		 * was generated by the kernel driver as part of peer-2-peer
		 * connection setup.  So ignore the completion.
		 */
		if (CQE_WRID_STAG(hw_cqe) == 1) {
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * Eat completions for unsignaled read WRs.
		 */
		if (!wq->sq.oldest_read->signaled) {
			advance_oldest_read(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * Don't write to the HWCQ, so create a new read req CQE
		 * in local memory.
		 */
		create_read_req_cqe(wq, hw_cqe, &read_cqe);
		hw_cqe = &read_cqe;
		advance_oldest_read(wq);
	}

	if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
		*cqe_flushed = (CQE_STATUS(hw_cqe) == T4_ERR_SWFLUSH);
		t4_set_wq_in_error(wq);
	}

	/*
	 * RECV completion.
	 */
	if (RQ_TYPE(hw_cqe)) {

		/*
		 * HW only validates 4 bits of MSN.  So we must validate that
		 * the MSN in the SEND is the next expected MSN.  If it's not,
		 * then we complete this with T4_ERR_MSN and mark the wq in
		 * error.
		 */

		if (t4_rq_empty(wq)) {
			t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}
		if (unlikely(CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) {
			t4_set_wq_in_error(wq);
			hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
			goto proc_cqe;
		}
		goto proc_cqe;
	}

	/*
	 * If we get here it's a send completion.
	 *
	 * Handle out of order completion.  These get stuffed
	 * in the SW SQ.  Then the SW SQ is walked to move any
	 * now in-order completions into the SW CQ.  This handles
	 * 2 cases:
	 *	1) reaping unsignaled WRs when the first subsequent
	 *	   signaled WR is completed.
	 *	2) out of order read completions.
	 */
	if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
		struct t4_swsqe *swsqe;

		PDBG("%s out of order completion going in sw_sq at idx %u\n",
		     __func__, CQE_WRID_SQ_IDX(hw_cqe));
		swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
		swsqe->cqe = *hw_cqe;
		swsqe->complete = 1;
		ret = -EAGAIN;
		goto flush_wq;
	}

proc_cqe:
	*cqe = *hw_cqe;

	/*
	 * Reap the associated WR(s) that are freed up with this
	 * completion.
	 */
	if (SQ_TYPE(hw_cqe)) {
		int idx = CQE_WRID_SQ_IDX(hw_cqe);
		BUG_ON(idx >= wq->sq.size);

		/*
		 * Account for any unsignaled completions completed by
		 * this signaled completion.  In this case, cidx points
		 * to the first unsignaled one, and idx points to the
		 * signaled one.  So adjust in_use based on this delta.
		 * If this is not completing any unsignaled wrs, then the
		 * delta will be 0.  Handle wrapping also!
		 */
		if (idx < wq->sq.cidx)
			wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
		else
			wq->sq.in_use -= idx - wq->sq.cidx;
		BUG_ON(wq->sq.in_use <= 0 || wq->sq.in_use >= wq->sq.size);

		wq->sq.cidx = (uint16_t)idx;
		PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
		*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
		if (c4iw_wr_log)
			c4iw_log_wr_stats(wq, hw_cqe);
		t4_sq_consume(wq);
	} else {
		PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
		*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
		BUG_ON(t4_rq_empty(wq));
		if (c4iw_wr_log)
			c4iw_log_wr_stats(wq, hw_cqe);
		t4_rq_consume(wq);
		goto skip_cqe;
	}

flush_wq:
	/*
	 * Flush any completed cqes that are now in-order.
	 */
	flush_completed_wrs(wq, cq);

skip_cqe:
	if (SW_CQE(hw_cqe)) {
		PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
		     __func__, cq, cq->cqid, cq->sw_cidx);
		t4_swcq_consume(cq);
	} else {
		PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
		     __func__, cq, cq->cqid, cq->cidx);
		t4_hwcq_consume(cq);
	}
	return ret;
}

/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *	0			cqe returned
 *	-ENODATA		EMPTY;
 *	-EAGAIN			caller must try again
 *	any other -errno	fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
	struct c4iw_qp *qhp = NULL;
	struct t4_cqe uninitialized_var(cqe), *rd_cqe;
	struct t4_wq *wq;
	u32 credit = 0;
	u8 cqe_flushed;
	u64 cookie = 0;
	int ret;

	ret = t4_next_cqe(&chp->cq, &rd_cqe);

	if (ret)
		return ret;

	qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
	if (!qhp) {
		wq = NULL;
	} else {
		spin_lock(&qhp->lock);
		wq = &(qhp->wq);
	}
	ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
	if (ret)
		goto out;

	wc->wr_id = cookie;
	wc->qp = &qhp->ibqp;
	wc->vendor_err = CQE_STATUS(&cqe);
	wc->wc_flags = 0;

	PDBG("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x "
	     "lo 0x%x cookie 0x%llx\n", __func__, CQE_QPID(&cqe),
	     CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_LEN(&cqe),
	     CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), (unsigned long long)cookie);

	if (CQE_TYPE(&cqe) == 0) {
		if (!CQE_STATUS(&cqe))
			wc->byte_len = CQE_LEN(&cqe);
		else
			wc->byte_len = 0;
		wc->opcode = IB_WC_RECV;
		if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
		    CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
			wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
		}
	} else {
		switch (CQE_OPCODE(&cqe)) {
		case FW_RI_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case FW_RI_READ_REQ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(&cqe);
			break;
		case FW_RI_SEND_WITH_INV:
		case FW_RI_SEND_WITH_SE_INV:
			wc->opcode = IB_WC_SEND;
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
			break;
		case FW_RI_SEND:
		case FW_RI_SEND_WITH_SE:
			wc->opcode = IB_WC_SEND;
			break;
		case FW_RI_BIND_MW:
			wc->opcode = IB_WC_BIND_MW;
			break;
		case FW_RI_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		case FW_RI_FAST_REGISTER:
			wc->opcode = IB_WC_FAST_REG_MR;
			break;
		default:
			printk(KERN_ERR MOD "Unexpected opcode %d "
			       "in the CQE received for QPID=0x%0x\n",
			       CQE_OPCODE(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
			goto out;
		}
	}

	if (cqe_flushed) {
		wc->status = IB_WC_WR_FLUSH_ERR;
	} else {
		switch (CQE_STATUS(&cqe)) {
		case T4_ERR_SUCCESS:
			wc->status = IB_WC_SUCCESS;
			break;
		case T4_ERR_STAG:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_PDID:
			wc->status = IB_WC_LOC_PROT_ERR;
			break;
		case T4_ERR_QPID:
		case T4_ERR_ACCESS:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_WRAP:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		case T4_ERR_BOUND:
			wc->status = IB_WC_LOC_LEN_ERR;
			break;
		case T4_ERR_INVALIDATE_SHARED_MR:
		case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
			wc->status = IB_WC_MW_BIND_ERR;
			break;
		case T4_ERR_CRC:
		case T4_ERR_MARKER:
		case T4_ERR_PDU_LEN_ERR:
		case T4_ERR_OUT_OF_RQE:
		case T4_ERR_DDP_VERSION:
		case T4_ERR_RDMA_VERSION:
		case T4_ERR_DDP_QUEUE_NUM:
		case T4_ERR_MSN:
		case T4_ERR_TBIT:
		case T4_ERR_MO:
		case T4_ERR_MSN_RANGE:
		case T4_ERR_IRD_OVERFLOW:
		case T4_ERR_OPCODE:
		case T4_ERR_INTERNAL_ERR:
			wc->status = IB_WC_FATAL_ERR;
			break;
		case T4_ERR_SWFLUSH:
			wc->status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			printk(KERN_ERR MOD
			       "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
			       CQE_STATUS(&cqe), CQE_QPID(&cqe));
			wc->status = IB_WC_FATAL_ERR;
		}
	}
out:
	if (wq)
		spin_unlock(&qhp->lock);
	return ret;
}

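/*
 * Poll up to @num_entries work completions off the CQ under the CQ
 * lock, retrying internally while poll_cq asks us to skip a CQE.
 */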
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct c4iw_cq *chp;
	unsigned long flags;
	int npolled;
	int err = 0;

	chp = to_c4iw_cq(ibcq);

	spin_lock_irqsave(&chp->lock, flags);
	for (npolled = 0; npolled < num_entries; ++npolled) {
		do {
			err = c4iw_poll_cq_one(chp, wc + npolled);
		} while (err == -EAGAIN);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&chp->lock, flags);
	return !err || err == -ENODATA ? npolled : err;
}

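/*
 * Destroy an ib_cq: unhook it from the cqid table, wait for all
 * references to drop, then free the hardware and software queues.
 */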
int c4iw_destroy_cq(struct ib_cq *ib_cq)
{
	struct c4iw_cq *chp;
	struct c4iw_ucontext *ucontext;

	PDBG("%s ib_cq %p\n", __func__, ib_cq);
	chp = to_c4iw_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
				  : NULL;
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
	kfree(chp);
	return 0;
}

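/*
 * Create an ib_cq: size the hardware queue (status page, full/empty
 * entry, 2x depth, 64-entry minimum, multiple of 16), create the
 * hardware CQ, and for user CQs export the queue memory and the BAR2
 * doorbell region to userspace via two mmap entries.
 */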
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
			     const struct ib_cq_init_attr *attr,
			     struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct c4iw_dev *rhp;
	struct c4iw_cq *chp;
	struct c4iw_create_cq_resp uresp;
	struct c4iw_ucontext *ucontext = NULL;
	int ret;
	size_t memsize, hwentries;
	struct c4iw_mm_entry *mm, *mm2;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
	if (attr->flags)
		return ERR_PTR(-EINVAL);

	rhp = to_c4iw_dev(ibdev);

	if (vector >= rhp->rdev.lldi.nciq)
		return ERR_PTR(-EINVAL);

	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context)
		ucontext = to_c4iw_ucontext(ib_context);

	/* account for the status page. */
	entries++;

	/* IQ needs one extra entry to differentiate full vs empty. */
	entries++;

	/*
	 * entries must be multiple of 16 for HW.
	 */
	entries = roundup(entries, 16);

	/*
	 * Make actual HW queue 2x to avoid cidx_inc overflows.
	 */
	hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);

	/*
	 * Make HW queue at least 64 entries so GTS updates aren't too
	 * frequent.
	 */
	if (hwentries < 64)
		hwentries = 64;

	memsize = hwentries * sizeof *chp->cq.queue;

	/*
	 * memsize must be a multiple of the page size if it's a user cq.
	 */
	if (ucontext)
		memsize = roundup(memsize, PAGE_SIZE);
	chp->cq.size = hwentries;
	chp->cq.memsize = memsize;
	chp->cq.vector = vector;
Steve Wise | cfdda9d | 2010-04-21 15:30:06 -0700 | [diff] [blame] | 931 | |
| 932 | ret = create_cq(&rhp->rdev, &chp->cq, |
| 933 | ucontext ? &ucontext->uctx : &rhp->rdev.uctx); |
| 934 | if (ret) |
| 935 | goto err1; |
| 936 | |
| 937 | chp->rhp = rhp; |
| 938 | chp->cq.size--; /* status page */ |
Steve Wise | 1973e8b | 2010-06-10 19:03:06 +0000 | [diff] [blame] | 939 | chp->ibcq.cqe = entries - 2; |
Steve Wise | cfdda9d | 2010-04-21 15:30:06 -0700 | [diff] [blame] | 940 | spin_lock_init(&chp->lock); |
Kumar Sanghvi | 581bbe2 | 2011-10-24 21:20:21 +0530 | [diff] [blame] | 941 | spin_lock_init(&chp->comp_handler_lock); |
Steve Wise | cfdda9d | 2010-04-21 15:30:06 -0700 | [diff] [blame] | 942 | atomic_set(&chp->refcnt, 1); |
| 943 | init_waitqueue_head(&chp->wait); |
| 944 | ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid); |
| 945 | if (ret) |
| 946 | goto err2; |
| 947 | |
| 948 | if (ucontext) { |
| 949 | mm = kmalloc(sizeof *mm, GFP_KERNEL); |
| 950 | if (!mm) |
| 951 | goto err3; |
| 952 | mm2 = kmalloc(sizeof *mm2, GFP_KERNEL); |
| 953 | if (!mm2) |
| 954 | goto err4; |
| 955 | |
Steve Wise | cfdda9d | 2010-04-21 15:30:06 -0700 | [diff] [blame] | 956 | uresp.qid_mask = rhp->rdev.cqmask; |
| 957 | uresp.cqid = chp->cq.cqid; |
| 958 | uresp.size = chp->cq.size; |
| 959 | uresp.memsize = chp->cq.memsize; |
| 960 | spin_lock(&ucontext->mmap_lock); |
| 961 | uresp.key = ucontext->key; |
| 962 | ucontext->key += PAGE_SIZE; |
| 963 | uresp.gts_key = ucontext->key; |
| 964 | ucontext->key += PAGE_SIZE; |
| 965 | spin_unlock(&ucontext->mmap_lock); |
Yann Droneaud | b6f04d3 | 2014-05-05 19:33:23 +0200 | [diff] [blame] | 966 | ret = ib_copy_to_udata(udata, &uresp, |
| 967 | sizeof(uresp) - sizeof(uresp.reserved)); |
Steve Wise | cfdda9d | 2010-04-21 15:30:06 -0700 | [diff] [blame] | 968 | if (ret) |
| 969 | goto err5; |
| 970 | |
| 971 | mm->key = uresp.key; |
| 972 | mm->addr = virt_to_phys(chp->cq.queue); |
| 973 | mm->len = chp->cq.memsize; |
| 974 | insert_mmap(ucontext, mm); |
| 975 | |
| 976 | mm2->key = uresp.gts_key; |
Hariprasad S | 74217d4 | 2015-06-09 18:23:12 +0530 | [diff] [blame] | 977 | mm2->addr = chp->cq.bar2_pa; |
Steve Wise | cfdda9d | 2010-04-21 15:30:06 -0700 | [diff] [blame] | 978 | mm2->len = PAGE_SIZE; |
| 979 | insert_mmap(ucontext, mm2); |
| 980 | } |
| 981 | PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n", |
| 982 | __func__, chp->cq.cqid, chp, chp->cq.size, |
Hariprasad S | 6198dd8 | 2015-04-22 01:44:59 +0530 | [diff] [blame] | 983 | chp->cq.memsize, (unsigned long long) chp->cq.dma_addr); |
Steve Wise | cfdda9d | 2010-04-21 15:30:06 -0700 | [diff] [blame] | 984 | return &chp->ibcq; |
| 985 | err5: |
| 986 | kfree(mm2); |
| 987 | err4: |
| 988 | kfree(mm); |
| 989 | err3: |
| 990 | remove_handle(rhp, &rhp->cqidr, chp->cq.cqid); |
| 991 | err2: |
| 992 | destroy_cq(&chp->rhp->rdev, &chp->cq, |
| 993 | ucontext ? &ucontext->uctx : &rhp->rdev.uctx); |
| 994 | err1: |
| 995 | kfree(chp); |
| 996 | return ERR_PTR(ret); |
| 997 | } |
| 998 | |
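/*
 * CQ resize is not supported by this driver.
 */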
int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
	return -ENOSYS;
}

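/*
 * Request a completion notification on the CQ (solicited-only if
 * requested).  When IB_CQ_REPORT_MISSED_EVENTS is set, a nonzero
 * return indicates completions were already pending at arm time.
 */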
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct c4iw_cq *chp;
	int ret;
	unsigned long flag;

	chp = to_c4iw_cq(ibcq);
	spin_lock_irqsave(&chp->lock, flag);
	ret = t4_arm_cq(&chp->cq,
			(flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
	spin_unlock_irqrestore(&chp->lock, flag);
	if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		ret = 0;
	return ret;
}