/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>

#include "iw_cxgb4.h"

static int db_delay_usecs = 1;
module_param(db_delay_usecs, int, 0644);
MODULE_PARM_DESC(db_delay_usecs, "Usecs to delay awaiting db fifo to drain");

static int ocqp_support = 1;
module_param(ocqp_support, int, 0644);
MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");

int db_fc_threshold = 2000;
module_param(db_fc_threshold, int, 0644);
MODULE_PARM_DESC(db_fc_threshold, "QP count/threshold that triggers automatic "
		 "db flow control mode (default = 2000)");

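/* Update the software QP state under the qhp spinlock. */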
static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
{
	unsigned long flag;

	spin_lock_irqsave(&qhp->lock, flag);
	qhp->attr.state = state;
	spin_unlock_irqrestore(&qhp->lock, flag);
}

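/*
 * SQ memory can live either in host memory or in on-chip (OCQP)
 * memory.  These helpers pick the matching allocate/free routine
 * based on how the SQ was created.
 */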
static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);
}

static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
			  pci_unmap_addr(sq, mapping));
}

static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	if (t4_sq_onchip(sq))
		dealloc_oc_sq(rdev, sq);
	else
		dealloc_host_sq(rdev, sq);
}

static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	if (!ocqp_support || !t4_ocqp_supported())
		return -ENOSYS;
	sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
	if (!sq->dma_addr)
		return -ENOMEM;
	sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr -
			rdev->lldi.vr->ocq.start;
	sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr -
					    rdev->lldi.vr->ocq.start);
	sq->flags |= T4_SQ_ONCHIP;
	return 0;
}

static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize,
				       &(sq->dma_addr), GFP_KERNEL);
	if (!sq->queue)
		return -ENOMEM;
	sq->phys_addr = virt_to_phys(sq->queue);
	pci_unmap_addr_set(sq, mapping, sq->dma_addr);
	return 0;
}

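/* Free all host/on-chip resources backing a QP's work queues. */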
static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		      struct c4iw_dev_ucontext *uctx)
{
	/*
	 * uP clears EQ contexts when the connection exits rdma mode,
	 * so no need to post a RESET WR for these EQs.
	 */
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
	dealloc_sq(rdev, &wq->sq);
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
	kfree(wq->rq.sw_rq);
	kfree(wq->sq.sw_sq);
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return 0;
}

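/*
 * Allocate the SQ/RQ queue memory, RQT, and qids for a new QP, then
 * post a single FW_RI_RES_WR that writes both EQ contexts to the
 * hardware and wait for the firmware's completion.
 */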
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx)
{
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret;
	int eqsize;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid) {
		ret = -ENOMEM;
		goto free_sq_qid;
	}

	if (!user) {
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
				       GFP_KERNEL);
		if (!wq->sq.sw_sq) {
			ret = -ENOMEM;
			goto free_rq_qid;
		}

		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
				       GFP_KERNEL);
		if (!wq->rq.sw_rq) {
			ret = -ENOMEM;
			goto free_sw_sq;
		}
	}

	/*
	 * RQT must be a power of 2.
	 */
	wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr) {
		ret = -ENOMEM;
		goto free_sw_rq;
	}

	/*
	 * For user QPs, prefer an on-chip SQ and fall back to a host SQ
	 * if on-chip allocation fails.  As originally written, the user
	 * path allocated a host SQ on top of a successful on-chip one
	 * and the trailing "if (ret)" ran unconditionally outside the
	 * braceless else; restore the intended fallback semantics.
	 */
	if (user) {
		ret = alloc_oc_sq(rdev, &wq->sq);
		if (ret)
			ret = alloc_host_sq(rdev, &wq->sq);
	} else
		ret = alloc_host_sq(rdev, &wq->sq);
	if (ret)
		goto free_hwaddr;
	memset(wq->sq.queue, 0, wq->sq.memsize);
	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
					  wq->rq.memsize, &(wq->rq.dma_addr),
					  GFP_KERNEL);
	if (!wq->rq.queue) {
		ret = -ENOMEM;
		goto free_sq;
	}
	PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
	     __func__, wq->sq.queue,
	     (unsigned long long)virt_to_phys(wq->sq.queue),
	     wq->rq.queue,
	     (unsigned long long)virt_to_phys(wq->rq.queue));
	memset(wq->rq.queue, 0, wq->rq.memsize);
	dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

	wq->db = rdev->lldi.db_reg;
	wq->gts = rdev->lldi.gts_reg;
	if (user) {
		wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
			     (wq->sq.qid << rdev->qpshift);
		wq->sq.udb &= PAGE_MASK;
		wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
			     (wq->rq.qid << rdev->qpshift);
		wq->rq.udb &= PAGE_MASK;
	}
	wq->rdev = rdev;
	wq->rq.msn = 1;

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto free_dma;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(2) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		(t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) |
		V_FW_RI_RES_WR_IQID(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto free_dma;
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
	if (ret)
		goto free_dma;

	PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
	     __func__, wq->sq.qid, wq->rq.qid, wq->db,
	     (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);

	return 0;
free_dma:
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
free_sq:
	dealloc_sq(rdev, &wq->sq);
free_hwaddr:
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
free_sw_rq:
	kfree(wq->rq.sw_rq);
free_sw_sq:
	kfree(wq->sq.sw_sq);
free_rq_qid:
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
free_sq_qid:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return ret;
}

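/*
 * Copy the SGE data inline into the WQE immediate area, wrapping at
 * the end of the SQ, and zero-pad the result to a 16-byte boundary.
 */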
static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
		      struct ib_send_wr *wr, int max, u32 *plenp)
{
	u8 *dstp, *srcp;
	u32 plen = 0;
	int i;
	int rem, len;

	dstp = (u8 *)immdp->data;
	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) > max)
			return -EMSGSIZE;
		srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
		plen += wr->sg_list[i].length;
		rem = wr->sg_list[i].length;
		while (rem) {
			if (dstp == (u8 *)&sq->queue[sq->size])
				dstp = (u8 *)sq->queue;
			if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
				len = rem;
			else
				len = (u8 *)&sq->queue[sq->size] - dstp;
			memcpy(dstp, srcp, len);
			dstp += len;
			srcp += len;
			rem -= len;
		}
	}
	len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
	if (len)
		memset(dstp, 0, len);
	immdp->op = FW_RI_DATA_IMMD;
	immdp->r1 = 0;
	immdp->r2 = 0;
	immdp->immdlen = cpu_to_be32(plen);
	*plenp = plen;
	return 0;
}

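/*
 * Build an immediate scatter/gather list in the WQE, wrapping the
 * flit pointer back to the start of the queue as needed.
 */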
static int build_isgl(__be64 *queue_start, __be64 *queue_end,
		      struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
		      int num_sge, u32 *plenp)
{
	int i;
	u32 plen = 0;
	__be64 *flitp = (__be64 *)isglp->sge;

	for (i = 0; i < num_sge; i++) {
		if ((plen + sg_list[i].length) < plen)
			return -EMSGSIZE;
		plen += sg_list[i].length;
		*flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
				     sg_list[i].length);
		if (++flitp == queue_end)
			flitp = queue_start;
		*flitp = cpu_to_be64(sg_list[i].addr);
		if (++flitp == queue_end)
			flitp = queue_start;
	}
	*flitp = (__force __be64)0;
	isglp->op = FW_RI_DATA_ISGL;
	isglp->r1 = 0;
	isglp->nsge = cpu_to_be16(num_sge);
	isglp->r2 = 0;
	if (plenp)
		*plenp = plen;
	return 0;
}

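/* Build a FW_RI_SEND_WR work request, inline or with an SGL. */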
static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
			   struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	switch (wr->opcode) {
	case IB_WR_SEND:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
		wqe->send.stag_inv = 0;
		break;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
		wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
		break;

	default:
		return -EINVAL;
	}

	plen = 0;
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->send.u.immd_src, wr,
					 T4_MAX_SEND_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->send.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->send.u.immd_src[0].r1 = 0;
		wqe->send.u.immd_src[0].r2 = 0;
		wqe->send.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}

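/* Build a FW_RI_RDMA_WRITE_WR work request, inline or with an SGL. */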
static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
			    struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	wqe->write.r2 = 0;
	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->write.u.immd_src, wr,
					 T4_MAX_WRITE_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->write.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->write.u.immd_src[0].r1 = 0;
		wqe->write.u.immd_src[0].r2 = 0;
		wqe->write.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}

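/*
 * Build a FW_RI_RDMA_READ_WR work request.  A read with no SGE is
 * emitted as a 0-byte read using STAG 2.
 */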
static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
	if (wr->num_sge > 1)
		return -EINVAL;
	if (wr->num_sge) {
		wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
		wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr
							>> 32));
		wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
		wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
		wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
		wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
							 >> 32));
		wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
	} else {
		wqe->read.stag_src = cpu_to_be32(2);
		wqe->read.to_src_hi = 0;
		wqe->read.to_src_lo = 0;
		wqe->read.stag_sink = cpu_to_be32(2);
		wqe->read.plen = 0;
		wqe->read.to_sink_hi = 0;
		wqe->read.to_sink_lo = 0;
	}
	wqe->read.r2 = 0;
	wqe->read.r5 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
	return 0;
}

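/* Build the ISGL for a receive work request directly in the RQ. */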
static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
			   struct ib_recv_wr *wr, u8 *len16)
{
	int ret;

	ret = build_isgl((__be64 *)qhp->wq.rq.queue,
			 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
			 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
	if (ret)
		return ret;
	*len16 = DIV_ROUND_UP(sizeof wqe->recv +
			      wr->num_sge * sizeof(struct fw_ri_sge), 16);
	return 0;
}

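/*
 * Build a fast-register WR: the page list is copied inline after the
 * FR fields as immediate data, padded out to the full PBL length.
 */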
static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
			 struct ib_send_wr *wr, u8 *len16)
{
	struct fw_ri_immd *imdp;
	__be64 *p;
	int i;
	int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
	int rem;

	if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
		return -EINVAL;

	wqe->fr.qpbinde_to_dcacpu = 0;
	wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12;
	wqe->fr.addr_type = FW_RI_VA_BASED_TO;
	wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags);
	wqe->fr.len_hi = 0;
	wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length);
	wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
	wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
	wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
					0xffffffff);
	WARN_ON(pbllen > T4_MAX_FR_IMMD);
	imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
	imdp->op = FW_RI_DATA_IMMD;
	imdp->r1 = 0;
	imdp->r2 = 0;
	imdp->immdlen = cpu_to_be32(pbllen);
	p = (__be64 *)(imdp + 1);
	rem = pbllen;
	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
		*p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
		rem -= sizeof *p;
		if (++p == (__be64 *)&sq->queue[sq->size])
			p = (__be64 *)sq->queue;
	}
	BUG_ON(rem < 0);
	while (rem) {
		*p = 0;
		rem -= sizeof *p;
		if (++p == (__be64 *)&sq->queue[sq->size])
			p = (__be64 *)sq->queue;
	}
	*len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen, 16);
	return 0;
}

static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
			  u8 *len16)
{
	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
	wqe->inv.r2 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
	return 0;
}

void c4iw_qp_add_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	atomic_inc(&(to_c4iw_qp(qp)->refcnt));
}

void c4iw_qp_rem_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
		wake_up(&(to_c4iw_qp(qp)->wait));
}

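/*
 * Post a chain of send work requests: translate each ib_send_wr into
 * the matching firmware WR, record it in the software SQ, and ring
 * the doorbell once for the whole chain.
 */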
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr)
{
	int err = 0;
	u8 len16 = 0;
	enum fw_wr_opcodes fw_opcode = 0;
	enum fw_ri_wr_flags fw_flags;
	struct c4iw_qp *qhp;
	union t4_wr *wqe;
	u32 num_wrs;
	struct t4_swsqe *swsqe;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = t4_sq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (num_wrs == 0) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
		      qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);

		fw_flags = 0;
		if (wr->send_flags & IB_SEND_SOLICITED)
			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IB_SEND_SIGNALED)
			fw_flags |= FW_RI_COMPLETION_FLAG;
		swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
		switch (wr->opcode) {
		case IB_WR_SEND_WITH_INV:
		case IB_WR_SEND:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_READ_FENCE_FLAG;
			fw_opcode = FW_RI_SEND_WR;
			if (wr->opcode == IB_WR_SEND)
				swsqe->opcode = FW_RI_SEND;
			else
				swsqe->opcode = FW_RI_SEND_WITH_INV;
			err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_WRITE:
			fw_opcode = FW_RI_RDMA_WRITE_WR;
			swsqe->opcode = FW_RI_RDMA_WRITE;
			err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_READ_WITH_INV:
			fw_opcode = FW_RI_RDMA_READ_WR;
			swsqe->opcode = FW_RI_READ_REQ;
			if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
				fw_flags = FW_RI_RDMA_READ_INVALIDATE;
			else
				fw_flags = 0;
			err = build_rdma_read(wqe, wr, &len16);
			if (err)
				break;
			swsqe->read_len = wr->sg_list[0].length;
			if (!qhp->wq.sq.oldest_read)
				qhp->wq.sq.oldest_read = swsqe;
			break;
		case IB_WR_FAST_REG_MR:
			fw_opcode = FW_RI_FR_NSMR_WR;
			swsqe->opcode = FW_RI_FAST_REGISTER;
			err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_LOCAL_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
			fw_opcode = FW_RI_INV_LSTAG_WR;
			swsqe->opcode = FW_RI_LOCAL_INV;
			err = build_inv_stag(wqe, wr, &len16);
			break;
		default:
			PDBG("%s post of type=%d TBD!\n", __func__,
			     wr->opcode);
			err = -EINVAL;
		}
		if (err) {
			*bad_wr = wr;
			break;
		}
		swsqe->idx = qhp->wq.sq.pidx;
		swsqe->complete = 0;
		swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED);
		swsqe->wr_id = wr->wr_id;

		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);

		PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
		     __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
		     swsqe->opcode, swsqe->read_len);
		wr = wr->next;
		num_wrs--;
		t4_sq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
	}
	if (t4_wq_db_enabled(&qhp->wq))
		t4_ring_sq_db(&qhp->wq, idx);
	spin_unlock_irqrestore(&qhp->lock, flag);
	return err;
}

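/*
 * Post a chain of receive work requests into the RQ, then ring the
 * RQ doorbell once for the whole chain.
 */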
int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct c4iw_qp *qhp;
	union t4_recv_wr *wqe;
	u32 num_wrs;
	u8 len16 = 0;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = t4_rq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (wr->num_sge > T4_MAX_RECV_SGE) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
					   qhp->wq.rq.wq_pidx *
					   T4_EQ_ENTRY_SIZE);
		if (num_wrs)
			err = build_rdma_recv(qhp, wqe, wr, &len16);
		else
			err = -ENOMEM;
		if (err) {
			*bad_wr = wr;
			break;
		}

		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;

		wqe->recv.opcode = FW_RI_RECV_WR;
		wqe->recv.r1 = 0;
		wqe->recv.wrid = qhp->wq.rq.pidx;
		wqe->recv.r2[0] = 0;
		wqe->recv.r2[1] = 0;
		wqe->recv.r2[2] = 0;
		wqe->recv.len16 = len16;
		PDBG("%s cookie 0x%llx pidx %u\n", __func__,
		     (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
		t4_rq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
		wr = wr->next;
		num_wrs--;
	}
	if (t4_wq_db_enabled(&qhp->wq))
		t4_ring_rq_db(&qhp->wq, idx);
	spin_unlock_irqrestore(&qhp->lock, flag);
	return err;
}

int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)
{
	return -ENOSYS;
}

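/*
 * Map a CQE error status onto the IETF TERMINATE layer/etype and
 * error code carried in a TERMINATE message.
 */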
static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
				    u8 *ecode)
{
	int status;
	int tagged;
	int opcode;
	int rqtype;
	int send_inv;

	if (!err_cqe) {
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		return;
	}

	status = CQE_STATUS(err_cqe);
	opcode = CQE_OPCODE(err_cqe);
	rqtype = RQ_TYPE(err_cqe);
	send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
		   (opcode == FW_RI_SEND_WITH_SE_INV);
	tagged = (opcode == FW_RI_RDMA_WRITE) ||
		 (rqtype && (opcode == FW_RI_READ_RESP));

	switch (status) {
	case T4_ERR_STAG:
		if (send_inv) {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
			*ecode = RDMAP_CANT_INV_STAG;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_INV_STAG;
		}
		break;
	case T4_ERR_PDID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		if ((opcode == FW_RI_SEND_WITH_INV) ||
		    (opcode == FW_RI_SEND_WITH_SE_INV))
			*ecode = RDMAP_CANT_INV_STAG;
		else
			*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_QPID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_ACCESS:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_ACC_VIOL;
		break;
	case T4_ERR_WRAP:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_TO_WRAP;
		break;
	case T4_ERR_BOUND:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_BASE_BOUNDS;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_BASE_BOUNDS;
		}
		break;
	case T4_ERR_INVALIDATE_SHARED_MR:
	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_CANT_INV_STAG;
		break;
	case T4_ERR_ECC:
	case T4_ERR_ECC_PSTAG:
	case T4_ERR_INTERNAL_ERR:
		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_OUT_OF_RQE:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_NOBUF;
		break;
	case T4_ERR_PBL_ADDR_BOUND:
		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
		*ecode = DDPT_BASE_BOUNDS;
		break;
	case T4_ERR_CRC:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_CRC_ERR;
		break;
	case T4_ERR_MARKER:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_MARKER_ERR;
		break;
	case T4_ERR_PDU_LEN_ERR:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_MSG_TOOBIG;
		break;
	case T4_ERR_DDP_VERSION:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_INV_VERS;
		} else {
			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
			*ecode = DDPU_INV_VERS;
		}
		break;
	case T4_ERR_RDMA_VERSION:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_VERS;
		break;
	case T4_ERR_OPCODE:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_OPCODE;
		break;
	case T4_ERR_DDP_QUEUE_NUM:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_QN;
		break;
	case T4_ERR_MSN:
	case T4_ERR_MSN_GAP:
	case T4_ERR_MSN_RANGE:
	case T4_ERR_IRD_OVERFLOW:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_RANGE;
		break;
	case T4_ERR_TBIT:
		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_MO:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MO;
		break;
	default:
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	}
}

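/* Build and post a FW_RI_TYPE_TERMINATE WR for this connection. */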
static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
			   gfp_t gfp)
{
	struct fw_ri_wr *wqe;
	struct sk_buff *skb;
	struct terminate_message *term;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     qhp->ep->hwtid);

	skb = alloc_skb(sizeof *wqe, gfp);
	if (!skb)
		return;
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(FW_WR_OP(FW_RI_INIT_WR));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(qhp->ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
	wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
	term = (struct terminate_message *)wqe->u.terminate.termmsg;
	if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
		term->layer_etype = qhp->attr.layer_etype;
		term->ecode = qhp->attr.ecode;
	} else
		build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
	c4iw_ofld_send(&qhp->rhp->rdev, skb);
}

/*
 * Assumes the qhp mutex is held by the caller; the CQ and QP
 * spinlocks are taken here.
 */
static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
		       struct c4iw_cq *schp)
{
	int count;
	int flushed;
	unsigned long flag;

	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, flag);
	spin_lock(&qhp->lock);
	c4iw_flush_hw_cq(&rchp->cq);
	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
	flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&rchp->lock, flag);
	if (flushed) {
		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
	}

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&schp->lock, flag);
	spin_lock(&qhp->lock);
	c4iw_flush_hw_cq(&schp->cq);
	c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
	flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&schp->lock, flag);
	if (flushed) {
		spin_lock_irqsave(&schp->comp_handler_lock, flag);
		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
		spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
	}
}

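/*
 * Flush the QP: for user QPs just mark the WQ and CQs in error and
 * kick the completion handlers; for kernel QPs flush the CQEs in
 * software via __flush_qp().
 */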
static void flush_qp(struct c4iw_qp *qhp)
{
	struct c4iw_cq *rchp, *schp;
	unsigned long flag;

	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
	schp = get_chp(qhp->rhp, qhp->attr.scq);

	if (qhp->ibqp.uobject) {
		t4_set_wq_in_error(&qhp->wq);
		t4_set_cq_in_error(&rchp->cq);
		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
		if (schp != rchp) {
			t4_set_cq_in_error(&schp->cq);
			spin_lock_irqsave(&schp->comp_handler_lock, flag);
			(*schp->ibcq.comp_handler)(&schp->ibcq,
					schp->ibcq.cq_context);
			spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
		}
		return;
	}
	__flush_qp(qhp, rchp, schp);
}

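/* Move the connection out of RDMA mode via a FW_RI_TYPE_FINI WR. */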
static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		     struct c4iw_ep *ep)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct sk_buff *skb;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     ep->hwtid);

	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP(FW_RI_INIT_WR) |
		FW_WR_COMPL(1));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
	wqe->cookie = (unsigned long) &ep->com.wr_wait;

	wqe->u.fini.type = FW_RI_TYPE_FINI;
	ret = c4iw_ofld_send(&rhp->rdev, skb);
	if (ret)
		goto out;

	ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid,
				  qhp->wq.sq.qid, __func__);
out:
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
}

static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
{
	PDBG("%s p2p_type = %d\n", __func__, p2p_type);
	memset(&init->u, 0, sizeof init->u);
	switch (p2p_type) {
	case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
		init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
		init->u.write.stag_sink = cpu_to_be32(1);
		init->u.write.to_sink = cpu_to_be64(1);
		init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
						   sizeof(struct fw_ri_immd),
						   16);
		break;
	case FW_RI_INIT_P2PTYPE_READ_REQ:
		init->u.write.opcode = FW_RI_RDMA_READ_WR;
		init->u.read.stag_src = cpu_to_be32(1);
		init->u.read.to_src_lo = cpu_to_be32(1);
		init->u.read.stag_sink = cpu_to_be32(1);
		init->u.read.to_sink_lo = cpu_to_be32(1);
		init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
		break;
	}
}

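/*
 * Transition the connection into RDMA mode: build and post a
 * FW_RI_TYPE_INIT WR carrying the negotiated MPA and QP attributes,
 * then wait for the firmware reply.
 */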
static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct sk_buff *skb;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     qhp->ep->hwtid);

	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP(FW_RI_INIT_WR) |
		FW_WR_COMPL(1));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(qhp->ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	wqe->cookie = (unsigned long) &qhp->ep->com.wr_wait;

	wqe->u.init.type = FW_RI_TYPE_INIT;
	wqe->u.init.mpareqbit_p2ptype =
		V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
		V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
	wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
	if (qhp->attr.mpa_attr.recv_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.xmit_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.crc_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;

	wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
			      FW_RI_QP_RDMA_WRITE_ENABLE |
			      FW_RI_QP_BIND_ENABLE;
	if (!qhp->ibqp.uobject)
		wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
				       FW_RI_QP_STAG0_ENABLE;
	wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
	wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
	wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
	wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
	wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
	wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
	wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
	wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
	wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
	wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
	wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
					   rhp->rdev.lldi.vr->rq.start);
	if (qhp->attr.mpa_attr.initiator)
		build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);

	ret = c4iw_ofld_send(&rhp->rdev, skb);
	if (ret)
		goto out;

	ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait,
				  qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
out:
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
}

/*
 * Called by the library when the qp has user dbs disabled due to
 * a DB_FULL condition.  This function will single-thread all user
 * DB rings to avoid overflowing the hw db-fifo.
 */
static int ring_kernel_db(struct c4iw_qp *qhp, u32 qid, u16 inc)
{
	int delay = db_delay_usecs;

	mutex_lock(&qhp->rhp->db_mutex);
	do {
		/*
		 * The interrupt threshold is dbfifo_int_thresh << 6. So
		 * make sure we don't cross that and generate an interrupt.
		 */
		if (cxgb4_dbfifo_count(qhp->rhp->rdev.lldi.ports[0], 1) <
		    (qhp->rhp->rdev.lldi.dbfifo_int_thresh << 5)) {
			writel(QID(qid) | PIDX(inc), qhp->wq.db);
			break;
		}
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(delay));
		delay = min(delay << 1, 2000);
	} while (1);
	mutex_unlock(&qhp->rhp->db_mutex);
	return 0;
}

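/*
 * Drive the QP state machine (IDLE/RTS/CLOSING/TERMINATE/ERROR) under
 * the qhp mutex, issuing RI INIT/FINI WRs, flushes, terminates, and
 * endpoint disconnects as the transitions require.
 */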
int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		   enum c4iw_qp_attr_mask mask,
		   struct c4iw_qp_attributes *attrs,
		   int internal)
{
	int ret = 0;
	struct c4iw_qp_attributes newattr = qhp->attr;
	int disconnect = 0;
	int terminate = 0;
	int abort = 0;
	int free = 0;
	struct c4iw_ep *ep = NULL;

	PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__,
	     qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
	     (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

	mutex_lock(&qhp->mutex);

	/* Process attr changes if in IDLE */
	if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
		if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
			ret = -EIO;
			goto out;
		}
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
			newattr.enable_rdma_read = attrs->enable_rdma_read;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
			newattr.enable_rdma_write = attrs->enable_rdma_write;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
			newattr.enable_bind = attrs->enable_bind;
		if (mask & C4IW_QP_ATTR_MAX_ORD) {
			if (attrs->max_ord > c4iw_max_read_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ord = attrs->max_ord;
		}
		if (mask & C4IW_QP_ATTR_MAX_IRD) {
			if (attrs->max_ird > c4iw_max_read_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ird = attrs->max_ird;
		}
		qhp->attr = newattr;
	}

	if (mask & C4IW_QP_ATTR_SQ_DB) {
		ret = ring_kernel_db(qhp, qhp->wq.sq.qid, attrs->sq_db_inc);
		goto out;
	}
	if (mask & C4IW_QP_ATTR_RQ_DB) {
		ret = ring_kernel_db(qhp, qhp->wq.rq.qid, attrs->rq_db_inc);
		goto out;
	}

	if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
		goto out;
	if (qhp->attr.state == attrs->next_state)
		goto out;

	switch (qhp->attr.state) {
	case C4IW_QP_STATE_IDLE:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_RTS:
			if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
				ret = -EINVAL;
				goto out;
			}
			if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
				ret = -EINVAL;
				goto out;
			}
			qhp->attr.mpa_attr = attrs->mpa_attr;
			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
			qhp->ep = qhp->attr.llp_stream_handle;
			set_state(qhp, C4IW_QP_STATE_RTS);

			/*
			 * Ref the endpoint here and deref when we
			 * disassociate the endpoint from the QP.  This
			 * happens in CLOSING->IDLE transition or *->ERROR
			 * transition.
			 */
			c4iw_get_ep(&qhp->ep->com);
			ret = rdma_init(rhp, qhp);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_ERROR:
			set_state(qhp, C4IW_QP_STATE_ERROR);
			flush_qp(qhp);
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_RTS:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_CLOSING:
			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
			set_state(qhp, C4IW_QP_STATE_CLOSING);
			ep = qhp->ep;
			if (!internal) {
				abort = 0;
				disconnect = 1;
				c4iw_get_ep(&qhp->ep->com);
			}
			if (qhp->ibqp.uobject)
				t4_set_wq_in_error(&qhp->wq);
			ret = rdma_fini(rhp, qhp, ep);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_TERMINATE:
			set_state(qhp, C4IW_QP_STATE_TERMINATE);
			qhp->attr.layer_etype = attrs->layer_etype;
			qhp->attr.ecode = attrs->ecode;
			if (qhp->ibqp.uobject)
				t4_set_wq_in_error(&qhp->wq);
			ep = qhp->ep;
			if (!internal)
				terminate = 1;
			disconnect = 1;
			c4iw_get_ep(&qhp->ep->com);
			break;
		case C4IW_QP_STATE_ERROR:
			set_state(qhp, C4IW_QP_STATE_ERROR);
			if (qhp->ibqp.uobject)
				t4_set_wq_in_error(&qhp->wq);
			if (!internal) {
				abort = 1;
				disconnect = 1;
				ep = qhp->ep;
				c4iw_get_ep(&qhp->ep->com);
			}
			goto err;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_CLOSING:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		switch (attrs->next_state) {
		case C4IW_QP_STATE_IDLE:
			flush_qp(qhp);
			set_state(qhp, C4IW_QP_STATE_IDLE);
			qhp->attr.llp_stream_handle = NULL;
			c4iw_put_ep(&qhp->ep->com);
			qhp->ep = NULL;
			wake_up(&qhp->wait);
			break;
		case C4IW_QP_STATE_ERROR:
			goto err;
		default:
			ret = -EINVAL;
			goto err;
		}
		break;
	case C4IW_QP_STATE_ERROR:
		if (attrs->next_state != C4IW_QP_STATE_IDLE) {
			ret = -EINVAL;
			goto out;
		}
		if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
			ret = -EINVAL;
			goto out;
		}
		set_state(qhp, C4IW_QP_STATE_IDLE);
		break;
	case C4IW_QP_STATE_TERMINATE:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		goto err;
		break;
	default:
		printk(KERN_ERR "%s in a bad state %d\n",
		       __func__, qhp->attr.state);
		ret = -EINVAL;
		goto err;
		break;
	}
	goto out;
err:
	PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
	     qhp->wq.sq.qid);

	/* disassociate the LLP connection */
	qhp->attr.llp_stream_handle = NULL;
	if (!ep)
		ep = qhp->ep;
	qhp->ep = NULL;
	set_state(qhp, C4IW_QP_STATE_ERROR);
	free = 1;
	abort = 1;
	wake_up(&qhp->wait);
	BUG_ON(!ep);
	flush_qp(qhp);
out:
	mutex_unlock(&qhp->mutex);

	if (terminate)
		post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);

	/*
	 * If disconnect is 1, then we need to initiate a disconnect
	 * on the EP.  This can be a normal close (RTS->CLOSING) or
	 * an abnormal close (RTS/CLOSING->ERROR).
	 */
	if (disconnect) {
		c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
				   GFP_KERNEL);
		c4iw_put_ep(&ep->com);
	}

	/*
	 * If free is 1, then we've disassociated the EP from the QP
	 * and we need to dereference the EP.
	 */
	if (free)
		c4iw_put_ep(&ep->com);
	PDBG("%s exit state %d\n", __func__, qhp->attr.state);
	return ret;
}

static int enable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_enable_wq_db(&qp->wq);
	return 0;
}

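/*
 * Tear down a QP: move it to ERROR, wait for the EP to disassociate
 * and references to drop, remove it from the qpid table, leave DB
 * flow-control mode if the QP count falls below the threshold, and
 * free the queue resources.
 */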
int c4iw_destroy_qp(struct ib_qp *ib_qp)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_qp_attributes attrs;
	struct c4iw_ucontext *ucontext;

	qhp = to_c4iw_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = C4IW_QP_STATE_ERROR;
	if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	else
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	spin_lock_irq(&rhp->lock);
	remove_handle_nolock(rhp, &rhp->qpidr, qhp->wq.sq.qid);
	rhp->qpcnt--;
	BUG_ON(rhp->qpcnt < 0);
	if (rhp->qpcnt <= db_fc_threshold && rhp->db_state == FLOW_CONTROL) {
		rhp->rdev.stats.db_state_transitions++;
		rhp->db_state = NORMAL;
		idr_for_each(&rhp->qpidr, enable_qp_db, NULL);
	}
	spin_unlock_irq(&rhp->lock);
	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	ucontext = ib_qp->uobject ?
		   to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
	kfree(qhp);
	return 0;
}

static int disable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_disable_wq_db(&qp->wq);
	return 0;
}

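/*
 * Create an RC QP: size and allocate the work queues, register the
 * qpid, enter DB flow-control mode if the QP count crosses the
 * threshold, and (for user QPs) set up the mmap entries returned to
 * the library.
 */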
struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_pd *php;
	struct c4iw_cq *schp;
	struct c4iw_cq *rchp;
	struct c4iw_create_qp_resp uresp;
	int sqsize, rqsize;
	struct c4iw_ucontext *ucontext;
	int ret;
	struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4, *mm5 = NULL;

	PDBG("%s ib_pd %p\n", __func__, pd);

	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
		return ERR_PTR(-EINVAL);

	rqsize = roundup(attrs->cap.max_recv_wr + 1, 16);
	if (rqsize > T4_MAX_RQ_SIZE)
		return ERR_PTR(-E2BIG);

	sqsize = roundup(attrs->cap.max_send_wr + 1, 16);
	if (sqsize > T4_MAX_SQ_SIZE)
		return ERR_PTR(-E2BIG);

	ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;

	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.sq.size = sqsize;
	qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue;
	qhp->wq.rq.size = rqsize;
	qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue;

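	/*
	 * A userspace QP is mmap()ed into the process, so its queue
	 * memory must be sized in whole pages.
	 */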
	if (ucontext) {
		qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
		qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
	}

	PDBG("%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu\n",
	     __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);

	ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize - 1;
	attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = C4IW_QP_STATE_IDLE;
	qhp->attr.next_state = C4IW_QP_STATE_IDLE;
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;
	spin_lock_init(&qhp->lock);
	mutex_init(&qhp->mutex);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);

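	/*
	 * Add the QP to the idr under the device lock.  A new QP starts
	 * with its doorbells disabled if the device is already in db
	 * flow control mode, and if this QP pushes the count above
	 * db_fc_threshold the whole device enters FLOW_CONTROL.
	 */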
	spin_lock_irq(&rhp->lock);
	if (rhp->db_state != NORMAL)
		t4_disable_wq_db(&qhp->wq);
	if (++rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
		rhp->rdev.stats.db_state_transitions++;
		rhp->db_state = FLOW_CONTROL;
		idr_for_each(&rhp->qpidr, disable_qp_db, NULL);
	}
	ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
	spin_unlock_irq(&rhp->lock);
	if (ret)
		goto err2;

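	/*
	 * For a userspace QP, hand back pseudo mmap offsets ("keys")
	 * for the SQ and RQ memory and their doorbell/GTS pages (plus
	 * the MA sync page when the SQ is on-chip).  The user library
	 * mmap()s each key against the verbs device fd, roughly (an
	 * illustrative sketch, not the actual libcxgb4 code; "cmd_fd"
	 * stands in for the provider's device fd):
	 *
	 *	sq = mmap(NULL, resp.sq_memsize, PROT_READ | PROT_WRITE,
	 *		  MAP_SHARED, cmd_fd, resp.sq_key);
	 *	sq_db = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED,
	 *		     cmd_fd, resp.sq_db_gts_key);
	 */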
	if (udata) {
		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
		if (!mm1) {
			ret = -ENOMEM;
			goto err3;
		}
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			ret = -ENOMEM;
			goto err4;
		}
		mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
		if (!mm3) {
			ret = -ENOMEM;
			goto err5;
		}
		mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
		if (!mm4) {
			ret = -ENOMEM;
			goto err6;
		}
		if (t4_sq_onchip(&qhp->wq.sq)) {
			mm5 = kmalloc(sizeof *mm5, GFP_KERNEL);
			if (!mm5) {
				ret = -ENOMEM;
				goto err7;
			}
			uresp.flags = C4IW_QPF_ONCHIP;
		} else
			uresp.flags = 0;
		uresp.qid_mask = rhp->rdev.qpmask;
		uresp.sqid = qhp->wq.sq.qid;
		uresp.sq_size = qhp->wq.sq.size;
		uresp.sq_memsize = qhp->wq.sq.memsize;
		uresp.rqid = qhp->wq.rq.qid;
		uresp.rq_size = qhp->wq.rq.size;
		uresp.rq_memsize = qhp->wq.rq.memsize;
		spin_lock(&ucontext->mmap_lock);
		if (mm5) {
			uresp.ma_sync_key = ucontext->key;
			ucontext->key += PAGE_SIZE;
		}
		uresp.sq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.sq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (ret)
			goto err8;
		mm1->key = uresp.sq_key;
		mm1->addr = qhp->wq.sq.phys_addr;
		mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.rq_key;
		mm2->addr = virt_to_phys(qhp->wq.rq.queue);
		mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
		insert_mmap(ucontext, mm2);
		mm3->key = uresp.sq_db_gts_key;
		mm3->addr = qhp->wq.sq.udb;
		mm3->len = PAGE_SIZE;
		insert_mmap(ucontext, mm3);
		mm4->key = uresp.rq_db_gts_key;
		mm4->addr = qhp->wq.rq.udb;
		mm4->len = PAGE_SIZE;
		insert_mmap(ucontext, mm4);
		if (mm5) {
			mm5->key = uresp.ma_sync_key;
			mm5->addr = (pci_resource_start(rhp->rdev.lldi.pdev, 0)
				    + A_PCIE_MA_SYNC) & PAGE_MASK;
			mm5->len = PAGE_SIZE;
			insert_mmap(ucontext, mm5);
		}
	}
	qhp->ibqp.qp_num = qhp->wq.sq.qid;
	init_timer(&(qhp->timer));
	PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
	     __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	     qhp->wq.sq.qid);
	return &qhp->ibqp;
err8:
	kfree(mm5);
err7:
	kfree(mm4);
err6:
	kfree(mm3);
err5:
	kfree(mm2);
err4:
	kfree(mm1);
err3:
	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
err2:
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(qhp);
	return ERR_PTR(ret);
}

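/*
 * Standard modify_qp verb, translated onto c4iw_modify_qp().  Note the
 * iWARP-specific quirks below: the RTR transition is dropped, and the
 * PSN attributes are reused to carry doorbell index increments.
 */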
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	enum c4iw_qp_attr_mask mask = 0;
	struct c4iw_qp_attributes attrs;

	PDBG("%s ib_qp %p\n", __func__, ibqp);

	/* iWARP does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_c4iw_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = c4iw_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
				  IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				   IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
		(C4IW_QP_ATTR_ENABLE_RDMA_READ |
		 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
		 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	/*
	 * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
	 * ringing the queue db when we're in DB_FULL mode.
	 */
	attrs.sq_db_inc = attr->sq_psn;
	attrs.rq_db_inc = attr->rq_psn;
	mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
	mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;

	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
}

struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
{
	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}

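/*
 * Minimal query_qp: only the current QP state is reported; all other
 * attributes are returned zeroed.
 */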
int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);

	memset(attr, 0, sizeof *attr);
	memset(init_attr, 0, sizeof *init_attr);
	attr->qp_state = to_ib_qp_state(qhp->attr.state);
	return 0;
}