/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>

#include "iw_cxgb4.h"

static int db_delay_usecs = 1;
module_param(db_delay_usecs, int, 0644);
MODULE_PARM_DESC(db_delay_usecs, "Usecs to delay awaiting db fifo to drain");

static int ocqp_support = 1;
module_param(ocqp_support, int, 0644);
MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");

int db_fc_threshold = 2000;
module_param(db_fc_threshold, int, 0644);
MODULE_PARM_DESC(db_fc_threshold, "QP count/threshold that triggers automatic "
		 "db flow control mode (default = 2000)");

static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
{
	unsigned long flag;
	spin_lock_irqsave(&qhp->lock, flag);
	qhp->attr.state = state;
	spin_unlock_irqrestore(&qhp->lock, flag);
}

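/*
 * An SQ can live either in host memory or, when the adapter and the
 * ocqp_support module parameter allow it, in on-chip memory from the
 * OCQP pool.  The T4_SQ_ONCHIP flag records which allocator was used
 * so the matching free routine runs at destroy time.
 */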
static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);
}

static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
			  pci_unmap_addr(sq, mapping));
}

static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	if (t4_sq_onchip(sq))
		dealloc_oc_sq(rdev, sq);
	else
		dealloc_host_sq(rdev, sq);
}

static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	if (!ocqp_support || !t4_ocqp_supported())
		return -ENOSYS;
	sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
	if (!sq->dma_addr)
		return -ENOMEM;
	sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr -
			rdev->lldi.vr->ocq.start;
	sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr -
					    rdev->lldi.vr->ocq.start);
	sq->flags |= T4_SQ_ONCHIP;
	return 0;
}

static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize,
				       &(sq->dma_addr), GFP_KERNEL);
	if (!sq->queue)
		return -ENOMEM;
	sq->phys_addr = virt_to_phys(sq->queue);
	pci_unmap_addr_set(sq, mapping, sq->dma_addr);
	return 0;
}

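/*
 * User-mode QPs try the on-chip SQ first and fall back to host memory
 * if on-chip allocation fails or is unsupported; kernel-mode QPs
 * always use a host-memory SQ.
 */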
static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
{
	int ret = -ENOSYS;
	if (user)
		ret = alloc_oc_sq(rdev, sq);
	if (ret)
		ret = alloc_host_sq(rdev, sq);
	return ret;
}

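/*
 * Tear down the work queues: free the RQ and SQ memory, return the
 * RQT and both qids to their pools.
 */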
static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		      struct c4iw_dev_ucontext *uctx)
{
	/*
	 * uP clears EQ contexts when the connection exits rdma mode,
	 * so no need to post a RESET WR for these EQs.
	 */
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
	dealloc_sq(rdev, &wq->sq);
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
	kfree(wq->rq.sw_rq);
	kfree(wq->sq.sw_sq);
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return 0;
}

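/*
 * Allocate everything the QP needs (qids, software queues, RQT, SQ/RQ
 * memory), then issue a single FW_RI_RES_WR that writes both the SQ
 * and RQ egress-queue contexts in firmware.  The WR requests a
 * completion and the caller blocks in c4iw_wait_for_reply() until
 * firmware responds.
 */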
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx)
{
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret;
	int eqsize;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid) {
		ret = -ENOMEM;
		goto free_sq_qid;
	}

	if (!user) {
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
				       GFP_KERNEL);
		if (!wq->sq.sw_sq) {
			ret = -ENOMEM;
			goto free_rq_qid;
		}

		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
				       GFP_KERNEL);
		if (!wq->rq.sw_rq) {
			ret = -ENOMEM;
			goto free_sw_sq;
		}
	}

	/*
	 * RQT must be a power of 2.
	 */
	wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr) {
		ret = -ENOMEM;
		goto free_sw_rq;
	}

	ret = alloc_sq(rdev, &wq->sq, user);
	if (ret)
		goto free_hwaddr;
	memset(wq->sq.queue, 0, wq->sq.memsize);
	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
					  wq->rq.memsize, &(wq->rq.dma_addr),
					  GFP_KERNEL);
	if (!wq->rq.queue) {
		ret = -ENOMEM;
		goto free_sq;
	}
	PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
	     __func__, wq->sq.queue,
	     (unsigned long long)virt_to_phys(wq->sq.queue),
	     wq->rq.queue,
	     (unsigned long long)virt_to_phys(wq->rq.queue));
	memset(wq->rq.queue, 0, wq->rq.memsize);
	dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

	wq->db = rdev->lldi.db_reg;
	wq->gts = rdev->lldi.gts_reg;
	if (user) {
		wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
			     (wq->sq.qid << rdev->qpshift);
		wq->sq.udb &= PAGE_MASK;
		wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
			     (wq->rq.qid << rdev->qpshift);
		wq->rq.udb &= PAGE_MASK;
	}
	wq->rdev = rdev;
	wq->rq.msn = 1;

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto free_dma;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(2) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		(t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) |
		V_FW_RI_RES_WR_IQID(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto free_dma;
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
	if (ret)
		goto free_dma;

	PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
	     __func__, wq->sq.qid, wq->rq.qid, wq->db,
	     (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);

	return 0;
free_dma:
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
free_sq:
	dealloc_sq(rdev, &wq->sq);
free_hwaddr:
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
free_sw_rq:
	kfree(wq->rq.sw_rq);
free_sw_sq:
	kfree(wq->sq.sw_sq);
free_rq_qid:
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
free_sq_qid:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return ret;
}

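/*
 * Copy the payload described by the WR's SGEs inline into the WQE as
 * a FW_RI_DATA_IMMD chunk.  The copy wraps at the end of the SQ ring
 * and the tail is zero-padded out to the next 16-byte boundary.
 */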
static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
		      struct ib_send_wr *wr, int max, u32 *plenp)
{
	u8 *dstp, *srcp;
	u32 plen = 0;
	int i;
	int rem, len;

	dstp = (u8 *)immdp->data;
	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) > max)
			return -EMSGSIZE;
		srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
		plen += wr->sg_list[i].length;
		rem = wr->sg_list[i].length;
		while (rem) {
			if (dstp == (u8 *)&sq->queue[sq->size])
				dstp = (u8 *)sq->queue;
			if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
				len = rem;
			else
				len = (u8 *)&sq->queue[sq->size] - dstp;
			memcpy(dstp, srcp, len);
			dstp += len;
			srcp += len;
			rem -= len;
		}
	}
	len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
	if (len)
		memset(dstp, 0, len);
	immdp->op = FW_RI_DATA_IMMD;
	immdp->r1 = 0;
	immdp->r2 = 0;
	immdp->immdlen = cpu_to_be32(plen);
	*plenp = plen;
	return 0;
}

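/*
 * Build a FW_RI_DATA_ISGL in the WQE: for each SGE, write one flit
 * holding the lkey and length and one holding the address, wrapping
 * from queue_end back to queue_start as needed.
 */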
static int build_isgl(__be64 *queue_start, __be64 *queue_end,
		      struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
		      int num_sge, u32 *plenp)
{
	int i;
	u32 plen = 0;
	__be64 *flitp = (__be64 *)isglp->sge;

	for (i = 0; i < num_sge; i++) {
		if ((plen + sg_list[i].length) < plen)
			return -EMSGSIZE;
		plen += sg_list[i].length;
		*flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
				     sg_list[i].length);
		if (++flitp == queue_end)
			flitp = queue_start;
		*flitp = cpu_to_be64(sg_list[i].addr);
		if (++flitp == queue_end)
			flitp = queue_start;
	}
	*flitp = (__force __be64)0;
	isglp->op = FW_RI_DATA_ISGL;
	isglp->r1 = 0;
	isglp->nsge = cpu_to_be16(num_sge);
	isglp->r2 = 0;
	if (plenp)
		*plenp = plen;
	return 0;
}

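/*
 * The build_rdma_* helpers below translate an ib_send_wr into the
 * matching firmware work request.  For sends and writes the payload
 * is either copied inline (IB_SEND_INLINE) via build_immd() or
 * referenced through an ISGL via build_isgl(); *len16 returns the WQE
 * size in 16-byte units for the ring-produce accounting.
 */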
static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
			   struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	switch (wr->opcode) {
	case IB_WR_SEND:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
		wqe->send.stag_inv = 0;
		break;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
		wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
		break;

	default:
		return -EINVAL;
	}

	plen = 0;
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->send.u.immd_src, wr,
					 T4_MAX_SEND_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->send.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->send.u.immd_src[0].r1 = 0;
		wqe->send.u.immd_src[0].r2 = 0;
		wqe->send.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}

static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
			    struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	wqe->write.r2 = 0;
	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->write.u.immd_src, wr,
					 T4_MAX_WRITE_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->write.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->write.u.immd_src[0].r1 = 0;
		wqe->write.u.immd_src[0].r2 = 0;
		wqe->write.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}

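/*
 * RDMA reads carry at most one SGE.  A zero-SGE request is encoded as
 * a zero-length read using STag 2 for both source and sink.
 */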
static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
	if (wr->num_sge > 1)
		return -EINVAL;
	if (wr->num_sge) {
		wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
		wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr
							>> 32));
		wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
		wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
		wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
		wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
							 >> 32));
		wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
	} else {
		wqe->read.stag_src = cpu_to_be32(2);
		wqe->read.to_src_hi = 0;
		wqe->read.to_src_lo = 0;
		wqe->read.stag_sink = cpu_to_be32(2);
		wqe->read.plen = 0;
		wqe->read.to_sink_hi = 0;
		wqe->read.to_sink_lo = 0;
	}
	wqe->read.r2 = 0;
	wqe->read.r5 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
	return 0;
}

static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
			   struct ib_recv_wr *wr, u8 *len16)
{
	int ret;

	ret = build_isgl((__be64 *)qhp->wq.rq.queue,
			 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
			 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
	if (ret)
		return ret;
	*len16 = DIV_ROUND_UP(sizeof wqe->recv +
			      wr->num_sge * sizeof(struct fw_ri_sge), 16);
	return 0;
}

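/*
 * Build a fast-register WR.  The MR attributes come from the
 * ib_send_wr and the page list is copied inline after the WQE header,
 * wrapping at the end of the SQ and zero-filled out to the 32-byte
 * rounded PBL length.
 */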
static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
			 struct ib_send_wr *wr, u8 *len16)
{
	struct fw_ri_immd *imdp;
	__be64 *p;
	int i;
	int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
	int rem;

	if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
		return -EINVAL;

	wqe->fr.qpbinde_to_dcacpu = 0;
	wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12;
	wqe->fr.addr_type = FW_RI_VA_BASED_TO;
	wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags);
	wqe->fr.len_hi = 0;
	wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length);
	wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
	wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
	wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
					0xffffffff);
	WARN_ON(pbllen > T4_MAX_FR_IMMD);
	imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
	imdp->op = FW_RI_DATA_IMMD;
	imdp->r1 = 0;
	imdp->r2 = 0;
	imdp->immdlen = cpu_to_be32(pbllen);
	p = (__be64 *)(imdp + 1);
	rem = pbllen;
	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
		*p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
		rem -= sizeof *p;
		if (++p == (__be64 *)&sq->queue[sq->size])
			p = (__be64 *)sq->queue;
	}
	BUG_ON(rem < 0);
	while (rem) {
		*p = 0;
		rem -= sizeof *p;
		if (++p == (__be64 *)&sq->queue[sq->size])
			p = (__be64 *)sq->queue;
	}
	*len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen, 16);
	return 0;
}

static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
			  u8 *len16)
{
	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
	wqe->inv.r2 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
	return 0;
}

void c4iw_qp_add_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	atomic_inc(&(to_c4iw_qp(qp)->refcnt));
}

void c4iw_qp_rem_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
		wake_up(&(to_c4iw_qp(qp)->wait));
}

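/*
 * Post a chain of send work requests.  The QP lock is held for the
 * whole chain; each WR is built into the WQE at the current software
 * pidx, the producer index is advanced by the WQE size, and the SQ
 * doorbell is rung once at the end if doorbells are enabled.
 */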
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr)
{
	int err = 0;
	u8 len16 = 0;
	enum fw_wr_opcodes fw_opcode = 0;
	enum fw_ri_wr_flags fw_flags;
	struct c4iw_qp *qhp;
	union t4_wr *wqe;
	u32 num_wrs;
	struct t4_swsqe *swsqe;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = t4_sq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (num_wrs == 0) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
		      qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);

		fw_flags = 0;
		if (wr->send_flags & IB_SEND_SOLICITED)
			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IB_SEND_SIGNALED)
			fw_flags |= FW_RI_COMPLETION_FLAG;
		swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
		switch (wr->opcode) {
		case IB_WR_SEND_WITH_INV:
		case IB_WR_SEND:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_READ_FENCE_FLAG;
			fw_opcode = FW_RI_SEND_WR;
			if (wr->opcode == IB_WR_SEND)
				swsqe->opcode = FW_RI_SEND;
			else
				swsqe->opcode = FW_RI_SEND_WITH_INV;
			err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_WRITE:
			fw_opcode = FW_RI_RDMA_WRITE_WR;
			swsqe->opcode = FW_RI_RDMA_WRITE;
			err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_READ_WITH_INV:
			fw_opcode = FW_RI_RDMA_READ_WR;
			swsqe->opcode = FW_RI_READ_REQ;
			if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
				fw_flags = FW_RI_RDMA_READ_INVALIDATE;
			else
				fw_flags = 0;
			err = build_rdma_read(wqe, wr, &len16);
			if (err)
				break;
			swsqe->read_len = wr->sg_list[0].length;
			if (!qhp->wq.sq.oldest_read)
				qhp->wq.sq.oldest_read = swsqe;
			break;
		case IB_WR_FAST_REG_MR:
			fw_opcode = FW_RI_FR_NSMR_WR;
			swsqe->opcode = FW_RI_FAST_REGISTER;
			err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_LOCAL_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
			fw_opcode = FW_RI_INV_LSTAG_WR;
			swsqe->opcode = FW_RI_LOCAL_INV;
			err = build_inv_stag(wqe, wr, &len16);
			break;
		default:
			PDBG("%s post of type=%d TBD!\n", __func__,
			     wr->opcode);
			err = -EINVAL;
		}
		if (err) {
			*bad_wr = wr;
			break;
		}
		swsqe->idx = qhp->wq.sq.pidx;
		swsqe->complete = 0;
		swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED);
		swsqe->wr_id = wr->wr_id;

		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);

		PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
		     __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
		     swsqe->opcode, swsqe->read_len);
		wr = wr->next;
		num_wrs--;
		t4_sq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
	}
	if (t4_wq_db_enabled(&qhp->wq))
		t4_ring_sq_db(&qhp->wq, idx);
	spin_unlock_irqrestore(&qhp->lock, flag);
	return err;
}

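/*
 * Post a chain of receive work requests; same locking and ring
 * accounting as c4iw_post_send(), but against the RQ.
 */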
int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct c4iw_qp *qhp;
	union t4_recv_wr *wqe;
	u32 num_wrs;
	u8 len16 = 0;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = t4_rq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (wr->num_sge > T4_MAX_RECV_SGE) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
					   qhp->wq.rq.wq_pidx *
					   T4_EQ_ENTRY_SIZE);
		if (num_wrs)
			err = build_rdma_recv(qhp, wqe, wr, &len16);
		else
			err = -ENOMEM;
		if (err) {
			*bad_wr = wr;
			break;
		}

		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;

		wqe->recv.opcode = FW_RI_RECV_WR;
		wqe->recv.r1 = 0;
		wqe->recv.wrid = qhp->wq.rq.pidx;
		wqe->recv.r2[0] = 0;
		wqe->recv.r2[1] = 0;
		wqe->recv.r2[2] = 0;
		wqe->recv.len16 = len16;
		PDBG("%s cookie 0x%llx pidx %u\n", __func__,
		     (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
		t4_rq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
		wr = wr->next;
		num_wrs--;
	}
	if (t4_wq_db_enabled(&qhp->wq))
		t4_ring_rq_db(&qhp->wq, idx);
	spin_unlock_irqrestore(&qhp->lock, flag);
	return err;
}

int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)
{
	return -ENOSYS;
}

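/*
 * Translate a CQE error status into the TERMINATE message layer/etype
 * byte and error code defined by the iWARP protocol specs.
 */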
static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
				    u8 *ecode)
{
	int status;
	int tagged;
	int opcode;
	int rqtype;
	int send_inv;

	if (!err_cqe) {
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		return;
	}

	status = CQE_STATUS(err_cqe);
	opcode = CQE_OPCODE(err_cqe);
	rqtype = RQ_TYPE(err_cqe);
	send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
		   (opcode == FW_RI_SEND_WITH_SE_INV);
	tagged = (opcode == FW_RI_RDMA_WRITE) ||
		 (rqtype && (opcode == FW_RI_READ_RESP));

	switch (status) {
	case T4_ERR_STAG:
		if (send_inv) {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
			*ecode = RDMAP_CANT_INV_STAG;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_INV_STAG;
		}
		break;
	case T4_ERR_PDID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		if ((opcode == FW_RI_SEND_WITH_INV) ||
		    (opcode == FW_RI_SEND_WITH_SE_INV))
			*ecode = RDMAP_CANT_INV_STAG;
		else
			*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_QPID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_ACCESS:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_ACC_VIOL;
		break;
	case T4_ERR_WRAP:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_TO_WRAP;
		break;
	case T4_ERR_BOUND:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_BASE_BOUNDS;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_BASE_BOUNDS;
		}
		break;
	case T4_ERR_INVALIDATE_SHARED_MR:
	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_CANT_INV_STAG;
		break;
	case T4_ERR_ECC:
	case T4_ERR_ECC_PSTAG:
	case T4_ERR_INTERNAL_ERR:
		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_OUT_OF_RQE:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_NOBUF;
		break;
	case T4_ERR_PBL_ADDR_BOUND:
		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
		*ecode = DDPT_BASE_BOUNDS;
		break;
	case T4_ERR_CRC:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_CRC_ERR;
		break;
	case T4_ERR_MARKER:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_MARKER_ERR;
		break;
	case T4_ERR_PDU_LEN_ERR:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_MSG_TOOBIG;
		break;
	case T4_ERR_DDP_VERSION:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_INV_VERS;
		} else {
			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
			*ecode = DDPU_INV_VERS;
		}
		break;
	case T4_ERR_RDMA_VERSION:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_VERS;
		break;
	case T4_ERR_OPCODE:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_OPCODE;
		break;
	case T4_ERR_DDP_QUEUE_NUM:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_QN;
		break;
	case T4_ERR_MSN:
	case T4_ERR_MSN_GAP:
	case T4_ERR_MSN_RANGE:
	case T4_ERR_IRD_OVERFLOW:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_RANGE;
		break;
	case T4_ERR_TBIT:
		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_MO:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MO;
		break;
	default:
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	}
}

static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
			   gfp_t gfp)
{
	struct fw_ri_wr *wqe;
	struct sk_buff *skb;
	struct terminate_message *term;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     qhp->ep->hwtid);

	skb = alloc_skb(sizeof *wqe, gfp);
	if (!skb)
		return;
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(FW_WR_OP(FW_RI_INIT_WR));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(qhp->ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
	wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
	term = (struct terminate_message *)wqe->u.terminate.termmsg;
	if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
		term->layer_etype = qhp->attr.layer_etype;
		term->ecode = qhp->attr.ecode;
	} else
		build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
	c4iw_ofld_send(&qhp->rhp->rdev, skb);
}

/*
 * Assumes qhp lock is held.
 */
static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
		       struct c4iw_cq *schp)
{
	int count;
	int flushed;
	unsigned long flag;

	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, flag);
	spin_lock(&qhp->lock);
	c4iw_flush_hw_cq(&rchp->cq);
	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
	flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&rchp->lock, flag);
	if (flushed) {
		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
	}

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&schp->lock, flag);
	spin_lock(&qhp->lock);
	c4iw_flush_hw_cq(&schp->cq);
	c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
	flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&schp->lock, flag);
	if (flushed) {
		spin_lock_irqsave(&schp->comp_handler_lock, flag);
		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
		spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
	}
}

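/*
 * For user QPs there is nothing to flush in software: mark the WQ and
 * CQs in error and run the completion handlers.  Kernel QPs are
 * flushed for real via __flush_qp().
 */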
static void flush_qp(struct c4iw_qp *qhp)
{
	struct c4iw_cq *rchp, *schp;
	unsigned long flag;

	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
	schp = get_chp(qhp->rhp, qhp->attr.scq);

	if (qhp->ibqp.uobject) {
		t4_set_wq_in_error(&qhp->wq);
		t4_set_cq_in_error(&rchp->cq);
		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
		if (schp != rchp) {
			t4_set_cq_in_error(&schp->cq);
			spin_lock_irqsave(&schp->comp_handler_lock, flag);
			(*schp->ibcq.comp_handler)(&schp->ibcq,
					schp->ibcq.cq_context);
			spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
		}
		return;
	}
	__flush_qp(qhp, rchp, schp);
}

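/*
 * Post a FW_RI_INIT_WR of type FINI to take the connection out of
 * RDMA mode, then wait for the firmware completion.
 */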
static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		     struct c4iw_ep *ep)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct sk_buff *skb;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     ep->hwtid);

	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP(FW_RI_INIT_WR) |
		FW_WR_COMPL(1));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
	wqe->cookie = (unsigned long) &ep->com.wr_wait;

	wqe->u.fini.type = FW_RI_TYPE_FINI;
	ret = c4iw_ofld_send(&rhp->rdev, skb);
	if (ret)
		goto out;

	ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid,
				  qhp->wq.sq.qid, __func__);
out:
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
}

static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
{
	PDBG("%s p2p_type = %d\n", __func__, p2p_type);
	memset(&init->u, 0, sizeof init->u);
	switch (p2p_type) {
	case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
		init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
		init->u.write.stag_sink = cpu_to_be32(1);
		init->u.write.to_sink = cpu_to_be64(1);
		init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
						   sizeof(struct fw_ri_immd),
						   16);
		break;
	case FW_RI_INIT_P2PTYPE_READ_REQ:
		init->u.write.opcode = FW_RI_RDMA_READ_WR;
		init->u.read.stag_src = cpu_to_be32(1);
		init->u.read.to_src_lo = cpu_to_be32(1);
		init->u.read.stag_sink = cpu_to_be32(1);
		init->u.read.to_sink_lo = cpu_to_be32(1);
		init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
		break;
	}
}

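/*
 * Post a FW_RI_INIT_WR of type INIT carrying the negotiated MPA
 * attributes, queue ids, ORD/IRD limits, and initial sequence
 * numbers, then wait for the firmware completion.
 */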
static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct sk_buff *skb;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     qhp->ep->hwtid);

	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP(FW_RI_INIT_WR) |
		FW_WR_COMPL(1));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(qhp->ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	wqe->cookie = (unsigned long) &qhp->ep->com.wr_wait;

	wqe->u.init.type = FW_RI_TYPE_INIT;
	wqe->u.init.mpareqbit_p2ptype =
		V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
		V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
	wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
	if (qhp->attr.mpa_attr.recv_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.xmit_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.crc_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;

	wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
			      FW_RI_QP_RDMA_WRITE_ENABLE |
			      FW_RI_QP_BIND_ENABLE;
	if (!qhp->ibqp.uobject)
		wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
				       FW_RI_QP_STAG0_ENABLE;
	wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
	wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
	wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
	wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
	wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
	wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
	wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
	wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
	wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
	wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
	wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
					   rhp->rdev.lldi.vr->rq.start);
	if (qhp->attr.mpa_attr.initiator)
		build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);

	ret = c4iw_ofld_send(&rhp->rdev, skb);
	if (ret)
		goto out;

	ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait,
				  qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
out:
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
}

/*
 * Called by the library when the qp has user dbs disabled due to
 * a DB_FULL condition.  This function will single-thread all user
 * DB rings to avoid overflowing the hw db-fifo.
 */
static int ring_kernel_db(struct c4iw_qp *qhp, u32 qid, u16 inc)
{
	int delay = db_delay_usecs;

	mutex_lock(&qhp->rhp->db_mutex);
	do {
		/*
		 * The interrupt threshold is dbfifo_int_thresh << 6. So
		 * make sure we don't cross that and generate an interrupt.
		 */
		if (cxgb4_dbfifo_count(qhp->rhp->rdev.lldi.ports[0], 1) <
		    (qhp->rhp->rdev.lldi.dbfifo_int_thresh << 5)) {
			writel(QID(qid) | PIDX(inc), qhp->wq.db);
			break;
		}
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(delay));
		delay = min(delay << 1, 2000);
	} while (1);
	mutex_unlock(&qhp->rhp->db_mutex);
	return 0;
}

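/*
 * The QP state machine.  Transitions are requested by the verbs layer
 * or internally (internal == 1) by the connection manager.  Failures
 * funnel through the err label, which disassociates the endpoint and
 * forces the QP to ERROR; disconnect and terminate work is done after
 * the qp mutex is dropped.
 */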
int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		   enum c4iw_qp_attr_mask mask,
		   struct c4iw_qp_attributes *attrs,
		   int internal)
{
	int ret = 0;
	struct c4iw_qp_attributes newattr = qhp->attr;
	int disconnect = 0;
	int terminate = 0;
	int abort = 0;
	int free = 0;
	struct c4iw_ep *ep = NULL;

	PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__,
	     qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
	     (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

	mutex_lock(&qhp->mutex);

	/* Process attr changes if in IDLE */
	if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
		if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
			ret = -EIO;
			goto out;
		}
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
			newattr.enable_rdma_read = attrs->enable_rdma_read;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
			newattr.enable_rdma_write = attrs->enable_rdma_write;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
			newattr.enable_bind = attrs->enable_bind;
		if (mask & C4IW_QP_ATTR_MAX_ORD) {
			if (attrs->max_ord > c4iw_max_read_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ord = attrs->max_ord;
		}
		if (mask & C4IW_QP_ATTR_MAX_IRD) {
			if (attrs->max_ird > c4iw_max_read_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ird = attrs->max_ird;
		}
		qhp->attr = newattr;
	}

	if (mask & C4IW_QP_ATTR_SQ_DB) {
		ret = ring_kernel_db(qhp, qhp->wq.sq.qid, attrs->sq_db_inc);
		goto out;
	}
	if (mask & C4IW_QP_ATTR_RQ_DB) {
		ret = ring_kernel_db(qhp, qhp->wq.rq.qid, attrs->rq_db_inc);
		goto out;
	}

	if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
		goto out;
	if (qhp->attr.state == attrs->next_state)
		goto out;

	switch (qhp->attr.state) {
	case C4IW_QP_STATE_IDLE:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_RTS:
			if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
				ret = -EINVAL;
				goto out;
			}
			if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
				ret = -EINVAL;
				goto out;
			}
			qhp->attr.mpa_attr = attrs->mpa_attr;
			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
			qhp->ep = qhp->attr.llp_stream_handle;
			set_state(qhp, C4IW_QP_STATE_RTS);

			/*
			 * Ref the endpoint here and deref when we
			 * disassociate the endpoint from the QP.  This
			 * happens in CLOSING->IDLE transition or *->ERROR
			 * transition.
			 */
			c4iw_get_ep(&qhp->ep->com);
			ret = rdma_init(rhp, qhp);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_ERROR:
			set_state(qhp, C4IW_QP_STATE_ERROR);
			flush_qp(qhp);
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_RTS:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_CLOSING:
			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
			set_state(qhp, C4IW_QP_STATE_CLOSING);
			ep = qhp->ep;
			if (!internal) {
				abort = 0;
				disconnect = 1;
				c4iw_get_ep(&qhp->ep->com);
			}
			if (qhp->ibqp.uobject)
				t4_set_wq_in_error(&qhp->wq);
			ret = rdma_fini(rhp, qhp, ep);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_TERMINATE:
			set_state(qhp, C4IW_QP_STATE_TERMINATE);
			qhp->attr.layer_etype = attrs->layer_etype;
			qhp->attr.ecode = attrs->ecode;
			if (qhp->ibqp.uobject)
				t4_set_wq_in_error(&qhp->wq);
			ep = qhp->ep;
			if (!internal)
				terminate = 1;
			disconnect = 1;
			c4iw_get_ep(&qhp->ep->com);
			break;
		case C4IW_QP_STATE_ERROR:
			set_state(qhp, C4IW_QP_STATE_ERROR);
			if (qhp->ibqp.uobject)
				t4_set_wq_in_error(&qhp->wq);
			if (!internal) {
				abort = 1;
				disconnect = 1;
				ep = qhp->ep;
				c4iw_get_ep(&qhp->ep->com);
			}
			goto err;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_CLOSING:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		switch (attrs->next_state) {
		case C4IW_QP_STATE_IDLE:
			flush_qp(qhp);
			set_state(qhp, C4IW_QP_STATE_IDLE);
			qhp->attr.llp_stream_handle = NULL;
			c4iw_put_ep(&qhp->ep->com);
			qhp->ep = NULL;
			wake_up(&qhp->wait);
			break;
		case C4IW_QP_STATE_ERROR:
			goto err;
		default:
			ret = -EINVAL;
			goto err;
		}
		break;
	case C4IW_QP_STATE_ERROR:
		if (attrs->next_state != C4IW_QP_STATE_IDLE) {
			ret = -EINVAL;
			goto out;
		}
		if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
			ret = -EINVAL;
			goto out;
		}
		set_state(qhp, C4IW_QP_STATE_IDLE);
		break;
	case C4IW_QP_STATE_TERMINATE:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		goto err;
		break;
	default:
		printk(KERN_ERR "%s in a bad state %d\n",
		       __func__, qhp->attr.state);
		ret = -EINVAL;
		goto err;
		break;
	}
	goto out;
err:
	PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
	     qhp->wq.sq.qid);

	/* disassociate the LLP connection */
	qhp->attr.llp_stream_handle = NULL;
	if (!ep)
		ep = qhp->ep;
	qhp->ep = NULL;
	set_state(qhp, C4IW_QP_STATE_ERROR);
	free = 1;
	abort = 1;
	wake_up(&qhp->wait);
	BUG_ON(!ep);
	flush_qp(qhp);
out:
	mutex_unlock(&qhp->mutex);

	if (terminate)
		post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);

	/*
	 * If disconnect is 1, then we need to initiate a disconnect
	 * on the EP.  This can be a normal close (RTS->CLOSING) or
	 * an abnormal close (RTS/CLOSING->ERROR).
	 */
	if (disconnect) {
		c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
							 GFP_KERNEL);
		c4iw_put_ep(&ep->com);
	}

	/*
	 * If free is 1, then we've disassociated the EP from the QP
	 * and we need to dereference the EP.
	 */
	if (free)
		c4iw_put_ep(&ep->com);
	PDBG("%s exit state %d\n", __func__, qhp->attr.state);
	return ret;
}

static int enable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_enable_wq_db(&qp->wq);
	return 0;
}

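/*
 * Move the QP to ERROR, remove it from the qpid idr (leaving db
 * flow-control mode and re-enabling all WQ doorbells if the QP count
 * drops back to db_fc_threshold), wait for all references to go away,
 * and free the hardware queues.
 */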
int c4iw_destroy_qp(struct ib_qp *ib_qp)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_qp_attributes attrs;
	struct c4iw_ucontext *ucontext;

	qhp = to_c4iw_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = C4IW_QP_STATE_ERROR;
	if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	else
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	spin_lock_irq(&rhp->lock);
	remove_handle_nolock(rhp, &rhp->qpidr, qhp->wq.sq.qid);
	rhp->qpcnt--;
	BUG_ON(rhp->qpcnt < 0);
	if (rhp->qpcnt <= db_fc_threshold && rhp->db_state == FLOW_CONTROL) {
		rhp->rdev.stats.db_state_transitions++;
		rhp->db_state = NORMAL;
		idr_for_each(&rhp->qpidr, enable_qp_db, NULL);
	}
	spin_unlock_irq(&rhp->lock);
	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	ucontext = ib_qp->uobject ?
		   to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
	kfree(qhp);
	return 0;
}

static int disable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_disable_wq_db(&qp->wq);
	return 0;
}

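/*
 * Create an RC QP: size and create the hardware queues, insert the QP
 * in the qpid idr (entering db flow-control mode and disabling all WQ
 * doorbells if the QP count crosses db_fc_threshold), and for user
 * QPs allocate the c4iw_mm_entry records for the user-mode mappings
 * of the queues and doorbells.
 */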
struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_pd *php;
	struct c4iw_cq *schp;
	struct c4iw_cq *rchp;
	struct c4iw_create_qp_resp uresp;
	int sqsize, rqsize;
	struct c4iw_ucontext *ucontext;
	int ret;
	struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4, *mm5 = NULL;

	PDBG("%s ib_pd %p\n", __func__, pd);

	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
		return ERR_PTR(-EINVAL);

	rqsize = roundup(attrs->cap.max_recv_wr + 1, 16);
	if (rqsize > T4_MAX_RQ_SIZE)
		return ERR_PTR(-E2BIG);

	sqsize = roundup(attrs->cap.max_send_wr + 1, 16);
	if (sqsize > T4_MAX_SQ_SIZE)
		return ERR_PTR(-E2BIG);

	ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;

	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.sq.size = sqsize;
	qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue;
	qhp->wq.rq.size = rqsize;
	qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue;

	if (ucontext) {
		qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
		qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
	}

	PDBG("%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu\n",
	     __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);

	ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize - 1;
	attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = C4IW_QP_STATE_IDLE;
	qhp->attr.next_state = C4IW_QP_STATE_IDLE;
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;
	spin_lock_init(&qhp->lock);
	mutex_init(&qhp->mutex);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);

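	/*
	 * Register the QP under the device lock.  A QP created while
	 * the device is throttled starts with its doorbells disabled,
	 * and crossing db_fc_threshold flips the whole device into
	 * FLOW_CONTROL and disables doorbells on every QP.
	 */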
	spin_lock_irq(&rhp->lock);
	if (rhp->db_state != NORMAL)
		t4_disable_wq_db(&qhp->wq);
	if (++rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
		rhp->rdev.stats.db_state_transitions++;
		rhp->db_state = FLOW_CONTROL;
		idr_for_each(&rhp->qpidr, disable_qp_db, NULL);
	}
	ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
	spin_unlock_irq(&rhp->lock);
	if (ret)
		goto err2;

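	/*
	 * For userspace QPs, hand back the queue geometry plus a set of
	 * mmap keys.  Each key is a per-context pseudo-offset that the
	 * user library later passes to mmap(); insert_mmap() records the
	 * key -> address translation the driver's mmap handler uses.
	 */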
	if (udata) {
		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
		if (!mm1) {
			ret = -ENOMEM;
			goto err3;
		}
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			ret = -ENOMEM;
			goto err4;
		}
		mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
		if (!mm3) {
			ret = -ENOMEM;
			goto err5;
		}
		mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
		if (!mm4) {
			ret = -ENOMEM;
			goto err6;
		}
		if (t4_sq_onchip(&qhp->wq.sq)) {
			mm5 = kmalloc(sizeof *mm5, GFP_KERNEL);
			if (!mm5) {
				ret = -ENOMEM;
				goto err7;
			}
			uresp.flags = C4IW_QPF_ONCHIP;
		} else
			uresp.flags = 0;
		uresp.qid_mask = rhp->rdev.qpmask;
		uresp.sqid = qhp->wq.sq.qid;
		uresp.sq_size = qhp->wq.sq.size;
		uresp.sq_memsize = qhp->wq.sq.memsize;
		uresp.rqid = qhp->wq.rq.qid;
		uresp.rq_size = qhp->wq.rq.size;
		uresp.rq_memsize = qhp->wq.rq.memsize;
		spin_lock(&ucontext->mmap_lock);
		if (mm5) {
			uresp.ma_sync_key = ucontext->key;
			ucontext->key += PAGE_SIZE;
		} else {
			/* don't copy uninitialized stack to user space */
			uresp.ma_sync_key = 0;
		}
		uresp.sq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.sq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (ret)
			goto err8;
		mm1->key = uresp.sq_key;
		mm1->addr = qhp->wq.sq.phys_addr;
		mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.rq_key;
		mm2->addr = virt_to_phys(qhp->wq.rq.queue);
		mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
		insert_mmap(ucontext, mm2);
		mm3->key = uresp.sq_db_gts_key;
		mm3->addr = qhp->wq.sq.udb;
		mm3->len = PAGE_SIZE;
		insert_mmap(ucontext, mm3);
		mm4->key = uresp.rq_db_gts_key;
		mm4->addr = qhp->wq.rq.udb;
		mm4->len = PAGE_SIZE;
		insert_mmap(ucontext, mm4);
		if (mm5) {
			mm5->key = uresp.ma_sync_key;
			mm5->addr = (pci_resource_start(rhp->rdev.lldi.pdev, 0)
				    + A_PCIE_MA_SYNC) & PAGE_MASK;
			mm5->len = PAGE_SIZE;
			insert_mmap(ucontext, mm5);
		}
	}
	qhp->ibqp.qp_num = qhp->wq.sq.qid;
	init_timer(&(qhp->timer));
	PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
	     __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	     qhp->wq.sq.qid);
	return &qhp->ibqp;
err8:
	kfree(mm5);
err7:
	kfree(mm4);
err6:
	kfree(mm3);
err5:
	kfree(mm2);
err4:
	kfree(mm1);
err3:
	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
err2:
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(qhp);
	return ERR_PTR(ret);
}

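/*
 * ib_modify_qp() entry point.  iWARP has no RTR state, so that
 * transition is silently dropped.  The otherwise-unused SQ/RQ PSN
 * attributes are overloaded to carry doorbell index increments while
 * the device is in db flow control mode (see the comment below).
 */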
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	enum c4iw_qp_attr_mask mask = 0;
	struct c4iw_qp_attributes attrs;

	PDBG("%s ib_qp %p\n", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_c4iw_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = c4iw_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
				  IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				   IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
		(C4IW_QP_ATTR_ENABLE_RDMA_READ |
		 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
		 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	/*
	 * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
	 * ringing the queue db when we're in DB_FULL mode.
	 */
	attrs.sq_db_inc = attr->sq_psn;
	attrs.rq_db_inc = attr->rq_psn;
	mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
	mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;

	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
}

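/*
 * Example (a hedged sketch, not from this driver): given the PSN
 * overloading above, a user-mode provider that finds the doorbell
 * FIFO full could push accumulated index increments through the
 * kernel with a plain modify_qp call.  The helper below is
 * hypothetical; only ibv_modify_qp() and the IBV_QP_SQ_PSN /
 * IBV_QP_RQ_PSN mask bits are standard libibverbs API.
 *
 *	static int ring_db_via_modify_qp(struct ibv_qp *qp,
 *					 uint32_t sq_inc, uint32_t rq_inc)
 *	{
 *		struct ibv_qp_attr attr;
 *
 *		memset(&attr, 0, sizeof attr);
 *		attr.sq_psn = sq_inc;	// SQ doorbell index increment
 *		attr.rq_psn = rq_inc;	// RQ doorbell index increment
 *		return ibv_modify_qp(qp, &attr,
 *				     IBV_QP_SQ_PSN | IBV_QP_RQ_PSN);
 *	}
 */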
struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
{
	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}

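/*
 * Minimal query_qp: only the current QP state is returned; all other
 * attributes and the init_attr caps are zeroed.
 */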
int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);

	memset(attr, 0, sizeof *attr);
	memset(init_attr, 0, sizeof *init_attr);
	attr->qp_state = to_ib_qp_state(qhp->attr.state);
	return 0;
}