/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>

#include "iw_cxgb4.h"

static int ocqp_support = 1;
module_param(ocqp_support, int, 0644);
MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");

static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
{
	unsigned long flag;
	spin_lock_irqsave(&qhp->lock, flag);
	qhp->attr.state = state;
	spin_unlock_irqrestore(&qhp->lock, flag);
}

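/*
 * Free an SQ's queue memory: on-chip SQs go back to the OCQP pool,
 * host SQs are freed via the DMA API.
 */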
static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);
}

static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
			  dma_unmap_addr(sq, mapping));
}

static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	if (t4_sq_onchip(sq))
		dealloc_oc_sq(rdev, sq);
	else
		dealloc_host_sq(rdev, sq);
}

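/*
 * Try to carve the SQ out of the adapter's on-chip queue memory.
 * Fails if the module parameter or the hardware doesn't support OCQP,
 * or if the pool is exhausted; the caller then falls back to a host SQ.
 */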
static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	if (!ocqp_support || !t4_ocqp_supported())
		return -ENOSYS;
	sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
	if (!sq->dma_addr)
		return -ENOMEM;
	sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr -
			rdev->lldi.vr->ocq.start;
	sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr -
					    rdev->lldi.vr->ocq.start);
	sq->flags |= T4_SQ_ONCHIP;
	return 0;
}

static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize,
				       &(sq->dma_addr), GFP_KERNEL);
	if (!sq->queue)
		return -ENOMEM;
	sq->phys_addr = virt_to_phys(sq->queue);
	dma_unmap_addr_set(sq, mapping, sq->dma_addr);
	return 0;
}

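/*
 * Undo create_qp(): free the RQ and SQ memory, the RQT entries, the
 * software shadow rings, and release both qids.
 */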
static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		      struct c4iw_dev_ucontext *uctx)
{
	/*
	 * uP clears EQ contexts when the connection exits rdma mode,
	 * so no need to post a RESET WR for these EQs.
	 */
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
	dealloc_sq(rdev, &wq->sq);
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
	kfree(wq->rq.sw_rq);
	kfree(wq->sq.sw_sq);
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return 0;
}

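/*
 * Allocate everything a new QP needs (qids, SQ/RQ memory, RQT entries,
 * software rings), then post a FW_RI_RES_WR telling firmware to write
 * both egress queue contexts, and wait for its reply.
 */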
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx)
{
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret;
	int eqsize;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid)
		goto err1;

	if (!user) {
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
				       GFP_KERNEL);
		if (!wq->sq.sw_sq)
			goto err2;

		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
				       GFP_KERNEL);
		if (!wq->rq.sw_rq)
			goto err3;
	}

	/*
	 * RQT must be a power of 2.
	 */
	wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr)
		goto err4;

	if (user) {
		if (alloc_oc_sq(rdev, &wq->sq) && alloc_host_sq(rdev, &wq->sq))
			goto err5;
	} else
		if (alloc_host_sq(rdev, &wq->sq))
			goto err5;
	memset(wq->sq.queue, 0, wq->sq.memsize);
	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
					  wq->rq.memsize, &(wq->rq.dma_addr),
					  GFP_KERNEL);
	if (!wq->rq.queue)
		goto err6;
	PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
	     __func__, wq->sq.queue,
	     (unsigned long long)virt_to_phys(wq->sq.queue),
	     wq->rq.queue,
	     (unsigned long long)virt_to_phys(wq->rq.queue));
	memset(wq->rq.queue, 0, wq->rq.memsize);
	dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

	wq->db = rdev->lldi.db_reg;
	wq->gts = rdev->lldi.gts_reg;
	if (user) {
		wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(wq->sq.qid << rdev->qpshift);
		wq->sq.udb &= PAGE_MASK;
		wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(wq->rq.qid << rdev->qpshift);
		wq->rq.udb &= PAGE_MASK;
	}
	wq->rdev = rdev;
	wq->rq.msn = 1;

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto err7;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(2) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		(t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) |
		V_FW_RI_RES_WR_IQID(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto err7;
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
	if (ret)
		goto err7;

	PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
	     __func__, wq->sq.qid, wq->rq.qid, wq->db,
	     (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);

	return 0;
err7:
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
err6:
	dealloc_sq(rdev, &wq->sq);
err5:
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
err4:
	kfree(wq->rq.sw_rq);
err3:
	kfree(wq->sq.sw_sq);
err2:
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
err1:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return -ENOMEM;
}

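/*
 * Copy the SGE data into the WQE as a FW_RI_DATA_IMMD chunk, wrapping
 * around the end of the SQ ring and zero-padding to a 16B boundary.
 */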
static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
		      struct ib_send_wr *wr, int max, u32 *plenp)
{
	u8 *dstp, *srcp;
	u32 plen = 0;
	int i;
	int rem, len;

	dstp = (u8 *)immdp->data;
	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) > max)
			return -EMSGSIZE;
		srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
		plen += wr->sg_list[i].length;
		rem = wr->sg_list[i].length;
		while (rem) {
			if (dstp == (u8 *)&sq->queue[sq->size])
				dstp = (u8 *)sq->queue;
			if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
				len = rem;
			else
				len = (u8 *)&sq->queue[sq->size] - dstp;
			memcpy(dstp, srcp, len);
			dstp += len;
			srcp += len;
			rem -= len;
		}
	}
	len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
	if (len)
		memset(dstp, 0, len);
	immdp->op = FW_RI_DATA_IMMD;
	immdp->r1 = 0;
	immdp->r2 = 0;
	immdp->immdlen = cpu_to_be32(plen);
	*plenp = plen;
	return 0;
}

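/*
 * Build a FW_RI_DATA_ISGL chunk from the ib_sge list, wrapping the
 * flit pointer back to queue_start whenever it hits queue_end.
 */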
static int build_isgl(__be64 *queue_start, __be64 *queue_end,
		      struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
		      int num_sge, u32 *plenp)
{
	int i;
	u32 plen = 0;
	__be64 *flitp = (__be64 *)isglp->sge;

	for (i = 0; i < num_sge; i++) {
		if ((plen + sg_list[i].length) < plen)
			return -EMSGSIZE;
		plen += sg_list[i].length;
		*flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
				     sg_list[i].length);
		if (++flitp == queue_end)
			flitp = queue_start;
		*flitp = cpu_to_be64(sg_list[i].addr);
		if (++flitp == queue_end)
			flitp = queue_start;
	}
	*flitp = (__force __be64)0;
	isglp->op = FW_RI_DATA_ISGL;
	isglp->r1 = 0;
	isglp->nsge = cpu_to_be16(num_sge);
	isglp->r2 = 0;
	if (plenp)
		*plenp = plen;
	return 0;
}

static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
			   struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	switch (wr->opcode) {
	case IB_WR_SEND:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
		wqe->send.stag_inv = 0;
		break;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
		wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
		break;

	default:
		return -EINVAL;
	}

	plen = 0;
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->send.u.immd_src, wr,
					 T4_MAX_SEND_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->send.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->send.u.immd_src[0].r1 = 0;
		wqe->send.u.immd_src[0].r2 = 0;
		wqe->send.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}

static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
			    struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	wqe->write.r2 = 0;
	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->write.u.immd_src, wr,
					 T4_MAX_WRITE_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->write.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->write.u.immd_src[0].r1 = 0;
		wqe->write.u.immd_src[0].r2 = 0;
		wqe->write.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}

static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
	if (wr->num_sge > 1)
		return -EINVAL;
	if (wr->num_sge) {
		wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
		wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr
							>> 32));
		wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
		wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
		wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
		wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
							 >> 32));
		wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
	} else {
		wqe->read.stag_src = cpu_to_be32(2);
		wqe->read.to_src_hi = 0;
		wqe->read.to_src_lo = 0;
		wqe->read.stag_sink = cpu_to_be32(2);
		wqe->read.plen = 0;
		wqe->read.to_sink_hi = 0;
		wqe->read.to_sink_lo = 0;
	}
	wqe->read.r2 = 0;
	wqe->read.r5 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
	return 0;
}

static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
			   struct ib_recv_wr *wr, u8 *len16)
{
	int ret;

	ret = build_isgl((__be64 *)qhp->wq.rq.queue,
			 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
			 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
	if (ret)
		return ret;
	*len16 = DIV_ROUND_UP(sizeof wqe->recv +
			      wr->num_sge * sizeof(struct fw_ri_sge), 16);
	return 0;
}

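/*
 * Build a fast-register WQE.  The PBL follows the FR fields as
 * FW_RI_DATA_IMMD data, wrapping at the end of the SQ ring, with the
 * tail of the 32B-rounded PBL zeroed.
 */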
static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
			 struct ib_send_wr *wr, u8 *len16)
{
	struct fw_ri_immd *imdp;
	__be64 *p;
	int i;
	int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
	int rem;

	if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
		return -EINVAL;

	wqe->fr.qpbinde_to_dcacpu = 0;
	wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12;
	wqe->fr.addr_type = FW_RI_VA_BASED_TO;
	wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags);
	wqe->fr.len_hi = 0;
	wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length);
	wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
	wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
	wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
					0xffffffff);
	WARN_ON(pbllen > T4_MAX_FR_IMMD);
	imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
	imdp->op = FW_RI_DATA_IMMD;
	imdp->r1 = 0;
	imdp->r2 = 0;
	imdp->immdlen = cpu_to_be32(pbllen);
	p = (__be64 *)(imdp + 1);
	rem = pbllen;
	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
		*p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
		rem -= sizeof *p;
		if (++p == (__be64 *)&sq->queue[sq->size])
			p = (__be64 *)sq->queue;
	}
	BUG_ON(rem < 0);
	while (rem) {
		*p = 0;
		rem -= sizeof *p;
		if (++p == (__be64 *)&sq->queue[sq->size])
			p = (__be64 *)sq->queue;
	}
	*len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen, 16);
	return 0;
}

static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
			  u8 *len16)
{
	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
	wqe->inv.r2 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
	return 0;
}

void c4iw_qp_add_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	atomic_inc(&(to_c4iw_qp(qp)->refcnt));
}

void c4iw_qp_rem_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
		wake_up(&(to_c4iw_qp(qp)->wait));
}

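/*
 * Post a chain of send work requests: under the QP lock, build one
 * WQE per WR directly in the SQ ring, then ring the SQ doorbell once
 * for the entire chain.
 */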
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr)
{
	int err = 0;
	u8 len16 = 0;
	enum fw_wr_opcodes fw_opcode = 0;
	enum fw_ri_wr_flags fw_flags;
	struct c4iw_qp *qhp;
	union t4_wr *wqe;
	u32 num_wrs;
	struct t4_swsqe *swsqe;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = t4_sq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (num_wrs == 0) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
		      qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);

		fw_flags = 0;
		if (wr->send_flags & IB_SEND_SOLICITED)
			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IB_SEND_SIGNALED)
			fw_flags |= FW_RI_COMPLETION_FLAG;
		swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
		switch (wr->opcode) {
		case IB_WR_SEND_WITH_INV:
		case IB_WR_SEND:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_READ_FENCE_FLAG;
			fw_opcode = FW_RI_SEND_WR;
			if (wr->opcode == IB_WR_SEND)
				swsqe->opcode = FW_RI_SEND;
			else
				swsqe->opcode = FW_RI_SEND_WITH_INV;
			err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_WRITE:
			fw_opcode = FW_RI_RDMA_WRITE_WR;
			swsqe->opcode = FW_RI_RDMA_WRITE;
			err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_READ_WITH_INV:
			fw_opcode = FW_RI_RDMA_READ_WR;
			swsqe->opcode = FW_RI_READ_REQ;
			if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
				fw_flags = FW_RI_RDMA_READ_INVALIDATE;
			else
				fw_flags = 0;
			err = build_rdma_read(wqe, wr, &len16);
			if (err)
				break;
			swsqe->read_len = wr->sg_list[0].length;
			if (!qhp->wq.sq.oldest_read)
				qhp->wq.sq.oldest_read = swsqe;
			break;
		case IB_WR_FAST_REG_MR:
			fw_opcode = FW_RI_FR_NSMR_WR;
			swsqe->opcode = FW_RI_FAST_REGISTER;
			err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_LOCAL_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
			fw_opcode = FW_RI_INV_LSTAG_WR;
			swsqe->opcode = FW_RI_LOCAL_INV;
			err = build_inv_stag(wqe, wr, &len16);
			break;
		default:
			PDBG("%s post of type=%d TBD!\n", __func__,
			     wr->opcode);
			err = -EINVAL;
		}
		if (err) {
			*bad_wr = wr;
			break;
		}
		swsqe->idx = qhp->wq.sq.pidx;
		swsqe->complete = 0;
		swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED);
		swsqe->wr_id = wr->wr_id;

		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);

		PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
		     __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
		     swsqe->opcode, swsqe->read_len);
		wr = wr->next;
		num_wrs--;
		t4_sq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
	}
	if (t4_wq_db_enabled(&qhp->wq))
		t4_ring_sq_db(&qhp->wq, idx);
	spin_unlock_irqrestore(&qhp->lock, flag);
	return err;
}

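/*
 * Post a chain of receive work requests, building one RQ WQE per WR
 * under the QP lock and ringing the RQ doorbell once at the end.
 */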
int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct c4iw_qp *qhp;
	union t4_recv_wr *wqe;
	u32 num_wrs;
	u8 len16 = 0;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = t4_rq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (wr->num_sge > T4_MAX_RECV_SGE) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
					   qhp->wq.rq.wq_pidx *
					   T4_EQ_ENTRY_SIZE);
		if (num_wrs)
			err = build_rdma_recv(qhp, wqe, wr, &len16);
		else
			err = -ENOMEM;
		if (err) {
			*bad_wr = wr;
			break;
		}

		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;

		wqe->recv.opcode = FW_RI_RECV_WR;
		wqe->recv.r1 = 0;
		wqe->recv.wrid = qhp->wq.rq.pidx;
		wqe->recv.r2[0] = 0;
		wqe->recv.r2[1] = 0;
		wqe->recv.r2[2] = 0;
		wqe->recv.len16 = len16;
		PDBG("%s cookie 0x%llx pidx %u\n", __func__,
		     (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
		t4_rq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
		wr = wr->next;
		num_wrs--;
	}
	if (t4_wq_db_enabled(&qhp->wq))
		t4_ring_rq_db(&qhp->wq, idx);
	spin_unlock_irqrestore(&qhp->lock, flag);
	return err;
}

int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)
{
	return -ENOSYS;
}

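/*
 * Map a CQE's hardware error status onto the IETF RDMAP/DDP/MPA layer
 * and error code carried in a TERMINATE message.
 */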
static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
				    u8 *ecode)
{
	int status;
	int tagged;
	int opcode;
	int rqtype;
	int send_inv;

	if (!err_cqe) {
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		return;
	}

	status = CQE_STATUS(err_cqe);
	opcode = CQE_OPCODE(err_cqe);
	rqtype = RQ_TYPE(err_cqe);
	send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
		   (opcode == FW_RI_SEND_WITH_SE_INV);
	tagged = (opcode == FW_RI_RDMA_WRITE) ||
		 (rqtype && (opcode == FW_RI_READ_RESP));

	switch (status) {
	case T4_ERR_STAG:
		if (send_inv) {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
			*ecode = RDMAP_CANT_INV_STAG;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_INV_STAG;
		}
		break;
	case T4_ERR_PDID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		if ((opcode == FW_RI_SEND_WITH_INV) ||
		    (opcode == FW_RI_SEND_WITH_SE_INV))
			*ecode = RDMAP_CANT_INV_STAG;
		else
			*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_QPID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_ACCESS:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_ACC_VIOL;
		break;
	case T4_ERR_WRAP:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_TO_WRAP;
		break;
	case T4_ERR_BOUND:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_BASE_BOUNDS;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_BASE_BOUNDS;
		}
		break;
	case T4_ERR_INVALIDATE_SHARED_MR:
	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_CANT_INV_STAG;
		break;
	case T4_ERR_ECC:
	case T4_ERR_ECC_PSTAG:
	case T4_ERR_INTERNAL_ERR:
		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_OUT_OF_RQE:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_NOBUF;
		break;
	case T4_ERR_PBL_ADDR_BOUND:
		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
		*ecode = DDPT_BASE_BOUNDS;
		break;
	case T4_ERR_CRC:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_CRC_ERR;
		break;
	case T4_ERR_MARKER:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_MARKER_ERR;
		break;
	case T4_ERR_PDU_LEN_ERR:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_MSG_TOOBIG;
		break;
	case T4_ERR_DDP_VERSION:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_INV_VERS;
		} else {
			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
			*ecode = DDPU_INV_VERS;
		}
		break;
	case T4_ERR_RDMA_VERSION:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_VERS;
		break;
	case T4_ERR_OPCODE:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_OPCODE;
		break;
	case T4_ERR_DDP_QUEUE_NUM:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_QN;
		break;
	case T4_ERR_MSN:
	case T4_ERR_MSN_GAP:
	case T4_ERR_MSN_RANGE:
	case T4_ERR_IRD_OVERFLOW:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_RANGE;
		break;
	case T4_ERR_TBIT:
		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_MO:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MO;
		break;
	default:
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	}
}

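/*
 * Send the peer a TERMINATE message via a FW_RI_INIT_WR, with the
 * layer and error code derived from the offending CQE, if any.
 */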
static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
			   gfp_t gfp)
{
	struct fw_ri_wr *wqe;
	struct sk_buff *skb;
	struct terminate_message *term;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     qhp->ep->hwtid);

	skb = alloc_skb(sizeof *wqe, gfp);
	if (!skb)
		return;
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(FW_WR_OP(FW_RI_INIT_WR));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(qhp->ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
	wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
	term = (struct terminate_message *)wqe->u.terminate.termmsg;
	build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
	c4iw_ofld_send(&qhp->rhp->rdev, skb);
}

/*
 * Assumes qhp->mutex is held; the qp and cq spinlocks are taken here.
 */
static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
		       struct c4iw_cq *schp)
{
	int count;
	int flushed;
	unsigned long flag;

	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, flag);
	spin_lock(&qhp->lock);
	c4iw_flush_hw_cq(&rchp->cq);
	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
	flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&rchp->lock, flag);
	if (flushed)
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&schp->lock, flag);
	spin_lock(&qhp->lock);
	c4iw_flush_hw_cq(&schp->cq);
	c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
	flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&schp->lock, flag);
	if (flushed)
		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
}

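/*
 * Flush pending work requests to the CQs as flushed completions.  For
 * user QPs, just mark the WQ and CQs in error and let the library
 * observe the state; kernel QPs are flushed here via __flush_qp().
 */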
static void flush_qp(struct c4iw_qp *qhp)
{
	struct c4iw_cq *rchp, *schp;

	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
	schp = get_chp(qhp->rhp, qhp->attr.scq);

	if (qhp->ibqp.uobject) {
		t4_set_wq_in_error(&qhp->wq);
		t4_set_cq_in_error(&rchp->cq);
		if (schp != rchp)
			t4_set_cq_in_error(&schp->cq);
		return;
	}
	__flush_qp(qhp, rchp, schp);
}

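/*
 * Post a FW_RI_INIT_WR of type FINI to take the connection out of
 * RDMA mode, and wait for the firmware's reply.
 */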
static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		     struct c4iw_ep *ep)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct sk_buff *skb;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     ep->hwtid);

	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP(FW_RI_INIT_WR) |
		FW_WR_COMPL(1));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
	wqe->cookie = (unsigned long) &ep->com.wr_wait;

	wqe->u.fini.type = FW_RI_TYPE_FINI;
	ret = c4iw_ofld_send(&rhp->rdev, skb);
	if (ret)
		goto out;

	ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid,
				  qhp->wq.sq.qid, __func__);
out:
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
}

static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
{
	memset(&init->u, 0, sizeof init->u);
	switch (p2p_type) {
	case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
		init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
		init->u.write.stag_sink = cpu_to_be32(1);
		init->u.write.to_sink = cpu_to_be64(1);
		init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
						   sizeof(struct fw_ri_immd),
						   16);
		break;
	case FW_RI_INIT_P2PTYPE_READ_REQ:
		init->u.write.opcode = FW_RI_RDMA_READ_WR;
		init->u.read.stag_src = cpu_to_be32(1);
		init->u.read.to_src_lo = cpu_to_be32(1);
		init->u.read.stag_sink = cpu_to_be32(1);
		init->u.read.to_sink_lo = cpu_to_be32(1);
		init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
		break;
	}
}

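/*
 * Post a FW_RI_INIT_WR of type INIT to move the connection into RDMA
 * mode, programming the MPA attributes, queue ids and ORD/IRD limits,
 * then wait for the firmware's reply.
 */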
static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct sk_buff *skb;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     qhp->ep->hwtid);

	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP(FW_RI_INIT_WR) |
		FW_WR_COMPL(1));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(qhp->ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	wqe->cookie = (unsigned long) &qhp->ep->com.wr_wait;

	wqe->u.init.type = FW_RI_TYPE_INIT;
	wqe->u.init.mpareqbit_p2ptype =
		V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
		V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
	wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
	if (qhp->attr.mpa_attr.recv_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.xmit_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.crc_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;

	wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
			      FW_RI_QP_RDMA_WRITE_ENABLE |
			      FW_RI_QP_BIND_ENABLE;
	if (!qhp->ibqp.uobject)
		wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
				       FW_RI_QP_STAG0_ENABLE;
	wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
	wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
	wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
	wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
	wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
	wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
	wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
	wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
	wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
	wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
	wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
					   rhp->rdev.lldi.vr->rq.start);
	if (qhp->attr.mpa_attr.initiator)
		build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);

	ret = c4iw_ofld_send(&rhp->rdev, skb);
	if (ret)
		goto out;

	ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait,
				  qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
out:
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
}

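/*
 * Drive the QP state machine (IDLE/RTS/CLOSING/TERMINATE/ERROR) under
 * qhp->mutex.  Terminate and disconnect processing and the final EP
 * deref are deferred until the mutex is dropped.
 */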
int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		   enum c4iw_qp_attr_mask mask,
		   struct c4iw_qp_attributes *attrs,
		   int internal)
{
	int ret = 0;
	struct c4iw_qp_attributes newattr = qhp->attr;
	int disconnect = 0;
	int terminate = 0;
	int abort = 0;
	int free = 0;
	struct c4iw_ep *ep = NULL;

	PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__,
	     qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
	     (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

	mutex_lock(&qhp->mutex);

	/* Process attr changes if in IDLE */
	if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
		if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
			ret = -EIO;
			goto out;
		}
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
			newattr.enable_rdma_read = attrs->enable_rdma_read;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
			newattr.enable_rdma_write = attrs->enable_rdma_write;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
			newattr.enable_bind = attrs->enable_bind;
		if (mask & C4IW_QP_ATTR_MAX_ORD) {
			if (attrs->max_ord > c4iw_max_read_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ord = attrs->max_ord;
		}
		if (mask & C4IW_QP_ATTR_MAX_IRD) {
			if (attrs->max_ird > c4iw_max_read_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ird = attrs->max_ird;
		}
		qhp->attr = newattr;
	}

	if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
		goto out;
	if (qhp->attr.state == attrs->next_state)
		goto out;

	switch (qhp->attr.state) {
	case C4IW_QP_STATE_IDLE:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_RTS:
			if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
				ret = -EINVAL;
				goto out;
			}
			if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
				ret = -EINVAL;
				goto out;
			}
			qhp->attr.mpa_attr = attrs->mpa_attr;
			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
			qhp->ep = qhp->attr.llp_stream_handle;
			set_state(qhp, C4IW_QP_STATE_RTS);

			/*
			 * Ref the endpoint here and deref when we
			 * disassociate the endpoint from the QP.  This
			 * happens in CLOSING->IDLE transition or *->ERROR
			 * transition.
			 */
			c4iw_get_ep(&qhp->ep->com);
			ret = rdma_init(rhp, qhp);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_ERROR:
			set_state(qhp, C4IW_QP_STATE_ERROR);
			flush_qp(qhp);
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_RTS:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_CLOSING:
			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
			set_state(qhp, C4IW_QP_STATE_CLOSING);
			ep = qhp->ep;
			if (!internal) {
				abort = 0;
				disconnect = 1;
				c4iw_get_ep(&qhp->ep->com);
			}
			ret = rdma_fini(rhp, qhp, ep);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_TERMINATE:
			set_state(qhp, C4IW_QP_STATE_TERMINATE);
			if (qhp->ibqp.uobject)
				t4_set_wq_in_error(&qhp->wq);
			ep = qhp->ep;
			if (!internal)
				terminate = 1;
			disconnect = 1;
			c4iw_get_ep(&qhp->ep->com);
			break;
		case C4IW_QP_STATE_ERROR:
			set_state(qhp, C4IW_QP_STATE_ERROR);
			if (!internal) {
				abort = 1;
				disconnect = 1;
				ep = qhp->ep;
				c4iw_get_ep(&qhp->ep->com);
			}
			goto err;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_CLOSING:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		switch (attrs->next_state) {
		case C4IW_QP_STATE_IDLE:
			flush_qp(qhp);
			set_state(qhp, C4IW_QP_STATE_IDLE);
			qhp->attr.llp_stream_handle = NULL;
			c4iw_put_ep(&qhp->ep->com);
			qhp->ep = NULL;
			wake_up(&qhp->wait);
			break;
		case C4IW_QP_STATE_ERROR:
			goto err;
		default:
			ret = -EINVAL;
			goto err;
		}
		break;
	case C4IW_QP_STATE_ERROR:
		if (attrs->next_state != C4IW_QP_STATE_IDLE) {
			ret = -EINVAL;
			goto out;
		}
		if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
			ret = -EINVAL;
			goto out;
		}
		set_state(qhp, C4IW_QP_STATE_IDLE);
		break;
	case C4IW_QP_STATE_TERMINATE:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		goto err;
		break;
	default:
		printk(KERN_ERR "%s in a bad state %d\n",
		       __func__, qhp->attr.state);
		ret = -EINVAL;
		goto err;
		break;
	}
	goto out;
err:
	PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
	     qhp->wq.sq.qid);

	/* disassociate the LLP connection */
	qhp->attr.llp_stream_handle = NULL;
	if (!ep)
		ep = qhp->ep;
	qhp->ep = NULL;
	set_state(qhp, C4IW_QP_STATE_ERROR);
	free = 1;
	wake_up(&qhp->wait);
	BUG_ON(!ep);
	flush_qp(qhp);
out:
	mutex_unlock(&qhp->mutex);

	if (terminate)
		post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);

	/*
	 * If disconnect is 1, then we need to initiate a disconnect
	 * on the EP.  This can be a normal close (RTS->CLOSING) or
	 * an abnormal close (RTS/CLOSING->ERROR).
	 */
	if (disconnect) {
		c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
							 GFP_KERNEL);
		c4iw_put_ep(&ep->com);
	}

	/*
	 * If free is 1, then we've disassociated the EP from the QP
	 * and we need to dereference the EP.
	 */
	if (free)
		c4iw_put_ep(&ep->com);
	PDBG("%s exit state %d\n", __func__, qhp->attr.state);
	return ret;
}

int c4iw_destroy_qp(struct ib_qp *ib_qp)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_qp_attributes attrs;
	struct c4iw_ucontext *ucontext;

	qhp = to_c4iw_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = C4IW_QP_STATE_ERROR;
	c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	ucontext = ib_qp->uobject ?
		   to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
	kfree(qhp);
	return 0;
}

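/*
 * Create an RC QP: size and allocate the hardware queues, register
 * the qid, and for user QPs pass the queue and doorbell mappings
 * (plus the MA sync register when the SQ is on-chip) back to the
 * library as mmap keys in the create response.
 */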
struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_pd *php;
	struct c4iw_cq *schp;
	struct c4iw_cq *rchp;
	struct c4iw_create_qp_resp uresp;
	int sqsize, rqsize;
	struct c4iw_ucontext *ucontext;
	int ret;
	struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4, *mm5 = NULL;

	PDBG("%s ib_pd %p\n", __func__, pd);

	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
		return ERR_PTR(-EINVAL);

	rqsize = roundup(attrs->cap.max_recv_wr + 1, 16);
	if (rqsize > T4_MAX_RQ_SIZE)
		return ERR_PTR(-E2BIG);

	sqsize = roundup(attrs->cap.max_send_wr + 1, 16);
	if (sqsize > T4_MAX_SQ_SIZE)
		return ERR_PTR(-E2BIG);

	ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;

	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.sq.size = sqsize;
	qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue;
	qhp->wq.rq.size = rqsize;
	qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue;

	if (ucontext) {
		qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
		qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
	}

	PDBG("%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu\n",
	     __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);

	ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize - 1;
	attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = C4IW_QP_STATE_IDLE;
	qhp->attr.next_state = C4IW_QP_STATE_IDLE;
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;
	spin_lock_init(&qhp->lock);
	mutex_init(&qhp->mutex);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);

	ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
	if (ret)
		goto err2;

	if (udata) {
		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
		if (!mm1) {
			ret = -ENOMEM;
			goto err3;
		}
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			ret = -ENOMEM;
			goto err4;
		}
		mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
		if (!mm3) {
			ret = -ENOMEM;
			goto err5;
		}
		mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
		if (!mm4) {
			ret = -ENOMEM;
			goto err6;
		}
		if (t4_sq_onchip(&qhp->wq.sq)) {
			mm5 = kmalloc(sizeof *mm5, GFP_KERNEL);
			if (!mm5) {
				ret = -ENOMEM;
				goto err7;
			}
			uresp.flags = C4IW_QPF_ONCHIP;
		} else
			uresp.flags = 0;
		uresp.qid_mask = rhp->rdev.qpmask;
		uresp.sqid = qhp->wq.sq.qid;
		uresp.sq_size = qhp->wq.sq.size;
		uresp.sq_memsize = qhp->wq.sq.memsize;
		uresp.rqid = qhp->wq.rq.qid;
		uresp.rq_size = qhp->wq.rq.size;
		uresp.rq_memsize = qhp->wq.rq.memsize;
		spin_lock(&ucontext->mmap_lock);
		if (mm5) {
			uresp.ma_sync_key = ucontext->key;
			ucontext->key += PAGE_SIZE;
		}
		uresp.sq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.sq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (ret)
			goto err8;
		mm1->key = uresp.sq_key;
		mm1->addr = qhp->wq.sq.phys_addr;
		mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.rq_key;
		mm2->addr = virt_to_phys(qhp->wq.rq.queue);
		mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
		insert_mmap(ucontext, mm2);
		mm3->key = uresp.sq_db_gts_key;
		mm3->addr = qhp->wq.sq.udb;
		mm3->len = PAGE_SIZE;
		insert_mmap(ucontext, mm3);
		mm4->key = uresp.rq_db_gts_key;
		mm4->addr = qhp->wq.rq.udb;
		mm4->len = PAGE_SIZE;
		insert_mmap(ucontext, mm4);
		if (mm5) {
			mm5->key = uresp.ma_sync_key;
			mm5->addr = (pci_resource_start(rhp->rdev.lldi.pdev, 0)
				    + A_PCIE_MA_SYNC) & PAGE_MASK;
			mm5->len = PAGE_SIZE;
			insert_mmap(ucontext, mm5);
		}
	}
	qhp->ibqp.qp_num = qhp->wq.sq.qid;
	init_timer(&(qhp->timer));
	PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
	     __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	     qhp->wq.sq.qid);
	return &qhp->ibqp;
err8:
	kfree(mm5);
err7:
	kfree(mm4);
err6:
	kfree(mm3);
err5:
	kfree(mm2);
err4:
	kfree(mm1);
err3:
	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
err2:
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(qhp);
	return ERR_PTR(ret);
}

int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	enum c4iw_qp_attr_mask mask = 0;
	struct c4iw_qp_attributes attrs;

	PDBG("%s ib_qp %p\n", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_c4iw_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = c4iw_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
				  IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				   IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(C4IW_QP_ATTR_ENABLE_RDMA_READ |
			 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
			 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
}

struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
{
	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}