1/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "iw_cxgb4.h"
33
34static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
35 struct c4iw_dev_ucontext *uctx)
36{
37 /*
38 * uP clears EQ contexts when the connection exits rdma mode,
39 * so no need to post a RESET WR for these EQs.
40 */
41 dma_free_coherent(&(rdev->lldi.pdev->dev),
42 wq->rq.memsize, wq->rq.queue,
43 dma_unmap_addr(&wq->rq, mapping));
44 dma_free_coherent(&(rdev->lldi.pdev->dev),
45 wq->sq.memsize, wq->sq.queue,
46 dma_unmap_addr(&wq->sq, mapping));
47 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
48 kfree(wq->rq.sw_rq);
49 kfree(wq->sq.sw_sq);
50 c4iw_put_qpid(rdev, wq->rq.qid, uctx);
51 c4iw_put_qpid(rdev, wq->sq.qid, uctx);
52 return 0;
53}
54
55static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
56 struct t4_cq *rcq, struct t4_cq *scq,
57 struct c4iw_dev_ucontext *uctx)
58{
59 int user = (uctx != &rdev->uctx);
60 struct fw_ri_res_wr *res_wr;
61 struct fw_ri_res *res;
62 int wr_len;
63 struct c4iw_wr_wait wr_wait;
64 struct sk_buff *skb;
65 int ret;
66 int eqsize;
67
68 wq->sq.qid = c4iw_get_qpid(rdev, uctx);
69 if (!wq->sq.qid)
70 return -ENOMEM;
71
72 wq->rq.qid = c4iw_get_qpid(rdev, uctx);
73 if (!wq->rq.qid)
74 goto err1;
75
76 if (!user) {
77 wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
78 GFP_KERNEL);
79 if (!wq->sq.sw_sq)
80 goto err2;
81
82 wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
83 GFP_KERNEL);
84 if (!wq->rq.sw_rq)
85 goto err3;
86 }
87
88 /*
89 * RQT must be a power of 2.
90 */
91 wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
92 wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
93 if (!wq->rq.rqt_hwaddr)
94 goto err4;
95
96 wq->sq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
97 wq->sq.memsize, &(wq->sq.dma_addr),
98 GFP_KERNEL);
99 if (!wq->sq.queue)
100 goto err5;
101 memset(wq->sq.queue, 0, wq->sq.memsize);
102 dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
103
104 wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
105 wq->rq.memsize, &(wq->rq.dma_addr),
106 GFP_KERNEL);
107 if (!wq->rq.queue)
108 goto err6;
109 PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
110 __func__, wq->sq.queue,
111 (unsigned long long)virt_to_phys(wq->sq.queue),
112 wq->rq.queue,
113 (unsigned long long)virt_to_phys(wq->rq.queue));
114 memset(wq->rq.queue, 0, wq->rq.memsize);
115 dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
116
117 wq->db = rdev->lldi.db_reg;
118 wq->gts = rdev->lldi.gts_reg;
119 if (user) {
120 wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
121 (wq->sq.qid << rdev->qpshift);
122 wq->sq.udb &= PAGE_MASK;
123 wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
124 (wq->rq.qid << rdev->qpshift);
125 wq->rq.udb &= PAGE_MASK;
126 }
127 wq->rdev = rdev;
128 wq->rq.msn = 1;
129
130 /* build fw_ri_res_wr */
131 wr_len = sizeof *res_wr + 2 * sizeof *res;
132
133 skb = alloc_skb(wr_len, GFP_KERNEL);
134 if (!skb) {
135 ret = -ENOMEM;
136 goto err7;
137 }
138 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
139
140 res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
141 memset(res_wr, 0, wr_len);
142 res_wr->op_nres = cpu_to_be32(
143 FW_WR_OP(FW_RI_RES_WR) |
144 V_FW_RI_RES_WR_NRES(2) |
145 FW_WR_COMPL(1));
146 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
147 res_wr->cookie = (unsigned long) &wr_wait;
148 res = res_wr->res;
149 res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
150 res->u.sqrq.op = FW_RI_RES_OP_WRITE;
151
152 /*
153 * eqsize is the number of 64B entries plus the status page size.
154 */
155 eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
156
157 res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
158 V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
159 V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */
160 V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */
161 V_FW_RI_RES_WR_IQID(scq->cqid));
162 res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
163 V_FW_RI_RES_WR_DCAEN(0) |
164 V_FW_RI_RES_WR_DCACPU(0) |
165 V_FW_RI_RES_WR_FBMIN(2) |
166 V_FW_RI_RES_WR_FBMAX(3) |
167 V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
168 V_FW_RI_RES_WR_CIDXFTHRESH(0) |
169 V_FW_RI_RES_WR_EQSIZE(eqsize));
170 res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
171 res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
172 res++;
173 res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
174 res->u.sqrq.op = FW_RI_RES_OP_WRITE;
175
176 /*
177 * eqsize is the number of 64B entries plus the status page size.
178 */
179 eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
180 res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
181 V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
182 V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */
183 V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */
184 V_FW_RI_RES_WR_IQID(rcq->cqid));
185 res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
186 V_FW_RI_RES_WR_DCAEN(0) |
187 V_FW_RI_RES_WR_DCACPU(0) |
188 V_FW_RI_RES_WR_FBMIN(2) |
189 V_FW_RI_RES_WR_FBMAX(3) |
190 V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
191 V_FW_RI_RES_WR_CIDXFTHRESH(0) |
192 V_FW_RI_RES_WR_EQSIZE(eqsize));
193 res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
194 res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
195
196 c4iw_init_wr_wait(&wr_wait);
197
198 ret = c4iw_ofld_send(rdev, skb);
199 if (ret)
200 goto err7;
201 wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
202 if (!wr_wait.done) {
203 printk(KERN_ERR MOD "Device %s not responding!\n",
204 pci_name(rdev->lldi.pdev));
205 rdev->flags = T4_FATAL_ERROR;
206 ret = -EIO;
207 } else
208 ret = wr_wait.ret;
209 if (ret)
210 goto err7;
211
212 PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
213 __func__, wq->sq.qid, wq->rq.qid, wq->db,
214 (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);
215
216 return 0;
217err7:
218 dma_free_coherent(&(rdev->lldi.pdev->dev),
219 wq->rq.memsize, wq->rq.queue,
220 dma_unmap_addr(&wq->rq, mapping));
221err6:
222 dma_free_coherent(&(rdev->lldi.pdev->dev),
223 wq->sq.memsize, wq->sq.queue,
224 dma_unmap_addr(&wq->sq, mapping));
225err5:
226 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
227err4:
228 kfree(wq->rq.sw_rq);
229err3:
230 kfree(wq->sq.sw_sq);
231err2:
232 c4iw_put_qpid(rdev, wq->rq.qid, uctx);
233err1:
234 c4iw_put_qpid(rdev, wq->sq.qid, uctx);
235 return -ENOMEM;
236}
237
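/*
 * Copy the SGE payload into the WQE as immediate data, wrapping at the
 * end of the SQ and zero-padding out to a 16-byte boundary.
 */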
238static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
239 struct ib_send_wr *wr, int max, u32 *plenp)
240{
241 u8 *dstp, *srcp;
242 u32 plen = 0;
243 int i;
244 int rem, len;
245
246 dstp = (u8 *)immdp->data;
247 for (i = 0; i < wr->num_sge; i++) {
248 if ((plen + wr->sg_list[i].length) > max)
249 return -EMSGSIZE;
250 srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
251 plen += wr->sg_list[i].length;
252 rem = wr->sg_list[i].length;
253 while (rem) {
254 if (dstp == (u8 *)&sq->queue[sq->size])
255 dstp = (u8 *)sq->queue;
256 if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
257 len = rem;
258 else
259 len = (u8 *)&sq->queue[sq->size] - dstp;
260 memcpy(dstp, srcp, len);
261 dstp += len;
262 srcp += len;
263 rem -= len;
264 }
265 }
266 len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
267 if (len)
268 memset(dstp, 0, len);
269 immdp->op = FW_RI_DATA_IMMD;
270 immdp->r1 = 0;
271 immdp->r2 = 0;
272 immdp->immdlen = cpu_to_be32(plen);
273 *plenp = plen;
274 return 0;
275}
276
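/*
 * Build an ingress SGL (lkey/len/addr flits) directly in the queue,
 * wrapping the flit pointer back to queue_start when queue_end is reached.
 */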
277static int build_isgl(__be64 *queue_start, __be64 *queue_end,
278 struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
279 int num_sge, u32 *plenp)
280
281{
282 int i;
283 u32 plen = 0;
284 __be64 *flitp = (__be64 *)isglp->sge;
285
286 for (i = 0; i < num_sge; i++) {
287 if ((plen + sg_list[i].length) < plen)
288 return -EMSGSIZE;
289 plen += sg_list[i].length;
290 *flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
291 sg_list[i].length);
292 if (++flitp == queue_end)
293 flitp = queue_start;
294 *flitp = cpu_to_be64(sg_list[i].addr);
295 if (++flitp == queue_end)
296 flitp = queue_start;
297 }
298 *flitp = (__force __be64)0;
299 isglp->op = FW_RI_DATA_ISGL;
300 isglp->r1 = 0;
301 isglp->nsge = cpu_to_be16(num_sge);
302 isglp->r2 = 0;
303 if (plenp)
304 *plenp = plen;
305 return 0;
306}
307
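/*
 * Build a SEND or SEND_WITH_INV work request: IB_SEND_INLINE payloads
 * are copied as immediate data, otherwise an ingress SGL is built.
 */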
308static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
309 struct ib_send_wr *wr, u8 *len16)
310{
311 u32 plen;
312 int size;
313 int ret;
314
315 if (wr->num_sge > T4_MAX_SEND_SGE)
316 return -EINVAL;
317 switch (wr->opcode) {
318 case IB_WR_SEND:
319 if (wr->send_flags & IB_SEND_SOLICITED)
320 wqe->send.sendop_pkd = cpu_to_be32(
321 V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
322 else
323 wqe->send.sendop_pkd = cpu_to_be32(
324 V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
325 wqe->send.stag_inv = 0;
326 break;
327 case IB_WR_SEND_WITH_INV:
328 if (wr->send_flags & IB_SEND_SOLICITED)
329 wqe->send.sendop_pkd = cpu_to_be32(
330 V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
331 else
332 wqe->send.sendop_pkd = cpu_to_be32(
333 V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
334 wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
335 break;
336
337 default:
338 return -EINVAL;
339 }
340
341 plen = 0;
342 if (wr->num_sge) {
343 if (wr->send_flags & IB_SEND_INLINE) {
344 ret = build_immd(sq, wqe->send.u.immd_src, wr,
345 T4_MAX_SEND_INLINE, &plen);
346 if (ret)
347 return ret;
348 size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
349 plen;
350 } else {
351 ret = build_isgl((__be64 *)sq->queue,
352 (__be64 *)&sq->queue[sq->size],
353 wqe->send.u.isgl_src,
354 wr->sg_list, wr->num_sge, &plen);
355 if (ret)
356 return ret;
357 size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
358 wr->num_sge * sizeof(struct fw_ri_sge);
359 }
360 } else {
361 wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
362 wqe->send.u.immd_src[0].r1 = 0;
363 wqe->send.u.immd_src[0].r2 = 0;
364 wqe->send.u.immd_src[0].immdlen = 0;
365 size = sizeof wqe->send + sizeof(struct fw_ri_immd);
366 plen = 0;
367 }
368 *len16 = DIV_ROUND_UP(size, 16);
369 wqe->send.plen = cpu_to_be32(plen);
370 return 0;
371}
372
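/* Build an RDMA WRITE work request; payload handling mirrors build_rdma_send(). */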
373static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
374 struct ib_send_wr *wr, u8 *len16)
375{
376 u32 plen;
377 int size;
378 int ret;
379
380 if (wr->num_sge > T4_MAX_SEND_SGE)
381 return -EINVAL;
382 wqe->write.r2 = 0;
383 wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
384 wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
385 if (wr->num_sge) {
386 if (wr->send_flags & IB_SEND_INLINE) {
387 ret = build_immd(sq, wqe->write.u.immd_src, wr,
388 T4_MAX_WRITE_INLINE, &plen);
389 if (ret)
390 return ret;
391 size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
392 plen;
393 } else {
394 ret = build_isgl((__be64 *)sq->queue,
395 (__be64 *)&sq->queue[sq->size],
396 wqe->write.u.isgl_src,
397 wr->sg_list, wr->num_sge, &plen);
398 if (ret)
399 return ret;
400 size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
401 wr->num_sge * sizeof(struct fw_ri_sge);
402 }
403 } else {
404 wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
405 wqe->write.u.immd_src[0].r1 = 0;
406 wqe->write.u.immd_src[0].r2 = 0;
407 wqe->write.u.immd_src[0].immdlen = 0;
408 size = sizeof wqe->write + sizeof(struct fw_ri_immd);
409 plen = 0;
410 }
411 *len16 = DIV_ROUND_UP(size, 16);
412 wqe->write.plen = cpu_to_be32(plen);
413 return 0;
414}
415
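/* Build an RDMA READ work request; with no SGEs a zero-length read is built. */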
416static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
417{
418 if (wr->num_sge > 1)
419 return -EINVAL;
420 if (wr->num_sge) {
421 wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
422 wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr
423 >> 32));
424 wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
425 wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
426 wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
427 wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
428 >> 32));
429 wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
430 } else {
431 wqe->read.stag_src = cpu_to_be32(2);
432 wqe->read.to_src_hi = 0;
433 wqe->read.to_src_lo = 0;
434 wqe->read.stag_sink = cpu_to_be32(2);
435 wqe->read.plen = 0;
436 wqe->read.to_sink_hi = 0;
437 wqe->read.to_sink_lo = 0;
438 }
439 wqe->read.r2 = 0;
440 wqe->read.r5 = 0;
441 *len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
442 return 0;
443}
444
445static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
446 struct ib_recv_wr *wr, u8 *len16)
447{
448 int ret;
449
450 ret = build_isgl((__be64 *)qhp->wq.rq.queue,
451 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
452 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
453 if (ret)
454 return ret;
455 *len16 = DIV_ROUND_UP(sizeof wqe->recv +
456 wr->num_sge * sizeof(struct fw_ri_sge), 16);
457 return 0;
458}
459
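/*
 * Build a fast-register WQE.  The PBL is passed inline as immediate
 * data unless it exceeds T4_MAX_FR_IMMD, in which case a DSGL pointing
 * at the page-list DMA buffer is used instead.
 */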
460static int build_fastreg(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
461{
462
463 struct fw_ri_immd *imdp;
464 __be64 *p;
465 int i;
466 int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
467
468 if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
469 return -EINVAL;
470
471 wqe->fr.qpbinde_to_dcacpu = 0;
472 wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12;
473 wqe->fr.addr_type = FW_RI_VA_BASED_TO;
474 wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags);
475 wqe->fr.len_hi = 0;
476 wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length);
477 wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
478 wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
479 wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
480 0xffffffff);
481 if (pbllen > T4_MAX_FR_IMMD) {
482 struct c4iw_fr_page_list *c4pl =
483 to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
484 struct fw_ri_dsgl *sglp;
485
486 sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
487 sglp->op = FW_RI_DATA_DSGL;
488 sglp->r1 = 0;
489 sglp->nsge = cpu_to_be16(1);
490 sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
491 sglp->len0 = cpu_to_be32(pbllen);
492
493 *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *sglp, 16);
494 } else {
495 imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
496 imdp->op = FW_RI_DATA_IMMD;
497 imdp->r1 = 0;
498 imdp->r2 = 0;
499 imdp->immdlen = cpu_to_be32(pbllen);
500 p = (__be64 *)(imdp + 1);
501 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++)
502 *p = cpu_to_be64(
503 (u64)wr->wr.fast_reg.page_list->page_list[i]);
504 *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen,
505 16);
506 }
507 return 0;
508}
509
510static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
511 u8 *len16)
512{
513 wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
514 wqe->inv.r2 = 0;
515 *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
516 return 0;
517}
518
519void c4iw_qp_add_ref(struct ib_qp *qp)
520{
521 PDBG("%s ib_qp %p\n", __func__, qp);
522 atomic_inc(&(to_c4iw_qp(qp)->refcnt));
523}
524
525void c4iw_qp_rem_ref(struct ib_qp *qp)
526{
527 PDBG("%s ib_qp %p\n", __func__, qp);
528 if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
529 wake_up(&(to_c4iw_qp(qp)->wait));
530}
531
532int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
533 struct ib_send_wr **bad_wr)
534{
535 int err = 0;
536 u8 len16 = 0;
537 enum fw_wr_opcodes fw_opcode = 0;
538 enum fw_ri_wr_flags fw_flags;
539 struct c4iw_qp *qhp;
540 union t4_wr *wqe;
541 u32 num_wrs;
542 struct t4_swsqe *swsqe;
543 unsigned long flag;
544 u16 idx = 0;
545
546 qhp = to_c4iw_qp(ibqp);
547 spin_lock_irqsave(&qhp->lock, flag);
548 if (t4_wq_in_error(&qhp->wq)) {
549 spin_unlock_irqrestore(&qhp->lock, flag);
550 return -EINVAL;
551 }
552 num_wrs = t4_sq_avail(&qhp->wq);
553 if (num_wrs == 0) {
554 spin_unlock_irqrestore(&qhp->lock, flag);
555 return -ENOMEM;
556 }
557 while (wr) {
558 if (num_wrs == 0) {
559 err = -ENOMEM;
560 *bad_wr = wr;
561 break;
562 }
563 wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
564 qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
565
566 fw_flags = 0;
567 if (wr->send_flags & IB_SEND_SOLICITED)
568 fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
569 if (wr->send_flags & IB_SEND_SIGNALED)
570 fw_flags |= FW_RI_COMPLETION_FLAG;
571 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
572 switch (wr->opcode) {
573 case IB_WR_SEND_WITH_INV:
574 case IB_WR_SEND:
575 if (wr->send_flags & IB_SEND_FENCE)
576 fw_flags |= FW_RI_READ_FENCE_FLAG;
577 fw_opcode = FW_RI_SEND_WR;
578 if (wr->opcode == IB_WR_SEND)
579 swsqe->opcode = FW_RI_SEND;
580 else
581 swsqe->opcode = FW_RI_SEND_WITH_INV;
582 err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
583 break;
584 case IB_WR_RDMA_WRITE:
585 fw_opcode = FW_RI_RDMA_WRITE_WR;
586 swsqe->opcode = FW_RI_RDMA_WRITE;
587 err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
588 break;
589 case IB_WR_RDMA_READ:
590 case IB_WR_RDMA_READ_WITH_INV:
591 fw_opcode = FW_RI_RDMA_READ_WR;
592 swsqe->opcode = FW_RI_READ_REQ;
593 if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
594 fw_flags |= FW_RI_RDMA_READ_INVALIDATE;
595 else
596 fw_flags = 0;
597 err = build_rdma_read(wqe, wr, &len16);
598 if (err)
599 break;
600 swsqe->read_len = wr->sg_list[0].length;
601 if (!qhp->wq.sq.oldest_read)
602 qhp->wq.sq.oldest_read = swsqe;
603 break;
604 case IB_WR_FAST_REG_MR:
605 fw_opcode = FW_RI_FR_NSMR_WR;
606 swsqe->opcode = FW_RI_FAST_REGISTER;
607 err = build_fastreg(wqe, wr, &len16);
608 break;
609 case IB_WR_LOCAL_INV:
610 if (wr->send_flags & IB_SEND_FENCE)
611 fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
612 fw_opcode = FW_RI_INV_LSTAG_WR;
613 swsqe->opcode = FW_RI_LOCAL_INV;
614 err = build_inv_stag(wqe, wr, &len16);
615 break;
616 default:
617 PDBG("%s post of type=%d TBD!\n", __func__,
618 wr->opcode);
619 err = -EINVAL;
620 }
621 if (err) {
622 *bad_wr = wr;
623 break;
624 }
625 swsqe->idx = qhp->wq.sq.pidx;
626 swsqe->complete = 0;
627 swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED);
628 swsqe->wr_id = wr->wr_id;
629
630 init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
631
632 PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
633 __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
634 swsqe->opcode, swsqe->read_len);
635 wr = wr->next;
636 num_wrs--;
637 t4_sq_produce(&qhp->wq, len16);
638 idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
639 }
640 if (t4_wq_db_enabled(&qhp->wq))
641 t4_ring_sq_db(&qhp->wq, idx);
642 spin_unlock_irqrestore(&qhp->lock, flag);
643 return err;
644}
645
646int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
647 struct ib_recv_wr **bad_wr)
648{
649 int err = 0;
650 struct c4iw_qp *qhp;
651 union t4_recv_wr *wqe;
652 u32 num_wrs;
653 u8 len16 = 0;
654 unsigned long flag;
655 u16 idx = 0;
656
657 qhp = to_c4iw_qp(ibqp);
658 spin_lock_irqsave(&qhp->lock, flag);
659 if (t4_wq_in_error(&qhp->wq)) {
660 spin_unlock_irqrestore(&qhp->lock, flag);
661 return -EINVAL;
662 }
663 num_wrs = t4_rq_avail(&qhp->wq);
664 if (num_wrs == 0) {
665 spin_unlock_irqrestore(&qhp->lock, flag);
666 return -ENOMEM;
667 }
668 while (wr) {
669 if (wr->num_sge > T4_MAX_RECV_SGE) {
670 err = -EINVAL;
671 *bad_wr = wr;
672 break;
673 }
674 wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
675 qhp->wq.rq.wq_pidx *
676 T4_EQ_ENTRY_SIZE);
677 if (num_wrs)
678 err = build_rdma_recv(qhp, wqe, wr, &len16);
679 else
680 err = -ENOMEM;
681 if (err) {
682 *bad_wr = wr;
683 break;
684 }
685
686 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
687
688 wqe->recv.opcode = FW_RI_RECV_WR;
689 wqe->recv.r1 = 0;
690 wqe->recv.wrid = qhp->wq.rq.pidx;
691 wqe->recv.r2[0] = 0;
692 wqe->recv.r2[1] = 0;
693 wqe->recv.r2[2] = 0;
694 wqe->recv.len16 = len16;
695 PDBG("%s cookie 0x%llx pidx %u\n", __func__,
696 (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
697 t4_rq_produce(&qhp->wq, len16);
698 idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
699 wr = wr->next;
700 num_wrs--;
701 }
702 if (t4_wq_db_enabled(&qhp->wq))
703 t4_ring_rq_db(&qhp->wq, idx);
704 spin_unlock_irqrestore(&qhp->lock, flag);
705 return err;
706}
707
708int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)
709{
710 return -ENOSYS;
711}
712
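/*
 * Map the status of an error CQE to the layer type and error code
 * carried in a TERMINATE message.
 */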
713static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
714 u8 *ecode)
715{
716 int status;
717 int tagged;
718 int opcode;
719 int rqtype;
720 int send_inv;
721
722 if (!err_cqe) {
723 *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
724 *ecode = 0;
725 return;
726 }
727
728 status = CQE_STATUS(err_cqe);
729 opcode = CQE_OPCODE(err_cqe);
730 rqtype = RQ_TYPE(err_cqe);
731 send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
732 (opcode == FW_RI_SEND_WITH_SE_INV);
733 tagged = (opcode == FW_RI_RDMA_WRITE) ||
734 (rqtype && (opcode == FW_RI_READ_RESP));
735
736 switch (status) {
737 case T4_ERR_STAG:
738 if (send_inv) {
739 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
740 *ecode = RDMAP_CANT_INV_STAG;
741 } else {
742 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
743 *ecode = RDMAP_INV_STAG;
744 }
745 break;
746 case T4_ERR_PDID:
747 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
748 if ((opcode == FW_RI_SEND_WITH_INV) ||
749 (opcode == FW_RI_SEND_WITH_SE_INV))
750 *ecode = RDMAP_CANT_INV_STAG;
751 else
752 *ecode = RDMAP_STAG_NOT_ASSOC;
753 break;
754 case T4_ERR_QPID:
755 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
756 *ecode = RDMAP_STAG_NOT_ASSOC;
757 break;
758 case T4_ERR_ACCESS:
759 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
760 *ecode = RDMAP_ACC_VIOL;
761 break;
762 case T4_ERR_WRAP:
763 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
764 *ecode = RDMAP_TO_WRAP;
765 break;
766 case T4_ERR_BOUND:
767 if (tagged) {
768 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
769 *ecode = DDPT_BASE_BOUNDS;
770 } else {
771 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
772 *ecode = RDMAP_BASE_BOUNDS;
773 }
774 break;
775 case T4_ERR_INVALIDATE_SHARED_MR:
776 case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
777 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
778 *ecode = RDMAP_CANT_INV_STAG;
779 break;
780 case T4_ERR_ECC:
781 case T4_ERR_ECC_PSTAG:
782 case T4_ERR_INTERNAL_ERR:
783 *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
784 *ecode = 0;
785 break;
786 case T4_ERR_OUT_OF_RQE:
787 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
788 *ecode = DDPU_INV_MSN_NOBUF;
789 break;
790 case T4_ERR_PBL_ADDR_BOUND:
791 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
792 *ecode = DDPT_BASE_BOUNDS;
793 break;
794 case T4_ERR_CRC:
795 *layer_type = LAYER_MPA|DDP_LLP;
796 *ecode = MPA_CRC_ERR;
797 break;
798 case T4_ERR_MARKER:
799 *layer_type = LAYER_MPA|DDP_LLP;
800 *ecode = MPA_MARKER_ERR;
801 break;
802 case T4_ERR_PDU_LEN_ERR:
803 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
804 *ecode = DDPU_MSG_TOOBIG;
805 break;
806 case T4_ERR_DDP_VERSION:
807 if (tagged) {
808 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
809 *ecode = DDPT_INV_VERS;
810 } else {
811 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
812 *ecode = DDPU_INV_VERS;
813 }
814 break;
815 case T4_ERR_RDMA_VERSION:
816 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
817 *ecode = RDMAP_INV_VERS;
818 break;
819 case T4_ERR_OPCODE:
820 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
821 *ecode = RDMAP_INV_OPCODE;
822 break;
823 case T4_ERR_DDP_QUEUE_NUM:
824 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
825 *ecode = DDPU_INV_QN;
826 break;
827 case T4_ERR_MSN:
828 case T4_ERR_MSN_GAP:
829 case T4_ERR_MSN_RANGE:
830 case T4_ERR_IRD_OVERFLOW:
831 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
832 *ecode = DDPU_INV_MSN_RANGE;
833 break;
834 case T4_ERR_TBIT:
835 *layer_type = LAYER_DDP|DDP_LOCAL_CATA;
836 *ecode = 0;
837 break;
838 case T4_ERR_MO:
839 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
840 *ecode = DDPU_INV_MO;
841 break;
842 default:
843 *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
844 *ecode = 0;
845 break;
846 }
847}
848
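/* Post a zero-byte RDMA READ work request on the QP's tx queue. */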
849int c4iw_post_zb_read(struct c4iw_qp *qhp)
850{
851 union t4_wr *wqe;
852 struct sk_buff *skb;
853 u8 len16;
854
855 PDBG("%s enter\n", __func__);
856 skb = alloc_skb(40, GFP_KERNEL);
857 if (!skb) {
858 printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
859 return -ENOMEM;
860 }
861 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
862
863 wqe = (union t4_wr *)skb_put(skb, sizeof wqe->read);
864 memset(wqe, 0, sizeof wqe->read);
865 wqe->read.r2 = cpu_to_be64(0);
866 wqe->read.stag_sink = cpu_to_be32(1);
867 wqe->read.to_sink_hi = cpu_to_be32(0);
868 wqe->read.to_sink_lo = cpu_to_be32(1);
869 wqe->read.stag_src = cpu_to_be32(1);
870 wqe->read.plen = cpu_to_be32(0);
871 wqe->read.to_src_hi = cpu_to_be32(0);
872 wqe->read.to_src_lo = cpu_to_be32(1);
873 len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
874 init_wr_hdr(wqe, 0, FW_RI_RDMA_READ_WR, FW_RI_COMPLETION_FLAG, len16);
875
876 return c4iw_ofld_send(&qhp->rhp->rdev, skb);
877}
878
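/* Build and send a FW_RI_TYPE_TERMINATE work request for this QP. */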
879static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
880 gfp_t gfp)
881{
882 struct fw_ri_wr *wqe;
883 struct sk_buff *skb;
884 struct terminate_message *term;
885
886 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
887 qhp->ep->hwtid);
888
889 skb = alloc_skb(sizeof *wqe, gfp);
890 if (!skb)
891 return;
892 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
893
894 wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
895 memset(wqe, 0, sizeof *wqe);
896 wqe->op_compl = cpu_to_be32(FW_WR_OP(FW_RI_INIT_WR));
897 wqe->flowid_len16 = cpu_to_be32(
898 FW_WR_FLOWID(qhp->ep->hwtid) |
899 FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
900
901 wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
902 wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
903 term = (struct terminate_message *)wqe->u.terminate.termmsg;
904 build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
905 c4iw_ofld_send(&qhp->rhp->rdev, skb);
906}
907
908/*
909 * Assumes qhp lock is held.
910 */
911static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
912 struct c4iw_cq *schp, unsigned long *flag)
913{
914 int count;
915 int flushed;
916
917 PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
918 /* take a ref on the qhp since we must release the lock */
919 atomic_inc(&qhp->refcnt);
920 spin_unlock_irqrestore(&qhp->lock, *flag);
921
922 /* locking hierarchy: cq lock first, then qp lock. */
923 spin_lock_irqsave(&rchp->lock, *flag);
924 spin_lock(&qhp->lock);
925 c4iw_flush_hw_cq(&rchp->cq);
926 c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
927 flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
928 spin_unlock(&qhp->lock);
929 spin_unlock_irqrestore(&rchp->lock, *flag);
930 if (flushed)
931 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
932
933 /* locking hierarchy: cq lock first, then qp lock. */
934 spin_lock_irqsave(&schp->lock, *flag);
935 spin_lock(&qhp->lock);
936 c4iw_flush_hw_cq(&schp->cq);
937 c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
938 flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
939 spin_unlock(&qhp->lock);
940 spin_unlock_irqrestore(&schp->lock, *flag);
941 if (flushed)
942 (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
943
944 /* deref */
945 if (atomic_dec_and_test(&qhp->refcnt))
946 wake_up(&qhp->wait);
947
948 spin_lock_irqsave(&qhp->lock, *flag);
949}
950
951static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag)
952{
953 struct c4iw_cq *rchp, *schp;
954
955 rchp = get_chp(qhp->rhp, qhp->attr.rcq);
956 schp = get_chp(qhp->rhp, qhp->attr.scq);
957
958 if (qhp->ibqp.uobject) {
959 t4_set_wq_in_error(&qhp->wq);
960 t4_set_cq_in_error(&rchp->cq);
961 if (schp != rchp)
962 t4_set_cq_in_error(&schp->cq);
963 return;
964 }
965 __flush_qp(qhp, rchp, schp, flag);
966}
967
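/*
 * Post a FW_RI_TYPE_FINI work request for this QP and wait, with a
 * timeout, for the firmware's reply.
 */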
968static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
969 struct c4iw_ep *ep)
970{
971 struct fw_ri_wr *wqe;
972 int ret;
973 struct c4iw_wr_wait wr_wait;
974 struct sk_buff *skb;
975
976 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
977 ep->hwtid);
978
979 skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
980 if (!skb)
981 return -ENOMEM;
982 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
983
984 wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
985 memset(wqe, 0, sizeof *wqe);
986 wqe->op_compl = cpu_to_be32(
987 FW_WR_OP(FW_RI_INIT_WR) |
988 FW_WR_COMPL(1));
989 wqe->flowid_len16 = cpu_to_be32(
990 FW_WR_FLOWID(ep->hwtid) |
991 FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
992 wqe->cookie = (unsigned long) &wr_wait;
993
994 wqe->u.fini.type = FW_RI_TYPE_FINI;
995 c4iw_init_wr_wait(&wr_wait);
996 ret = c4iw_ofld_send(&rhp->rdev, skb);
997 if (ret)
998 goto out;
999
1000 wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
1001 if (!wr_wait.done) {
1002 printk(KERN_ERR MOD "Device %s not responding!\n",
1003 pci_name(rhp->rdev.lldi.pdev));
1004 rhp->rdev.flags = T4_FATAL_ERROR;
1005 ret = -EIO;
1006 } else {
1007 ret = wr_wait.ret;
1008 if (ret)
1009 printk(KERN_WARNING MOD
1010 "%s: Abnormal close qpid %d ret %u\n",
1011 pci_name(rhp->rdev.lldi.pdev), qhp->wq.sq.qid,
1012 ret);
1013 }
1014out:
1015 PDBG("%s ret %d\n", __func__, ret);
1016 return ret;
1017}
1018
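/* Build the zero-length WRITE or READ WQE used as the p2p_type RTR message. */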
1019static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
1020{
1021 memset(&init->u, 0, sizeof init->u);
1022 switch (p2p_type) {
1023 case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
1024 init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
1025 init->u.write.stag_sink = cpu_to_be32(1);
1026 init->u.write.to_sink = cpu_to_be64(1);
1027 init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
1028 init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
1029 sizeof(struct fw_ri_immd),
1030 16);
1031 break;
1032 case FW_RI_INIT_P2PTYPE_READ_REQ:
1033 init->u.write.opcode = FW_RI_RDMA_READ_WR;
1034 init->u.read.stag_src = cpu_to_be32(1);
1035 init->u.read.to_src_lo = cpu_to_be32(1);
1036 init->u.read.stag_sink = cpu_to_be32(1);
1037 init->u.read.to_sink_lo = cpu_to_be32(1);
1038 init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
1039 break;
1040 }
1041}
1042
1043static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
1044{
1045 struct fw_ri_wr *wqe;
1046 int ret;
1047 struct c4iw_wr_wait wr_wait;
1048 struct sk_buff *skb;
1049
1050 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
1051 qhp->ep->hwtid);
1052
1053 skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
1054 if (!skb)
1055 return -ENOMEM;
1056 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
1057
1058 wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
1059 memset(wqe, 0, sizeof *wqe);
1060 wqe->op_compl = cpu_to_be32(
1061 FW_WR_OP(FW_RI_INIT_WR) |
1062 FW_WR_COMPL(1));
1063 wqe->flowid_len16 = cpu_to_be32(
1064 FW_WR_FLOWID(qhp->ep->hwtid) |
1065 FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
1066
1067 wqe->cookie = (unsigned long) &wr_wait;
1068
1069 wqe->u.init.type = FW_RI_TYPE_INIT;
1070 wqe->u.init.mpareqbit_p2ptype =
1071 V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
1072 V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
1073 wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
1074 if (qhp->attr.mpa_attr.recv_marker_enabled)
1075 wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
1076 if (qhp->attr.mpa_attr.xmit_marker_enabled)
1077 wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
1078 if (qhp->attr.mpa_attr.crc_enabled)
1079 wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;
1080
1081 wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
1082 FW_RI_QP_RDMA_WRITE_ENABLE |
1083 FW_RI_QP_BIND_ENABLE;
1084 if (!qhp->ibqp.uobject)
1085 wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
1086 FW_RI_QP_STAG0_ENABLE;
1087 wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
1088 wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
1089 wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
1090 wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
1091 wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
1092 wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
1093 wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
1094 wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
1095 wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
1096 wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
1097 wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
1098 wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
1099 wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
1100 rhp->rdev.lldi.vr->rq.start);
1101 if (qhp->attr.mpa_attr.initiator)
1102 build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
1103
1104 c4iw_init_wr_wait(&wr_wait);
1105 ret = c4iw_ofld_send(&rhp->rdev, skb);
1106 if (ret)
1107 goto out;
1108
1109 wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
1110 if (!wr_wait.done) {
1111 printk(KERN_ERR MOD "Device %s not responding!\n",
1112 pci_name(rhp->rdev.lldi.pdev));
1113 rhp->rdev.flags = T4_FATAL_ERROR;
1114 ret = -EIO;
1115 } else
1116 ret = wr_wait.ret;
1117out:
1118 PDBG("%s ret %d\n", __func__, ret);
1119 return ret;
1120}
1121
1122int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1123 enum c4iw_qp_attr_mask mask,
1124 struct c4iw_qp_attributes *attrs,
1125 int internal)
1126{
1127 int ret = 0;
1128 struct c4iw_qp_attributes newattr = qhp->attr;
1129 unsigned long flag;
1130 int disconnect = 0;
1131 int terminate = 0;
1132 int abort = 0;
1133 int free = 0;
1134 struct c4iw_ep *ep = NULL;
1135
1136 PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__,
1137 qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
1138 (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
1139
1140 spin_lock_irqsave(&qhp->lock, flag);
1141
1142 /* Process attr changes if in IDLE */
1143 if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
1144 if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
1145 ret = -EIO;
1146 goto out;
1147 }
1148 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
1149 newattr.enable_rdma_read = attrs->enable_rdma_read;
1150 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
1151 newattr.enable_rdma_write = attrs->enable_rdma_write;
1152 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
1153 newattr.enable_bind = attrs->enable_bind;
1154 if (mask & C4IW_QP_ATTR_MAX_ORD) {
1155 if (attrs->max_ord > c4iw_max_read_depth) {
1156 ret = -EINVAL;
1157 goto out;
1158 }
1159 newattr.max_ord = attrs->max_ord;
1160 }
1161 if (mask & C4IW_QP_ATTR_MAX_IRD) {
1162 if (attrs->max_ird > c4iw_max_read_depth) {
1163 ret = -EINVAL;
1164 goto out;
1165 }
1166 newattr.max_ird = attrs->max_ird;
1167 }
1168 qhp->attr = newattr;
1169 }
1170
1171 if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
1172 goto out;
1173 if (qhp->attr.state == attrs->next_state)
1174 goto out;
1175
1176 switch (qhp->attr.state) {
1177 case C4IW_QP_STATE_IDLE:
1178 switch (attrs->next_state) {
1179 case C4IW_QP_STATE_RTS:
1180 if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
1181 ret = -EINVAL;
1182 goto out;
1183 }
1184 if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
1185 ret = -EINVAL;
1186 goto out;
1187 }
1188 qhp->attr.mpa_attr = attrs->mpa_attr;
1189 qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
1190 qhp->ep = qhp->attr.llp_stream_handle;
1191 qhp->attr.state = C4IW_QP_STATE_RTS;
1192
1193 /*
1194 * Ref the endpoint here and deref when we
1195 * disassociate the endpoint from the QP. This
1196 * happens in CLOSING->IDLE transition or *->ERROR
1197 * transition.
1198 */
1199 c4iw_get_ep(&qhp->ep->com);
1200 spin_unlock_irqrestore(&qhp->lock, flag);
1201 ret = rdma_init(rhp, qhp);
1202 spin_lock_irqsave(&qhp->lock, flag);
1203 if (ret)
1204 goto err;
1205 break;
1206 case C4IW_QP_STATE_ERROR:
1207 qhp->attr.state = C4IW_QP_STATE_ERROR;
1208 flush_qp(qhp, &flag);
1209 break;
1210 default:
1211 ret = -EINVAL;
1212 goto out;
1213 }
1214 break;
1215 case C4IW_QP_STATE_RTS:
1216 switch (attrs->next_state) {
1217 case C4IW_QP_STATE_CLOSING:
1218 BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
1219 qhp->attr.state = C4IW_QP_STATE_CLOSING;
1220 ep = qhp->ep;
1221 if (!internal) {
1222 abort = 0;
1223 disconnect = 1;
1224 c4iw_get_ep(&ep->com);
1225 }
1226 spin_unlock_irqrestore(&qhp->lock, flag);
1227 ret = rdma_fini(rhp, qhp, ep);
1228 spin_lock_irqsave(&qhp->lock, flag);
1229 if (ret) {
1230 c4iw_get_ep(&ep->com);
1231 disconnect = abort = 1;
1232 goto err;
1233 }
1234 break;
1235 case C4IW_QP_STATE_TERMINATE:
1236 qhp->attr.state = C4IW_QP_STATE_TERMINATE;
1237 if (qhp->ibqp.uobject)
1238 t4_set_wq_in_error(&qhp->wq);
1239 ep = qhp->ep;
1240 c4iw_get_ep(&ep->com);
1241 if (!internal)
1242 terminate = 1;
1243 disconnect = 1;
1244 break;
1245 case C4IW_QP_STATE_ERROR:
1246 qhp->attr.state = C4IW_QP_STATE_ERROR;
1247 if (!internal) {
1248 abort = 1;
1249 disconnect = 1;
1250 ep = qhp->ep;
1251 c4iw_get_ep(&ep->com);
1252 }
1253 goto err;
1254 break;
1255 default:
1256 ret = -EINVAL;
1257 goto out;
1258 }
1259 break;
1260 case C4IW_QP_STATE_CLOSING:
1261 if (!internal) {
1262 ret = -EINVAL;
1263 goto out;
1264 }
1265 switch (attrs->next_state) {
1266 case C4IW_QP_STATE_IDLE:
1267 flush_qp(qhp, &flag);
1268 qhp->attr.state = C4IW_QP_STATE_IDLE;
1269 qhp->attr.llp_stream_handle = NULL;
1270 c4iw_put_ep(&qhp->ep->com);
1271 qhp->ep = NULL;
1272 wake_up(&qhp->wait);
1273 break;
1274 case C4IW_QP_STATE_ERROR:
1275 goto err;
1276 default:
1277 ret = -EINVAL;
1278 goto err;
1279 }
1280 break;
1281 case C4IW_QP_STATE_ERROR:
1282 if (attrs->next_state != C4IW_QP_STATE_IDLE) {
1283 ret = -EINVAL;
1284 goto out;
1285 }
1286 if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
1287 ret = -EINVAL;
1288 goto out;
1289 }
1290 qhp->attr.state = C4IW_QP_STATE_IDLE;
1291 break;
1292 case C4IW_QP_STATE_TERMINATE:
1293 if (!internal) {
1294 ret = -EINVAL;
1295 goto out;
1296 }
1297 goto err;
1298 break;
1299 default:
1300 printk(KERN_ERR "%s in a bad state %d\n",
1301 __func__, qhp->attr.state);
1302 ret = -EINVAL;
1303 goto err;
1304 break;
1305 }
1306 goto out;
1307err:
1308 PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
1309 qhp->wq.sq.qid);
1310
1311 /* disassociate the LLP connection */
1312 qhp->attr.llp_stream_handle = NULL;
1313 if (!ep)
1314 ep = qhp->ep;
1315 qhp->ep = NULL;
1316 qhp->attr.state = C4IW_QP_STATE_ERROR;
1317 free = 1;
1318 wake_up(&qhp->wait);
1319 BUG_ON(!ep);
1320 flush_qp(qhp, &flag);
1321out:
1322 spin_unlock_irqrestore(&qhp->lock, flag);
1323
1324 if (terminate)
1325 post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);
1326
1327 /*
1328 * If disconnect is 1, then we need to initiate a disconnect
1329 * on the EP. This can be a normal close (RTS->CLOSING) or
1330 * an abnormal close (RTS/CLOSING->ERROR).
1331 */
1332 if (disconnect) {
1333 c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
1334 GFP_KERNEL);
1335 c4iw_put_ep(&ep->com);
1336 }
1337
1338 /*
1339 * If free is 1, then we've disassociated the EP from the QP
1340 * and we need to dereference the EP.
1341 */
1342 if (free)
1343 c4iw_put_ep(&ep->com);
1344
1345 PDBG("%s exit state %d\n", __func__, qhp->attr.state);
1346 return ret;
1347}
1348
1349int c4iw_destroy_qp(struct ib_qp *ib_qp)
1350{
1351 struct c4iw_dev *rhp;
1352 struct c4iw_qp *qhp;
1353 struct c4iw_qp_attributes attrs;
1354 struct c4iw_ucontext *ucontext;
1355
1356 qhp = to_c4iw_qp(ib_qp);
1357 rhp = qhp->rhp;
1358
1359 attrs.next_state = C4IW_QP_STATE_ERROR;
1360 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
1361 wait_event(qhp->wait, !qhp->ep);
1362
1363 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
1364 atomic_dec(&qhp->refcnt);
1365 wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
1366
1367 ucontext = ib_qp->uobject ?
1368 to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
1369 destroy_qp(&rhp->rdev, &qhp->wq,
1370 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
1371
1372 PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
1373 kfree(qhp);
1374 return 0;
1375}
1376
1377struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
1378 struct ib_udata *udata)
1379{
1380 struct c4iw_dev *rhp;
1381 struct c4iw_qp *qhp;
1382 struct c4iw_pd *php;
1383 struct c4iw_cq *schp;
1384 struct c4iw_cq *rchp;
1385 struct c4iw_create_qp_resp uresp;
1386 int sqsize, rqsize;
1387 struct c4iw_ucontext *ucontext;
1388 int ret;
1389 struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4;
1390
1391 PDBG("%s ib_pd %p\n", __func__, pd);
1392
1393 if (attrs->qp_type != IB_QPT_RC)
1394 return ERR_PTR(-EINVAL);
1395
1396 php = to_c4iw_pd(pd);
1397 rhp = php->rhp;
1398 schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
1399 rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
1400 if (!schp || !rchp)
1401 return ERR_PTR(-EINVAL);
1402
1403 if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
1404 return ERR_PTR(-EINVAL);
1405
1406 rqsize = roundup(attrs->cap.max_recv_wr + 1, 16);
1407 if (rqsize > T4_MAX_RQ_SIZE)
1408 return ERR_PTR(-E2BIG);
1409
1410 sqsize = roundup(attrs->cap.max_send_wr + 1, 16);
1411 if (sqsize > T4_MAX_SQ_SIZE)
1412 return ERR_PTR(-E2BIG);
1413
1414 ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
1415
1416
1417 qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
1418 if (!qhp)
1419 return ERR_PTR(-ENOMEM);
1420 qhp->wq.sq.size = sqsize;
1421 qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue;
1422 qhp->wq.rq.size = rqsize;
1423 qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue;
1424
1425 if (ucontext) {
1426 qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
1427 qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
1428 }
1429
1430 PDBG("%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu\n",
1431 __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);
1432
1433 ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
1434 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
1435 if (ret)
1436 goto err1;
1437
1438 attrs->cap.max_recv_wr = rqsize - 1;
1439 attrs->cap.max_send_wr = sqsize - 1;
1440 attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;
1441
1442 qhp->rhp = rhp;
1443 qhp->attr.pd = php->pdid;
1444 qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
1445 qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
1446 qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
1447 qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
1448 qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
1449 qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
1450 qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
1451 qhp->attr.state = C4IW_QP_STATE_IDLE;
1452 qhp->attr.next_state = C4IW_QP_STATE_IDLE;
1453 qhp->attr.enable_rdma_read = 1;
1454 qhp->attr.enable_rdma_write = 1;
1455 qhp->attr.enable_bind = 1;
1456 qhp->attr.max_ord = 1;
1457 qhp->attr.max_ird = 1;
1458 spin_lock_init(&qhp->lock);
1459 init_waitqueue_head(&qhp->wait);
1460 atomic_set(&qhp->refcnt, 1);
1461
1462 ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
1463 if (ret)
1464 goto err2;
1465
1466 if (udata) {
1467 mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
1468 if (!mm1) {
1469 ret = -ENOMEM;
1470 goto err3;
1471 }
1472 mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
1473 if (!mm2) {
1474 ret = -ENOMEM;
1475 goto err4;
1476 }
1477 mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
1478 if (!mm3) {
1479 ret = -ENOMEM;
1480 goto err5;
1481 }
1482 mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
1483 if (!mm4) {
1484 ret = -ENOMEM;
1485 goto err6;
1486 }
1487
1488 uresp.qid_mask = rhp->rdev.qpmask;
1489 uresp.sqid = qhp->wq.sq.qid;
1490 uresp.sq_size = qhp->wq.sq.size;
1491 uresp.sq_memsize = qhp->wq.sq.memsize;
1492 uresp.rqid = qhp->wq.rq.qid;
1493 uresp.rq_size = qhp->wq.rq.size;
1494 uresp.rq_memsize = qhp->wq.rq.memsize;
1495 spin_lock(&ucontext->mmap_lock);
1496 uresp.sq_key = ucontext->key;
1497 ucontext->key += PAGE_SIZE;
1498 uresp.rq_key = ucontext->key;
1499 ucontext->key += PAGE_SIZE;
1500 uresp.sq_db_gts_key = ucontext->key;
1501 ucontext->key += PAGE_SIZE;
1502 uresp.rq_db_gts_key = ucontext->key;
1503 ucontext->key += PAGE_SIZE;
1504 spin_unlock(&ucontext->mmap_lock);
1505 ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
1506 if (ret)
1507 goto err7;
1508 mm1->key = uresp.sq_key;
1509 mm1->addr = virt_to_phys(qhp->wq.sq.queue);
1510 mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
1511 insert_mmap(ucontext, mm1);
1512 mm2->key = uresp.rq_key;
1513 mm2->addr = virt_to_phys(qhp->wq.rq.queue);
1514 mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
1515 insert_mmap(ucontext, mm2);
1516 mm3->key = uresp.sq_db_gts_key;
1517 mm3->addr = qhp->wq.sq.udb;
1518 mm3->len = PAGE_SIZE;
1519 insert_mmap(ucontext, mm3);
1520 mm4->key = uresp.rq_db_gts_key;
1521 mm4->addr = qhp->wq.rq.udb;
1522 mm4->len = PAGE_SIZE;
1523 insert_mmap(ucontext, mm4);
1524 }
1525 qhp->ibqp.qp_num = qhp->wq.sq.qid;
1526 init_timer(&(qhp->timer));
1527 PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
1528 __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
1529 qhp->wq.sq.qid);
1530 return &qhp->ibqp;
1531err7:
1532 kfree(mm4);
1533err6:
1534 kfree(mm3);
1535err5:
1536 kfree(mm2);
1537err4:
1538 kfree(mm1);
1539err3:
1540 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
1541err2:
1542 destroy_qp(&rhp->rdev, &qhp->wq,
1543 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
1544err1:
1545 kfree(qhp);
1546 return ERR_PTR(ret);
1547}
1548
1549int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1550 int attr_mask, struct ib_udata *udata)
1551{
1552 struct c4iw_dev *rhp;
1553 struct c4iw_qp *qhp;
1554 enum c4iw_qp_attr_mask mask = 0;
1555 struct c4iw_qp_attributes attrs;
1556
1557 PDBG("%s ib_qp %p\n", __func__, ibqp);
1558
1559 /* iwarp does not support the RTR state */
1560 if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
1561 attr_mask &= ~IB_QP_STATE;
1562
1563 /* Make sure we still have something left to do */
1564 if (!attr_mask)
1565 return 0;
1566
1567 memset(&attrs, 0, sizeof attrs);
1568 qhp = to_c4iw_qp(ibqp);
1569 rhp = qhp->rhp;
1570
1571 attrs.next_state = c4iw_convert_state(attr->qp_state);
1572 attrs.enable_rdma_read = (attr->qp_access_flags &
1573 IB_ACCESS_REMOTE_READ) ? 1 : 0;
1574 attrs.enable_rdma_write = (attr->qp_access_flags &
1575 IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
1576 attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;
1577
1578
1579 mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
1580 mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
1581 (C4IW_QP_ATTR_ENABLE_RDMA_READ |
1582 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
1583 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;
1584
1585 return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
1586}
1587
1588struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
1589{
1590 PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
1591 return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
1592}