/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "iw_cxgb4.h"

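/*
 * Free the DMA queue memory, RQT range, and qids backing an SQ/RQ pair.
 */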
static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		      struct c4iw_dev_ucontext *uctx)
{
	/*
	 * uP clears EQ contexts when the connection exits rdma mode,
	 * so no need to post a RESET WR for these EQs.
	 */
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  pci_unmap_addr(&wq->rq, mapping));
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->sq.memsize, wq->sq.queue,
			  pci_unmap_addr(&wq->sq, mapping));
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
	kfree(wq->rq.sw_rq);
	kfree(wq->sq.sw_sq);
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return 0;
}

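/*
 * Allocate the qids, software queue shadows, RQT range, and
 * DMA-coherent queue memory for an SQ/RQ pair, then post a
 * FW_RI_RES_WR so the firmware writes both egress queue contexts.
 * Sleeps waiting for the firmware reply.
 */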
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx)
{
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret;
	int eqsize;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid)
		goto err1;

	if (!user) {
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
				       GFP_KERNEL);
		if (!wq->sq.sw_sq)
			goto err2;

		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
				       GFP_KERNEL);
		if (!wq->rq.sw_rq)
			goto err3;
	}

	/*
	 * RQT must be a power of 2.
	 */
	wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr)
		goto err4;

	wq->sq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
					  wq->sq.memsize, &(wq->sq.dma_addr),
					  GFP_KERNEL);
	if (!wq->sq.queue)
		goto err5;
	memset(wq->sq.queue, 0, wq->sq.memsize);
	pci_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
					  wq->rq.memsize, &(wq->rq.dma_addr),
					  GFP_KERNEL);
	if (!wq->rq.queue)
		goto err6;
	PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
	     __func__, wq->sq.queue,
	     (unsigned long long)virt_to_phys(wq->sq.queue),
	     wq->rq.queue,
	     (unsigned long long)virt_to_phys(wq->rq.queue));
	memset(wq->rq.queue, 0, wq->rq.memsize);
	pci_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

	wq->db = rdev->lldi.db_reg;
	wq->gts = rdev->lldi.gts_reg;
	if (user) {
		wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
			     (wq->sq.qid << rdev->qpshift);
		wq->sq.udb &= PAGE_MASK;
		wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
			     (wq->rq.qid << rdev->qpshift);
		wq->rq.udb &= PAGE_MASK;
	}
	wq->rdev = rdev;
	wq->rq.msn = 1;

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
	if (!skb) {
		ret = -ENOMEM;
		goto err7;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(2) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (u64)&wr_wait;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(3) |
		V_FW_RI_RES_WR_FBMAX(3) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(3) |
		V_FW_RI_RES_WR_FBMAX(3) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto err7;
	wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
	if (!wr_wait.done) {
		printk(KERN_ERR MOD "Device %s not responding!\n",
		       pci_name(rdev->lldi.pdev));
		rdev->flags = T4_FATAL_ERROR;
		ret = -EIO;
	} else
		ret = wr_wait.ret;
	if (ret)
		goto err7;

	PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
	     __func__, wq->sq.qid, wq->rq.qid, wq->db,
	     (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);

	return 0;
err7:
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  pci_unmap_addr(&wq->rq, mapping));
err6:
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->sq.memsize, wq->sq.queue,
			  pci_unmap_addr(&wq->sq, mapping));
err5:
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
err4:
	kfree(wq->rq.sw_rq);
err3:
	kfree(wq->sq.sw_sq);
err2:
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
err1:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return -ENOMEM;
}

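/*
 * Build a FW_RI_SEND_WR from the verbs work request: data is either
 * copied inline into the WQE or described by an immediate SGL.
 * Returns the WR length in 16-byte units via *len16.
 */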
static int build_rdma_send(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
	int i;
	u32 plen;
	int size;
	u8 *datap;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	switch (wr->opcode) {
	case IB_WR_SEND:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
		wqe->send.stag_inv = 0;
		break;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
		wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
		break;

	default:
		return -EINVAL;
	}
	plen = 0;
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			datap = (u8 *)wqe->send.u.immd_src[0].data;
			for (i = 0; i < wr->num_sge; i++) {
				if ((plen + wr->sg_list[i].length) >
				    T4_MAX_SEND_INLINE) {
					return -EMSGSIZE;
				}
				plen += wr->sg_list[i].length;
				memcpy(datap,
				       (void *)(unsigned long)wr->sg_list[i].addr,
				       wr->sg_list[i].length);
				datap += wr->sg_list[i].length;
			}
			wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
			wqe->send.u.immd_src[0].r1 = 0;
			wqe->send.u.immd_src[0].r2 = 0;
			wqe->send.u.immd_src[0].immdlen = cpu_to_be32(plen);
			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			for (i = 0; i < wr->num_sge; i++) {
				if ((plen + wr->sg_list[i].length) < plen)
					return -EMSGSIZE;
				plen += wr->sg_list[i].length;
				wqe->send.u.isgl_src[0].sge[i].stag =
					cpu_to_be32(wr->sg_list[i].lkey);
				wqe->send.u.isgl_src[0].sge[i].len =
					cpu_to_be32(wr->sg_list[i].length);
				wqe->send.u.isgl_src[0].sge[i].to =
					cpu_to_be64(wr->sg_list[i].addr);
			}
			wqe->send.u.isgl_src[0].op = FW_RI_DATA_ISGL;
			wqe->send.u.isgl_src[0].r1 = 0;
			wqe->send.u.isgl_src[0].nsge = cpu_to_be16(wr->num_sge);
			wqe->send.u.isgl_src[0].r2 = 0;
			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->send.u.immd_src[0].r1 = 0;
		wqe->send.u.immd_src[0].r2 = 0;
		wqe->send.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}

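/*
 * Build a FW_RI_RDMA_WRITE_WR.  As with sends, the payload is either
 * copied inline or described by an immediate SGL, and *len16 returns
 * the WR length in 16-byte units.
 */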
static int build_rdma_write(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
	int i;
	u32 plen;
	int size;
	u8 *datap;

	if (wr->num_sge > T4_MAX_WRITE_SGE)
		return -EINVAL;
	wqe->write.r2 = 0;
	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
	plen = 0;
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			datap = (u8 *)wqe->write.u.immd_src[0].data;
			for (i = 0; i < wr->num_sge; i++) {
				if ((plen + wr->sg_list[i].length) >
				    T4_MAX_WRITE_INLINE) {
					return -EMSGSIZE;
				}
				plen += wr->sg_list[i].length;
				memcpy(datap,
				       (void *)(unsigned long)wr->sg_list[i].addr,
				       wr->sg_list[i].length);
				datap += wr->sg_list[i].length;
			}
			wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
			wqe->write.u.immd_src[0].r1 = 0;
			wqe->write.u.immd_src[0].r2 = 0;
			wqe->write.u.immd_src[0].immdlen = cpu_to_be32(plen);
			size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			for (i = 0; i < wr->num_sge; i++) {
				if ((plen + wr->sg_list[i].length) < plen)
					return -EMSGSIZE;
				plen += wr->sg_list[i].length;
				wqe->write.u.isgl_src[0].sge[i].stag =
					cpu_to_be32(wr->sg_list[i].lkey);
				wqe->write.u.isgl_src[0].sge[i].len =
					cpu_to_be32(wr->sg_list[i].length);
				wqe->write.u.isgl_src[0].sge[i].to =
					cpu_to_be64(wr->sg_list[i].addr);
			}
			wqe->write.u.isgl_src[0].op = FW_RI_DATA_ISGL;
			wqe->write.u.isgl_src[0].r1 = 0;
			wqe->write.u.isgl_src[0].nsge =
				cpu_to_be16(wr->num_sge);
			wqe->write.u.isgl_src[0].r2 = 0;
			size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->write.u.immd_src[0].r1 = 0;
		wqe->write.u.immd_src[0].r2 = 0;
		wqe->write.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}

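/*
 * Build a FW_RI_RDMA_READ_WR.  Only a single sink SGE is supported;
 * a request with no SGE is encoded as a zero-length read.
 */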
static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
	if (wr->num_sge > 1)
		return -EINVAL;
	if (wr->num_sge) {
		wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
		wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr
							>> 32));
		wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
		wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
		wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
		wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
							 >> 32));
		wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
	} else {
		wqe->read.stag_src = cpu_to_be32(2);
		wqe->read.to_src_hi = 0;
		wqe->read.to_src_lo = 0;
		wqe->read.stag_sink = cpu_to_be32(2);
		wqe->read.plen = 0;
		wqe->read.to_sink_hi = 0;
		wqe->read.to_sink_lo = 0;
	}
	wqe->read.r2 = 0;
	wqe->read.r5 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
	return 0;
}

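/*
 * Build the immediate SGL for a receive WQE, zero-filling the unused
 * SGE slots.
 */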
static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
			   struct ib_recv_wr *wr, u8 *len16)
{
	int i;
	int plen = 0;

	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) < plen)
			return -EMSGSIZE;
		plen += wr->sg_list[i].length;
		wqe->recv.isgl.sge[i].stag =
			cpu_to_be32(wr->sg_list[i].lkey);
		wqe->recv.isgl.sge[i].len =
			cpu_to_be32(wr->sg_list[i].length);
		wqe->recv.isgl.sge[i].to =
			cpu_to_be64(wr->sg_list[i].addr);
	}
	for (; i < T4_MAX_RECV_SGE; i++) {
		wqe->recv.isgl.sge[i].stag = 0;
		wqe->recv.isgl.sge[i].len = 0;
		wqe->recv.isgl.sge[i].to = 0;
	}
	wqe->recv.isgl.op = FW_RI_DATA_ISGL;
	wqe->recv.isgl.r1 = 0;
	wqe->recv.isgl.nsge = cpu_to_be16(wr->num_sge);
	wqe->recv.isgl.r2 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->recv +
			      wr->num_sge * sizeof(struct fw_ri_sge), 16);
	return 0;
}

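/*
 * Build a FW_RI_FR_NSMR_WR for a fast-register work request.  The
 * page list goes into the WQE as immediate data when it fits in
 * T4_MAX_FR_IMMD bytes, otherwise it is referenced by a DSGL
 * pointing at the DMA-mapped page list.
 */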
static int build_fastreg(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{

	struct fw_ri_immd *imdp;
	__be64 *p;
	int i;
	int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);

	if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
		return -EINVAL;

	wqe->fr.qpbinde_to_dcacpu = 0;
	wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12;
	wqe->fr.addr_type = FW_RI_VA_BASED_TO;
	wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags);
	wqe->fr.len_hi = 0;
	wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length);
	wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
	wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
	wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
					0xffffffff);
	if (pbllen > T4_MAX_FR_IMMD) {
		struct c4iw_fr_page_list *c4pl =
			to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
		struct fw_ri_dsgl *sglp;

		sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
		sglp->op = FW_RI_DATA_DSGL;
		sglp->r1 = 0;
		sglp->nsge = cpu_to_be16(1);
		sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
		sglp->len0 = cpu_to_be32(pbllen);

		*len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *sglp, 16);
	} else {
		imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
		imdp->op = FW_RI_DATA_IMMD;
		imdp->r1 = 0;
		imdp->r2 = 0;
		imdp->immdlen = cpu_to_be32(pbllen);
		p = (__be64 *)(imdp + 1);
		for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++)
			*p = cpu_to_be64(
				(u64)wr->wr.fast_reg.page_list->page_list[i]);
		*len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen,
				      16);
	}
	return 0;
}

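/* Build a local-invalidate WR for the stag in wr->ex.invalidate_rkey. */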
static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
			  u8 *len16)
{
	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
	wqe->inv.r2 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
	return 0;
}

void c4iw_qp_add_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	atomic_inc(&(to_c4iw_qp(qp)->refcnt));
}

void c4iw_qp_rem_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
		wake_up(&(to_c4iw_qp(qp)->wait));
}

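/*
 * Post a chain of send work requests.  WQEs are built directly into
 * the SQ under the QP lock, and the doorbell is rung once for the
 * whole chain.  On error, *bad_wr points at the offending WR.
 */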
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr)
{
	int err = 0;
	u8 len16 = 0;
	enum fw_wr_opcodes fw_opcode = 0;
	enum fw_ri_wr_flags fw_flags;
	struct c4iw_qp *qhp;
	union t4_wr *wqe;
	u32 num_wrs;
	struct t4_swsqe *swsqe;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = t4_sq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (num_wrs == 0) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		wqe = &qhp->wq.sq.queue[qhp->wq.sq.pidx];
		fw_flags = 0;
		if (wr->send_flags & IB_SEND_SOLICITED)
			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IB_SEND_SIGNALED)
			fw_flags |= FW_RI_COMPLETION_FLAG;
		swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
		switch (wr->opcode) {
		case IB_WR_SEND_WITH_INV:
		case IB_WR_SEND:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_READ_FENCE_FLAG;
			fw_opcode = FW_RI_SEND_WR;
			if (wr->opcode == IB_WR_SEND)
				swsqe->opcode = FW_RI_SEND;
			else
				swsqe->opcode = FW_RI_SEND_WITH_INV;
			err = build_rdma_send(wqe, wr, &len16);
			break;
		case IB_WR_RDMA_WRITE:
			fw_opcode = FW_RI_RDMA_WRITE_WR;
			swsqe->opcode = FW_RI_RDMA_WRITE;
			err = build_rdma_write(wqe, wr, &len16);
			break;
		case IB_WR_RDMA_READ:
			fw_opcode = FW_RI_RDMA_READ_WR;
			swsqe->opcode = FW_RI_READ_REQ;
			fw_flags = 0;
			err = build_rdma_read(wqe, wr, &len16);
			if (err)
				break;
			swsqe->read_len = wr->sg_list[0].length;
			if (!qhp->wq.sq.oldest_read)
				qhp->wq.sq.oldest_read = swsqe;
			break;
		case IB_WR_FAST_REG_MR:
			fw_opcode = FW_RI_FR_NSMR_WR;
			swsqe->opcode = FW_RI_FAST_REGISTER;
			err = build_fastreg(wqe, wr, &len16);
			break;
		case IB_WR_LOCAL_INV:
			fw_opcode = FW_RI_INV_LSTAG_WR;
			swsqe->opcode = FW_RI_LOCAL_INV;
			err = build_inv_stag(wqe, wr, &len16);
			break;
		default:
			PDBG("%s post of type=%d TBD!\n", __func__,
			     wr->opcode);
			err = -EINVAL;
		}
		if (err) {
			*bad_wr = wr;
			break;
		}
		swsqe->idx = qhp->wq.sq.pidx;
		swsqe->complete = 0;
		swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED);
		swsqe->wr_id = wr->wr_id;

		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);

		PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
		     __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
		     swsqe->opcode, swsqe->read_len);
		wr = wr->next;
		num_wrs--;
		t4_sq_produce(&qhp->wq);
		idx++;
	}
	if (t4_wq_db_enabled(&qhp->wq))
		t4_ring_sq_db(&qhp->wq, idx);
	spin_unlock_irqrestore(&qhp->lock, flag);
	return err;
}

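/*
 * Post a chain of receive work requests, mirroring c4iw_post_send():
 * build into the RQ under the QP lock, then ring the doorbell once.
 */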
int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct c4iw_qp *qhp;
	union t4_recv_wr *wqe;
	u32 num_wrs;
	u8 len16 = 0;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = t4_rq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (wr->num_sge > T4_MAX_RECV_SGE) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}
		wqe = &qhp->wq.rq.queue[qhp->wq.rq.pidx];
		if (num_wrs)
			err = build_rdma_recv(qhp, wqe, wr, &len16);
		else
			err = -ENOMEM;
		if (err) {
			*bad_wr = wr;
			break;
		}

		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;

		wqe->recv.opcode = FW_RI_RECV_WR;
		wqe->recv.r1 = 0;
		wqe->recv.wrid = qhp->wq.rq.pidx;
		wqe->recv.r2[0] = 0;
		wqe->recv.r2[1] = 0;
		wqe->recv.r2[2] = 0;
		wqe->recv.len16 = len16;
		if (len16 < 5)
			wqe->flits[8] = 0;

		PDBG("%s cookie 0x%llx pidx %u\n", __func__,
		     (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
		t4_rq_produce(&qhp->wq);
		wr = wr->next;
		num_wrs--;
		idx++;
	}
	if (t4_wq_db_enabled(&qhp->wq))
		t4_ring_rq_db(&qhp->wq, idx);
	spin_unlock_irqrestore(&qhp->lock, flag);
	return err;
}

int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)
{
	return -ENOSYS;
}

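/*
 * Map a CQE error status to the TERMINATE message layer/etype and
 * error code.  A NULL err_cqe yields a local catastrophic error.
 */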
static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
				    u8 *ecode)
{
	int status;
	int tagged;
	int opcode;
	int rqtype;
	int send_inv;

	if (!err_cqe) {
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		return;
	}

	status = CQE_STATUS(err_cqe);
	opcode = CQE_OPCODE(err_cqe);
	rqtype = RQ_TYPE(err_cqe);
	send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
		   (opcode == FW_RI_SEND_WITH_SE_INV);
	tagged = (opcode == FW_RI_RDMA_WRITE) ||
		 (rqtype && (opcode == FW_RI_READ_RESP));

	switch (status) {
	case T4_ERR_STAG:
		if (send_inv) {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
			*ecode = RDMAP_CANT_INV_STAG;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_INV_STAG;
		}
		break;
	case T4_ERR_PDID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		if ((opcode == FW_RI_SEND_WITH_INV) ||
		    (opcode == FW_RI_SEND_WITH_SE_INV))
			*ecode = RDMAP_CANT_INV_STAG;
		else
			*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_QPID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_ACCESS:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_ACC_VIOL;
		break;
	case T4_ERR_WRAP:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_TO_WRAP;
		break;
	case T4_ERR_BOUND:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_BASE_BOUNDS;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_BASE_BOUNDS;
		}
		break;
	case T4_ERR_INVALIDATE_SHARED_MR:
	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_CANT_INV_STAG;
		break;
	case T4_ERR_ECC:
	case T4_ERR_ECC_PSTAG:
	case T4_ERR_INTERNAL_ERR:
		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_OUT_OF_RQE:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_NOBUF;
		break;
	case T4_ERR_PBL_ADDR_BOUND:
		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
		*ecode = DDPT_BASE_BOUNDS;
		break;
	case T4_ERR_CRC:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_CRC_ERR;
		break;
	case T4_ERR_MARKER:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_MARKER_ERR;
		break;
	case T4_ERR_PDU_LEN_ERR:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_MSG_TOOBIG;
		break;
	case T4_ERR_DDP_VERSION:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_INV_VERS;
		} else {
			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
			*ecode = DDPU_INV_VERS;
		}
		break;
	case T4_ERR_RDMA_VERSION:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_VERS;
		break;
	case T4_ERR_OPCODE:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_OPCODE;
		break;
	case T4_ERR_DDP_QUEUE_NUM:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_QN;
		break;
	case T4_ERR_MSN:
	case T4_ERR_MSN_GAP:
	case T4_ERR_MSN_RANGE:
	case T4_ERR_IRD_OVERFLOW:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_RANGE;
		break;
	case T4_ERR_TBIT:
		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_MO:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MO;
		break;
	default:
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	}
}

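/*
 * Post a zero-byte RDMA READ.  The WQE is built in an skb and sent
 * via the offload queue rather than through the SQ; the stag and
 * address fields are dummy values since no data is transferred.
 */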
int c4iw_post_zb_read(struct c4iw_qp *qhp)
{
	union t4_wr *wqe;
	struct sk_buff *skb;
	u8 len16;

	PDBG("%s enter\n", __func__);
	skb = alloc_skb(40, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (union t4_wr *)skb_put(skb, sizeof wqe->read);
	memset(wqe, 0, sizeof wqe->read);
	wqe->read.r2 = cpu_to_be64(0);
	wqe->read.stag_sink = cpu_to_be32(1);
	wqe->read.to_sink_hi = cpu_to_be32(0);
	wqe->read.to_sink_lo = cpu_to_be32(1);
	wqe->read.stag_src = cpu_to_be32(1);
	wqe->read.plen = cpu_to_be32(0);
	wqe->read.to_src_hi = cpu_to_be32(0);
	wqe->read.to_src_lo = cpu_to_be32(1);
	len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
	init_wr_hdr(wqe, 0, FW_RI_RDMA_READ_WR, FW_RI_COMPLETION_FLAG, len16);

	return c4iw_ofld_send(&qhp->rhp->rdev, skb);
}

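/*
 * Send a FW_RI_INIT_WR carrying a TERMINATE message whose layer,
 * etype, and ecode are derived from the error CQE (if any).
 */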
int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe)
{
	struct fw_ri_wr *wqe;
	struct sk_buff *skb;
	struct terminate_message *term;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     qhp->ep->hwtid);

	skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(FW_WR_OP(FW_RI_INIT_WR));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(qhp->ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
	wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
	term = (struct terminate_message *)wqe->u.terminate.termmsg;
	build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
	return c4iw_ofld_send(&qhp->rhp->rdev, skb);
}

/*
 * Assumes qhp lock is held.
 */
static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
		       struct c4iw_cq *schp, unsigned long *flag)
{
	int count;
	int flushed;

	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
	/* take a ref on the qhp since we must release the lock */
	atomic_inc(&qhp->refcnt);
	spin_unlock_irqrestore(&qhp->lock, *flag);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, *flag);
	spin_lock(&qhp->lock);
	c4iw_flush_hw_cq(&rchp->cq);
	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
	flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&rchp->lock, *flag);
	if (flushed)
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&schp->lock, *flag);
	spin_lock(&qhp->lock);
	c4iw_flush_hw_cq(&schp->cq);
	c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
	flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&schp->lock, *flag);
	if (flushed)
		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);

	/* deref */
	if (atomic_dec_and_test(&qhp->refcnt))
		wake_up(&qhp->wait);

	spin_lock_irqsave(&qhp->lock, *flag);
}

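/*
 * For user QPs just mark the WQ and CQs in error; kernel QPs are
 * flushed via __flush_qp().
 */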
static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag)
{
	struct c4iw_cq *rchp, *schp;

	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
	schp = get_chp(qhp->rhp, qhp->attr.scq);

	if (qhp->ibqp.uobject) {
		t4_set_wq_in_error(&qhp->wq);
		t4_set_cq_in_error(&rchp->cq);
		if (schp != rchp)
			t4_set_cq_in_error(&schp->cq);
		return;
	}
	__flush_qp(qhp, rchp, schp, flag);
}

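/*
 * Post a FW_RI_INIT_WR of type FINI to take the connection out of
 * RDMA mode, and wait (bounded by C4IW_WR_TO) for the firmware reply.
 */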
static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     qhp->ep->hwtid);

	skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP(FW_RI_INIT_WR) |
		FW_WR_COMPL(1));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(qhp->ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
	wqe->cookie = (u64)&wr_wait;

	wqe->u.fini.type = FW_RI_TYPE_FINI;
	c4iw_init_wr_wait(&wr_wait);
	ret = c4iw_ofld_send(&rhp->rdev, skb);
	if (ret)
		goto out;

	wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
	if (!wr_wait.done) {
		printk(KERN_ERR MOD "Device %s not responding!\n",
		       pci_name(rhp->rdev.lldi.pdev));
		rhp->rdev.flags = T4_FATAL_ERROR;
		ret = -EIO;
	} else {
		ret = wr_wait.ret;
		if (ret)
			printk(KERN_WARNING MOD
			       "%s: Abnormal close qpid %d ret %u\n",
			       pci_name(rhp->rdev.lldi.pdev), qhp->wq.sq.qid,
			       ret);
	}
out:
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
}

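/*
 * Build the peer-to-peer RTR message (a zero-length RDMA WRITE or
 * READ REQ) that the MPA initiator posts as its first RDMA operation.
 */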
static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
{
	memset(&init->u, 0, sizeof init->u);
	switch (p2p_type) {
	case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
		init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
		init->u.write.stag_sink = cpu_to_be32(1);
		init->u.write.to_sink = cpu_to_be64(1);
		init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
						   sizeof(struct fw_ri_immd),
						   16);
		break;
	case FW_RI_INIT_P2PTYPE_READ_REQ:
		init->u.write.opcode = FW_RI_RDMA_READ_WR;
		init->u.read.stag_src = cpu_to_be32(1);
		init->u.read.to_src_lo = cpu_to_be32(1);
		init->u.read.stag_sink = cpu_to_be32(1);
		init->u.read.to_sink_lo = cpu_to_be32(1);
		init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
		break;
	}
}

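/*
 * Post the FW_RI_INIT_WR that moves the connection into RDMA mode,
 * passing the negotiated MPA attributes, QP capabilities, and queue
 * ids, then wait for the firmware reply.
 */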
static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     qhp->ep->hwtid);

	skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP(FW_RI_INIT_WR) |
		FW_WR_COMPL(1));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(qhp->ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	wqe->cookie = (u64)&wr_wait;

	wqe->u.init.type = FW_RI_TYPE_INIT;
	wqe->u.init.mpareqbit_p2ptype =
		V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
		V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
	wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
	if (qhp->attr.mpa_attr.recv_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.xmit_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.crc_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;

	wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
			      FW_RI_QP_RDMA_WRITE_ENABLE |
			      FW_RI_QP_BIND_ENABLE;
	if (!qhp->ibqp.uobject)
		wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
				       FW_RI_QP_STAG0_ENABLE;
	wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
	wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
	wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
	wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
	wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
	wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
	wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
	wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
	wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
	wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
	wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
					   rhp->rdev.lldi.vr->rq.start);
	if (qhp->attr.mpa_attr.initiator)
		build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);

	c4iw_init_wr_wait(&wr_wait);
	ret = c4iw_ofld_send(&rhp->rdev, skb);
	if (ret)
		goto out;

	wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
	if (!wr_wait.done) {
		printk(KERN_ERR MOD "Device %s not responding!\n",
		       pci_name(rhp->rdev.lldi.pdev));
		rhp->rdev.flags = T4_FATAL_ERROR;
		ret = -EIO;
	} else
		ret = wr_wait.ret;
out:
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
}

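/*
 * The QP state machine.  Applies attribute changes (only legal in
 * IDLE) and drives state transitions, dropping the QP lock around
 * the blocking rdma_init()/rdma_fini() firmware calls.  Any needed
 * terminate, disconnect, or EP deref work is done after the lock is
 * released.
 */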
int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		   enum c4iw_qp_attr_mask mask,
		   struct c4iw_qp_attributes *attrs,
		   int internal)
{
	int ret = 0;
	struct c4iw_qp_attributes newattr = qhp->attr;
	unsigned long flag;
	int disconnect = 0;
	int terminate = 0;
	int abort = 0;
	int free = 0;
	struct c4iw_ep *ep = NULL;

	PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__,
	     qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
	     (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

	spin_lock_irqsave(&qhp->lock, flag);

	/* Process attr changes if in IDLE */
	if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
		if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
			ret = -EIO;
			goto out;
		}
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
			newattr.enable_rdma_read = attrs->enable_rdma_read;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
			newattr.enable_rdma_write = attrs->enable_rdma_write;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
			newattr.enable_bind = attrs->enable_bind;
		if (mask & C4IW_QP_ATTR_MAX_ORD) {
			if (attrs->max_ord > T4_MAX_READ_DEPTH) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ord = attrs->max_ord;
		}
		if (mask & C4IW_QP_ATTR_MAX_IRD) {
			if (attrs->max_ird > T4_MAX_READ_DEPTH) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ird = attrs->max_ird;
		}
		qhp->attr = newattr;
	}

	if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
		goto out;
	if (qhp->attr.state == attrs->next_state)
		goto out;

	switch (qhp->attr.state) {
	case C4IW_QP_STATE_IDLE:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_RTS:
			if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
				ret = -EINVAL;
				goto out;
			}
			if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
				ret = -EINVAL;
				goto out;
			}
			qhp->attr.mpa_attr = attrs->mpa_attr;
			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
			qhp->ep = qhp->attr.llp_stream_handle;
			qhp->attr.state = C4IW_QP_STATE_RTS;

			/*
			 * Ref the endpoint here and deref when we
			 * disassociate the endpoint from the QP.  This
			 * happens in CLOSING->IDLE transition or *->ERROR
			 * transition.
			 */
			c4iw_get_ep(&qhp->ep->com);
			spin_unlock_irqrestore(&qhp->lock, flag);
			ret = rdma_init(rhp, qhp);
			spin_lock_irqsave(&qhp->lock, flag);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_ERROR:
			qhp->attr.state = C4IW_QP_STATE_ERROR;
			flush_qp(qhp, &flag);
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_RTS:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_CLOSING:
			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
			qhp->attr.state = C4IW_QP_STATE_CLOSING;
			if (!internal) {
				abort = 0;
				disconnect = 1;
				ep = qhp->ep;
				c4iw_get_ep(&ep->com);
			}
			spin_unlock_irqrestore(&qhp->lock, flag);
			ret = rdma_fini(rhp, qhp);
			spin_lock_irqsave(&qhp->lock, flag);
			if (ret) {
				ep = qhp->ep;
				c4iw_get_ep(&ep->com);
				disconnect = abort = 1;
				goto err;
			}
			break;
		case C4IW_QP_STATE_TERMINATE:
			qhp->attr.state = C4IW_QP_STATE_TERMINATE;
			if (qhp->ibqp.uobject)
				t4_set_wq_in_error(&qhp->wq);
			if (!internal) {
				ep = qhp->ep;
				c4iw_get_ep(&ep->com);
				terminate = 1;
				disconnect = 1;
			}
			break;
		case C4IW_QP_STATE_ERROR:
			qhp->attr.state = C4IW_QP_STATE_ERROR;
			if (!internal) {
				abort = 1;
				disconnect = 1;
				ep = qhp->ep;
				c4iw_get_ep(&ep->com);
			}
			goto err;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_CLOSING:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		switch (attrs->next_state) {
		case C4IW_QP_STATE_IDLE:
			flush_qp(qhp, &flag);
			qhp->attr.state = C4IW_QP_STATE_IDLE;
			qhp->attr.llp_stream_handle = NULL;
			c4iw_put_ep(&qhp->ep->com);
			qhp->ep = NULL;
			wake_up(&qhp->wait);
			break;
		case C4IW_QP_STATE_ERROR:
			goto err;
		default:
			ret = -EINVAL;
			goto err;
		}
		break;
	case C4IW_QP_STATE_ERROR:
		if (attrs->next_state != C4IW_QP_STATE_IDLE) {
			ret = -EINVAL;
			goto out;
		}
		if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
			ret = -EINVAL;
			goto out;
		}
		qhp->attr.state = C4IW_QP_STATE_IDLE;
		break;
	case C4IW_QP_STATE_TERMINATE:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		goto err;
		break;
	default:
		printk(KERN_ERR "%s in a bad state %d\n",
		       __func__, qhp->attr.state);
		ret = -EINVAL;
		goto err;
		break;
	}
	goto out;
err:
	PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
	     qhp->wq.sq.qid);

	/* disassociate the LLP connection */
	qhp->attr.llp_stream_handle = NULL;
	ep = qhp->ep;
	qhp->ep = NULL;
	qhp->attr.state = C4IW_QP_STATE_ERROR;
	free = 1;
	wake_up(&qhp->wait);
	BUG_ON(!ep);
	flush_qp(qhp, &flag);
out:
	spin_unlock_irqrestore(&qhp->lock, flag);

	if (terminate)
		c4iw_post_terminate(qhp, NULL);

	/*
	 * If disconnect is 1, then we need to initiate a disconnect
	 * on the EP.  This can be a normal close (RTS->CLOSING) or
	 * an abnormal close (RTS/CLOSING->ERROR).
	 */
	if (disconnect) {
		c4iw_ep_disconnect(ep, abort, GFP_KERNEL);
		c4iw_put_ep(&ep->com);
	}

	/*
	 * If free is 1, then we've disassociated the EP from the QP
	 * and we need to dereference the EP.
	 */
	if (free)
		c4iw_put_ep(&ep->com);

	PDBG("%s exit state %d\n", __func__, qhp->attr.state);
	return ret;
}

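/*
 * Move the QP to ERROR, wait for the endpoint to be disassociated
 * and all references to drop, then release its resources.
 */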
int c4iw_destroy_qp(struct ib_qp *ib_qp)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_qp_attributes attrs;
	struct c4iw_ucontext *ucontext;

	qhp = to_c4iw_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = C4IW_QP_STATE_ERROR;
	c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
	remove_handle(rhp, &rhp->qpidr, qhp->wq.rq.qid);
	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	ucontext = ib_qp->uobject ?
		   to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
	kfree(qhp);
	return 0;
}

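/*
 * Allocate and initialize a QP.  For user QPs, four mmap keys are
 * handed back so the library can map the SQ and RQ memory and their
 * doorbell/GTS pages.
 */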
struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_pd *php;
	struct c4iw_cq *schp;
	struct c4iw_cq *rchp;
	struct c4iw_create_qp_resp uresp;
	int sqsize, rqsize;
	struct c4iw_ucontext *ucontext;
	int ret;
	struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4;

	PDBG("%s ib_pd %p\n", __func__, pd);

	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
		return ERR_PTR(-EINVAL);

	rqsize = roundup(attrs->cap.max_recv_wr + 1, 16);
	if (rqsize > T4_MAX_RQ_SIZE)
		return ERR_PTR(-E2BIG);

	sqsize = roundup(attrs->cap.max_send_wr + 1, 16);
	if (sqsize > T4_MAX_SQ_SIZE)
		return ERR_PTR(-E2BIG);

	ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;

	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.sq.size = sqsize;
	qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue;
	qhp->wq.rq.size = rqsize;
	qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue;

	if (ucontext) {
		qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
		qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
	}

	PDBG("%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu\n",
	     __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);

	ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize - 1;
	attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = C4IW_QP_STATE_IDLE;
	qhp->attr.next_state = C4IW_QP_STATE_IDLE;
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;
	spin_lock_init(&qhp->lock);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);

	ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
	if (ret)
		goto err2;

	ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.rq.qid);
	if (ret)
		goto err3;

	if (udata) {
		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
		if (!mm1) {
			ret = -ENOMEM;
			goto err4;
		}
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			ret = -ENOMEM;
			goto err5;
		}
		mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
		if (!mm3) {
			ret = -ENOMEM;
			goto err6;
		}
		mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
		if (!mm4) {
			ret = -ENOMEM;
			goto err7;
		}

		uresp.qid_mask = rhp->rdev.qpmask;
		uresp.sqid = qhp->wq.sq.qid;
		uresp.sq_size = qhp->wq.sq.size;
		uresp.sq_memsize = qhp->wq.sq.memsize;
		uresp.rqid = qhp->wq.rq.qid;
		uresp.rq_size = qhp->wq.rq.size;
		uresp.rq_memsize = qhp->wq.rq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.sq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.sq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (ret)
			goto err8;
		mm1->key = uresp.sq_key;
		mm1->addr = virt_to_phys(qhp->wq.sq.queue);
		mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.rq_key;
		mm2->addr = virt_to_phys(qhp->wq.rq.queue);
		mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
		insert_mmap(ucontext, mm2);
		mm3->key = uresp.sq_db_gts_key;
		mm3->addr = qhp->wq.sq.udb;
		mm3->len = PAGE_SIZE;
		insert_mmap(ucontext, mm3);
		mm4->key = uresp.rq_db_gts_key;
		mm4->addr = qhp->wq.rq.udb;
		mm4->len = PAGE_SIZE;
		insert_mmap(ucontext, mm4);
	}
	qhp->ibqp.qp_num = qhp->wq.sq.qid;
	init_timer(&(qhp->timer));
	PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
	     __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	     qhp->wq.sq.qid);
	return &qhp->ibqp;
err8:
	kfree(mm4);
err7:
	kfree(mm3);
err6:
	kfree(mm2);
err5:
	kfree(mm1);
err4:
	remove_handle(rhp, &rhp->qpidr, qhp->wq.rq.qid);
err3:
	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
err2:
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(qhp);
	return ERR_PTR(ret);
}

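/*
 * Translate ib_qp_attr values into c4iw attributes and a mask, then
 * hand them to c4iw_modify_qp().
 */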
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	enum c4iw_qp_attr_mask mask = 0;
	struct c4iw_qp_attributes attrs;

	PDBG("%s ib_qp %p\n", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_c4iw_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = c4iw_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
				  IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				   IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(C4IW_QP_ATTR_ENABLE_RDMA_READ |
			 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
			 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
}

struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
{
	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}