/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>

#include "iw_cxgb4.h"

static int db_delay_usecs = 1;
module_param(db_delay_usecs, int, 0644);
MODULE_PARM_DESC(db_delay_usecs, "Usecs to delay awaiting db fifo to drain");

static int ocqp_support = 1;
module_param(ocqp_support, int, 0644);
MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");

int db_fc_threshold = 1000;
module_param(db_fc_threshold, int, 0644);
MODULE_PARM_DESC(db_fc_threshold,
                 "QP count/threshold that triggers"
                 " automatic db flow control mode (default = 1000)");

int db_coalescing_threshold;
module_param(db_coalescing_threshold, int, 0644);
MODULE_PARM_DESC(db_coalescing_threshold,
                 "QP count/threshold that triggers"
                 " disabling db coalescing (default = 0)");

static int max_fr_immd = T4_MAX_FR_IMMD;
module_param(max_fr_immd, int, 0644);
MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");

static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
{
        unsigned long flag;
        spin_lock_irqsave(&qhp->lock, flag);
        qhp->attr.state = state;
        spin_unlock_irqrestore(&qhp->lock, flag);
}

static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
        c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);
}

static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
        dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
                          pci_unmap_addr(sq, mapping));
}

static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
        if (t4_sq_onchip(sq))
                dealloc_oc_sq(rdev, sq);
        else
                dealloc_host_sq(rdev, sq);
}

static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
        if (!ocqp_support || !ocqp_supported(&rdev->lldi))
                return -ENOSYS;
        sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
        if (!sq->dma_addr)
                return -ENOMEM;
        sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr -
                        rdev->lldi.vr->ocq.start;
        sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr -
                                            rdev->lldi.vr->ocq.start);
        sq->flags |= T4_SQ_ONCHIP;
        return 0;
}

static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
        sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize,
                                       &(sq->dma_addr), GFP_KERNEL);
        if (!sq->queue)
                return -ENOMEM;
        sq->phys_addr = virt_to_phys(sq->queue);
        pci_unmap_addr_set(sq, mapping, sq->dma_addr);
        return 0;
}

static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
                      struct c4iw_dev_ucontext *uctx)
{
        /*
         * uP clears EQ contexts when the connection exits rdma mode,
         * so no need to post a RESET WR for these EQs.
         */
        dma_free_coherent(&(rdev->lldi.pdev->dev),
                          wq->rq.memsize, wq->rq.queue,
                          dma_unmap_addr(&wq->rq, mapping));
        dealloc_sq(rdev, &wq->sq);
        c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
        kfree(wq->rq.sw_rq);
        kfree(wq->sq.sw_sq);
        c4iw_put_qpid(rdev, wq->rq.qid, uctx);
        c4iw_put_qpid(rdev, wq->sq.qid, uctx);
        return 0;
}

static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
                     struct t4_cq *rcq, struct t4_cq *scq,
                     struct c4iw_dev_ucontext *uctx)
{
        int user = (uctx != &rdev->uctx);
        struct fw_ri_res_wr *res_wr;
        struct fw_ri_res *res;
        int wr_len;
        struct c4iw_wr_wait wr_wait;
        struct sk_buff *skb;
        int ret;
        int eqsize;

        wq->sq.qid = c4iw_get_qpid(rdev, uctx);
        if (!wq->sq.qid)
                return -ENOMEM;

        wq->rq.qid = c4iw_get_qpid(rdev, uctx);
        if (!wq->rq.qid) {
                ret = -ENOMEM;
                goto free_sq_qid;
        }

        if (!user) {
                wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
                                       GFP_KERNEL);
                if (!wq->sq.sw_sq) {
                        ret = -ENOMEM;
                        goto free_rq_qid;
                }

                wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
                                       GFP_KERNEL);
                if (!wq->rq.sw_rq) {
                        ret = -ENOMEM;
                        goto free_sw_sq;
                }
        }

        /*
         * RQT must be a power of 2.
         */
        wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
        wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
        if (!wq->rq.rqt_hwaddr) {
                ret = -ENOMEM;
                goto free_sw_rq;
        }

        /*
         * Try the on-chip SQ for user QPs and fall back to a host SQ if
         * that fails (or on-chip queues are unsupported); kernel QPs
         * always use host memory.  The original code allocated both the
         * on-chip and host SQ in the user case, leaking one of them.
         */
        if (user) {
                ret = alloc_oc_sq(rdev, &wq->sq);
                if (ret)
                        ret = alloc_host_sq(rdev, &wq->sq);
        } else
                ret = alloc_host_sq(rdev, &wq->sq);
        if (ret)
                goto free_hwaddr;
        memset(wq->sq.queue, 0, wq->sq.memsize);
        dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

        wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
                                          wq->rq.memsize, &(wq->rq.dma_addr),
                                          GFP_KERNEL);
        if (!wq->rq.queue) {
                ret = -ENOMEM;
                goto free_sq;
        }
        PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
             __func__, wq->sq.queue,
             (unsigned long long)virt_to_phys(wq->sq.queue),
             wq->rq.queue,
             (unsigned long long)virt_to_phys(wq->rq.queue));
        memset(wq->rq.queue, 0, wq->rq.memsize);
        dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

        wq->db = rdev->lldi.db_reg;
        wq->gts = rdev->lldi.gts_reg;
        if (user) {
                wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
                                        (wq->sq.qid << rdev->qpshift);
                wq->sq.udb &= PAGE_MASK;
                wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
                                        (wq->rq.qid << rdev->qpshift);
                wq->rq.udb &= PAGE_MASK;
        }
        wq->rdev = rdev;
        wq->rq.msn = 1;

        /* build fw_ri_res_wr */
        wr_len = sizeof *res_wr + 2 * sizeof *res;

        skb = alloc_skb(wr_len, GFP_KERNEL);
        if (!skb) {
                ret = -ENOMEM;
                goto free_dma;
        }
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

        res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
        memset(res_wr, 0, wr_len);
        res_wr->op_nres = cpu_to_be32(
                        FW_WR_OP(FW_RI_RES_WR) |
                        V_FW_RI_RES_WR_NRES(2) |
                        FW_WR_COMPL(1));
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (unsigned long) &wr_wait;
        res = res_wr->res;
        res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
        res->u.sqrq.op = FW_RI_RES_OP_WRITE;

        /*
         * eqsize is the number of 64B entries plus the status page size.
         */
        eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;

        res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
                V_FW_RI_RES_WR_HOSTFCMODE(0) |  /* no host cidx updates */
                V_FW_RI_RES_WR_CPRIO(0) |       /* don't keep in chip cache */
                V_FW_RI_RES_WR_PCIECHN(0) |     /* set by uP at ri_init time */
                (t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) |
                V_FW_RI_RES_WR_IQID(scq->cqid));
        res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
                V_FW_RI_RES_WR_DCAEN(0) |
                V_FW_RI_RES_WR_DCACPU(0) |
                V_FW_RI_RES_WR_FBMIN(2) |
                V_FW_RI_RES_WR_FBMAX(2) |
                V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
                V_FW_RI_RES_WR_CIDXFTHRESH(0) |
                V_FW_RI_RES_WR_EQSIZE(eqsize));
        res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
        res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
        res++;
        res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
        res->u.sqrq.op = FW_RI_RES_OP_WRITE;

        /*
         * eqsize is the number of 64B entries plus the status page size.
         */
        eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
        res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
                V_FW_RI_RES_WR_HOSTFCMODE(0) |  /* no host cidx updates */
                V_FW_RI_RES_WR_CPRIO(0) |       /* don't keep in chip cache */
                V_FW_RI_RES_WR_PCIECHN(0) |     /* set by uP at ri_init time */
                V_FW_RI_RES_WR_IQID(rcq->cqid));
        res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
                V_FW_RI_RES_WR_DCAEN(0) |
                V_FW_RI_RES_WR_DCACPU(0) |
                V_FW_RI_RES_WR_FBMIN(2) |
                V_FW_RI_RES_WR_FBMAX(2) |
                V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
                V_FW_RI_RES_WR_CIDXFTHRESH(0) |
                V_FW_RI_RES_WR_EQSIZE(eqsize));
        res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
        res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

        c4iw_init_wr_wait(&wr_wait);

        ret = c4iw_ofld_send(rdev, skb);
        if (ret)
                goto free_dma;
        ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
        if (ret)
                goto free_dma;

        PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
             __func__, wq->sq.qid, wq->rq.qid, wq->db,
             (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);

        return 0;
free_dma:
        dma_free_coherent(&(rdev->lldi.pdev->dev),
                          wq->rq.memsize, wq->rq.queue,
                          dma_unmap_addr(&wq->rq, mapping));
free_sq:
        dealloc_sq(rdev, &wq->sq);
free_hwaddr:
        c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
free_sw_rq:
        kfree(wq->rq.sw_rq);
free_sw_sq:
        kfree(wq->sq.sw_sq);
free_rq_qid:
        c4iw_put_qpid(rdev, wq->rq.qid, uctx);
free_sq_qid:
        c4iw_put_qpid(rdev, wq->sq.qid, uctx);
        return ret;
}

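/*
 * Copy the data for an inline send into the WQE as a FW_RI_DATA_IMMD
 * chunk, wrapping around to the start of the SQ if necessary and
 * zero-padding up to the next 16-byte boundary.  Returns -EMSGSIZE if
 * the total payload would exceed 'max'.
 */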
static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
                      struct ib_send_wr *wr, int max, u32 *plenp)
{
        u8 *dstp, *srcp;
        u32 plen = 0;
        int i;
        int rem, len;

        dstp = (u8 *)immdp->data;
        for (i = 0; i < wr->num_sge; i++) {
                if ((plen + wr->sg_list[i].length) > max)
                        return -EMSGSIZE;
                srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
                plen += wr->sg_list[i].length;
                rem = wr->sg_list[i].length;
                while (rem) {
                        if (dstp == (u8 *)&sq->queue[sq->size])
                                dstp = (u8 *)sq->queue;
                        if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
                                len = rem;
                        else
                                len = (u8 *)&sq->queue[sq->size] - dstp;
                        memcpy(dstp, srcp, len);
                        dstp += len;
                        srcp += len;
                        rem -= len;
                }
        }
        len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
        if (len)
                memset(dstp, 0, len);
        immdp->op = FW_RI_DATA_IMMD;
        immdp->r1 = 0;
        immdp->r2 = 0;
        immdp->immdlen = cpu_to_be32(plen);
        *plenp = plen;
        return 0;
}

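/*
 * Build a FW_RI_DATA_ISGL from the ib_sge array directly in queue memory,
 * wrapping the flit pointer back to queue_start when it reaches queue_end.
 * The total payload length is returned through plenp when it is non-NULL.
 */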
static int build_isgl(__be64 *queue_start, __be64 *queue_end,
                      struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
                      int num_sge, u32 *plenp)
{
        int i;
        u32 plen = 0;
        __be64 *flitp = (__be64 *)isglp->sge;

        for (i = 0; i < num_sge; i++) {
                if ((plen + sg_list[i].length) < plen)
                        return -EMSGSIZE;
                plen += sg_list[i].length;
                *flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
                                     sg_list[i].length);
                if (++flitp == queue_end)
                        flitp = queue_start;
                *flitp = cpu_to_be64(sg_list[i].addr);
                if (++flitp == queue_end)
                        flitp = queue_start;
        }
        *flitp = (__force __be64)0;
        isglp->op = FW_RI_DATA_ISGL;
        isglp->r1 = 0;
        isglp->nsge = cpu_to_be16(num_sge);
        isglp->r2 = 0;
        if (plenp)
                *plenp = plen;
        return 0;
}

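/*
 * Translate an IB_WR_SEND/IB_WR_SEND_WITH_INV work request into a
 * FW_RI_SEND_WR: select the firmware send opcode, then encode the payload
 * either inline (IB_SEND_INLINE) or as an ISGL, returning the WQE length
 * in 16-byte units through len16.
 */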
static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
                           struct ib_send_wr *wr, u8 *len16)
{
        u32 plen;
        int size;
        int ret;

        if (wr->num_sge > T4_MAX_SEND_SGE)
                return -EINVAL;
        switch (wr->opcode) {
        case IB_WR_SEND:
                if (wr->send_flags & IB_SEND_SOLICITED)
                        wqe->send.sendop_pkd = cpu_to_be32(
                                V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
                else
                        wqe->send.sendop_pkd = cpu_to_be32(
                                V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
                wqe->send.stag_inv = 0;
                break;
        case IB_WR_SEND_WITH_INV:
                if (wr->send_flags & IB_SEND_SOLICITED)
                        wqe->send.sendop_pkd = cpu_to_be32(
                                V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
                else
                        wqe->send.sendop_pkd = cpu_to_be32(
                                V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
                wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
                break;

        default:
                return -EINVAL;
        }

        plen = 0;
        if (wr->num_sge) {
                if (wr->send_flags & IB_SEND_INLINE) {
                        ret = build_immd(sq, wqe->send.u.immd_src, wr,
                                         T4_MAX_SEND_INLINE, &plen);
                        if (ret)
                                return ret;
                        size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
                               plen;
                } else {
                        ret = build_isgl((__be64 *)sq->queue,
                                         (__be64 *)&sq->queue[sq->size],
                                         wqe->send.u.isgl_src,
                                         wr->sg_list, wr->num_sge, &plen);
                        if (ret)
                                return ret;
                        size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
                               wr->num_sge * sizeof(struct fw_ri_sge);
                }
        } else {
                wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
                wqe->send.u.immd_src[0].r1 = 0;
                wqe->send.u.immd_src[0].r2 = 0;
                wqe->send.u.immd_src[0].immdlen = 0;
                size = sizeof wqe->send + sizeof(struct fw_ri_immd);
                plen = 0;
        }
        *len16 = DIV_ROUND_UP(size, 16);
        wqe->send.plen = cpu_to_be32(plen);
        return 0;
}

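/*
 * Translate an IB_WR_RDMA_WRITE work request into a FW_RI_RDMA_WRITE_WR.
 * As with sends, the source data is either copied inline or described by
 * an ISGL.
 */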
static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
                            struct ib_send_wr *wr, u8 *len16)
{
        u32 plen;
        int size;
        int ret;

        if (wr->num_sge > T4_MAX_SEND_SGE)
                return -EINVAL;
        wqe->write.r2 = 0;
        wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
        wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
        if (wr->num_sge) {
                if (wr->send_flags & IB_SEND_INLINE) {
                        ret = build_immd(sq, wqe->write.u.immd_src, wr,
                                         T4_MAX_WRITE_INLINE, &plen);
                        if (ret)
                                return ret;
                        size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
                               plen;
                } else {
                        ret = build_isgl((__be64 *)sq->queue,
                                         (__be64 *)&sq->queue[sq->size],
                                         wqe->write.u.isgl_src,
                                         wr->sg_list, wr->num_sge, &plen);
                        if (ret)
                                return ret;
                        size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
                               wr->num_sge * sizeof(struct fw_ri_sge);
                }
        } else {
                wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
                wqe->write.u.immd_src[0].r1 = 0;
                wqe->write.u.immd_src[0].r2 = 0;
                wqe->write.u.immd_src[0].immdlen = 0;
                size = sizeof wqe->write + sizeof(struct fw_ri_immd);
                plen = 0;
        }
        *len16 = DIV_ROUND_UP(size, 16);
        wqe->write.plen = cpu_to_be32(plen);
        return 0;
}

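/*
 * Translate an RDMA read work request into a FW_RI_RDMA_READ_WR.  At most
 * one SGE is supported; with no SGE, a zero-length read is built using
 * the stag value 2.
 */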
static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
        if (wr->num_sge > 1)
                return -EINVAL;
        if (wr->num_sge) {
                wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
                wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr
                                                        >> 32));
                wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
                wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
                wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
                wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
                                                         >> 32));
                wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
        } else {
                wqe->read.stag_src = cpu_to_be32(2);
                wqe->read.to_src_hi = 0;
                wqe->read.to_src_lo = 0;
                wqe->read.stag_sink = cpu_to_be32(2);
                wqe->read.plen = 0;
                wqe->read.to_sink_hi = 0;
                wqe->read.to_sink_lo = 0;
        }
        wqe->read.r2 = 0;
        wqe->read.r5 = 0;
        *len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
        return 0;
}

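/*
 * Build the receive WQE body: an ISGL placed directly in the RQ queue
 * memory.
 */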
static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
                           struct ib_recv_wr *wr, u8 *len16)
{
        int ret;

        ret = build_isgl((__be64 *)qhp->wq.rq.queue,
                         (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
                         &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
        if (ret)
                return ret;
        *len16 = DIV_ROUND_UP(sizeof wqe->recv +
                              wr->num_sge * sizeof(struct fw_ri_sge), 16);
        return 0;
}

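/*
 * Build a FW_RI_FR_NSMR_WR fast-register WQE.  On T5 devices, when DSGL
 * writes are in use and the PBL is larger than max_fr_immd, the PBL is
 * referenced by a DSGL so the hardware fetches it by DMA; otherwise the
 * PBL is copied into the WQE as immediate data, wrapping at the end of
 * the SQ.
 */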
static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
                         struct ib_send_wr *wr, u8 *len16, u8 t5dev)
{
        struct fw_ri_immd *imdp;
        __be64 *p;
        int i;
        int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
        int rem;

        if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
                return -EINVAL;

        wqe->fr.qpbinde_to_dcacpu = 0;
        wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12;
        wqe->fr.addr_type = FW_RI_VA_BASED_TO;
        wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags);
        wqe->fr.len_hi = 0;
        wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length);
        wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
        wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
        wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
                                        0xffffffff);

        if (t5dev && use_dsgl && (pbllen > max_fr_immd)) {
                struct c4iw_fr_page_list *c4pl =
                        to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
                struct fw_ri_dsgl *sglp;

                for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
                        wr->wr.fast_reg.page_list->page_list[i] = (__force u64)
                                cpu_to_be64((u64)
                                wr->wr.fast_reg.page_list->page_list[i]);
                }

                sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
                sglp->op = FW_RI_DATA_DSGL;
                sglp->r1 = 0;
                sglp->nsge = cpu_to_be16(1);
                sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
                sglp->len0 = cpu_to_be32(pbllen);

                *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
        } else {
                imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
                imdp->op = FW_RI_DATA_IMMD;
                imdp->r1 = 0;
                imdp->r2 = 0;
                imdp->immdlen = cpu_to_be32(pbllen);
                p = (__be64 *)(imdp + 1);
                rem = pbllen;
                for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
                        *p = cpu_to_be64(
                                (u64)wr->wr.fast_reg.page_list->page_list[i]);
                        rem -= sizeof(*p);
                        if (++p == (__be64 *)&sq->queue[sq->size])
                                p = (__be64 *)sq->queue;
                }
                BUG_ON(rem < 0);
                while (rem) {
                        *p = 0;
                        rem -= sizeof(*p);
                        if (++p == (__be64 *)&sq->queue[sq->size])
                                p = (__be64 *)sq->queue;
                }
                *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)
                                      + pbllen, 16);
        }
        return 0;
}

static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
                          u8 *len16)
{
        wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
        wqe->inv.r2 = 0;
        *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
        return 0;
}

void c4iw_qp_add_ref(struct ib_qp *qp)
{
        PDBG("%s ib_qp %p\n", __func__, qp);
        atomic_inc(&(to_c4iw_qp(qp)->refcnt));
}

void c4iw_qp_rem_ref(struct ib_qp *qp)
{
        PDBG("%s ib_qp %p\n", __func__, qp);
        if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
                wake_up(&(to_c4iw_qp(qp)->wait));
}

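/*
 * Post a chain of send work requests to the SQ.  Each WR is built into a
 * WQE at the current software producer index while holding the qp lock,
 * and the doorbell for the whole chain is rung once at the end, unless
 * doorbells are currently disabled for flow control.
 */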
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                   struct ib_send_wr **bad_wr)
{
        int err = 0;
        u8 len16 = 0;
        enum fw_wr_opcodes fw_opcode = 0;
        enum fw_ri_wr_flags fw_flags;
        struct c4iw_qp *qhp;
        union t4_wr *wqe;
        u32 num_wrs;
        struct t4_swsqe *swsqe;
        unsigned long flag;
        u16 idx = 0;

        qhp = to_c4iw_qp(ibqp);
        spin_lock_irqsave(&qhp->lock, flag);
        if (t4_wq_in_error(&qhp->wq)) {
                spin_unlock_irqrestore(&qhp->lock, flag);
                return -EINVAL;
        }
        num_wrs = t4_sq_avail(&qhp->wq);
        if (num_wrs == 0) {
                spin_unlock_irqrestore(&qhp->lock, flag);
                return -ENOMEM;
        }
        while (wr) {
                if (num_wrs == 0) {
                        err = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }
                wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
                      qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);

                fw_flags = 0;
                if (wr->send_flags & IB_SEND_SOLICITED)
                        fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
                if (wr->send_flags & IB_SEND_SIGNALED)
                        fw_flags |= FW_RI_COMPLETION_FLAG;
                swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
                switch (wr->opcode) {
                case IB_WR_SEND_WITH_INV:
                case IB_WR_SEND:
                        if (wr->send_flags & IB_SEND_FENCE)
                                fw_flags |= FW_RI_READ_FENCE_FLAG;
                        fw_opcode = FW_RI_SEND_WR;
                        if (wr->opcode == IB_WR_SEND)
                                swsqe->opcode = FW_RI_SEND;
                        else
                                swsqe->opcode = FW_RI_SEND_WITH_INV;
                        err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
                        break;
                case IB_WR_RDMA_WRITE:
                        fw_opcode = FW_RI_RDMA_WRITE_WR;
                        swsqe->opcode = FW_RI_RDMA_WRITE;
                        err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
                        break;
                case IB_WR_RDMA_READ:
                case IB_WR_RDMA_READ_WITH_INV:
                        fw_opcode = FW_RI_RDMA_READ_WR;
                        swsqe->opcode = FW_RI_READ_REQ;
                        if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
                                fw_flags = FW_RI_RDMA_READ_INVALIDATE;
                        else
                                fw_flags = 0;
                        err = build_rdma_read(wqe, wr, &len16);
                        if (err)
                                break;
                        swsqe->read_len = wr->sg_list[0].length;
                        if (!qhp->wq.sq.oldest_read)
                                qhp->wq.sq.oldest_read = swsqe;
                        break;
                case IB_WR_FAST_REG_MR:
                        fw_opcode = FW_RI_FR_NSMR_WR;
                        swsqe->opcode = FW_RI_FAST_REGISTER;
                        err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16,
                                            is_t5(qhp->rhp->rdev.lldi.adapter_type) ?
                                            1 : 0);
                        break;
                case IB_WR_LOCAL_INV:
                        if (wr->send_flags & IB_SEND_FENCE)
                                fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
                        fw_opcode = FW_RI_INV_LSTAG_WR;
                        swsqe->opcode = FW_RI_LOCAL_INV;
                        err = build_inv_stag(wqe, wr, &len16);
                        break;
                default:
                        PDBG("%s post of type=%d TBD!\n", __func__,
                             wr->opcode);
                        err = -EINVAL;
                }
                if (err) {
                        *bad_wr = wr;
                        break;
                }
                swsqe->idx = qhp->wq.sq.pidx;
                swsqe->complete = 0;
                swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED);
                swsqe->wr_id = wr->wr_id;

                init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);

                PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
                     __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
                     swsqe->opcode, swsqe->read_len);
                wr = wr->next;
                num_wrs--;
                t4_sq_produce(&qhp->wq, len16);
                idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
        }
        if (t4_wq_db_enabled(&qhp->wq))
                t4_ring_sq_db(&qhp->wq, idx);
        spin_unlock_irqrestore(&qhp->lock, flag);
        return err;
}

int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                      struct ib_recv_wr **bad_wr)
{
        int err = 0;
        struct c4iw_qp *qhp;
        union t4_recv_wr *wqe;
        u32 num_wrs;
        u8 len16 = 0;
        unsigned long flag;
        u16 idx = 0;

        qhp = to_c4iw_qp(ibqp);
        spin_lock_irqsave(&qhp->lock, flag);
        if (t4_wq_in_error(&qhp->wq)) {
                spin_unlock_irqrestore(&qhp->lock, flag);
                return -EINVAL;
        }
        num_wrs = t4_rq_avail(&qhp->wq);
        if (num_wrs == 0) {
                spin_unlock_irqrestore(&qhp->lock, flag);
                return -ENOMEM;
        }
        while (wr) {
                if (wr->num_sge > T4_MAX_RECV_SGE) {
                        err = -EINVAL;
                        *bad_wr = wr;
                        break;
                }
                wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
                                           qhp->wq.rq.wq_pidx *
                                           T4_EQ_ENTRY_SIZE);
                if (num_wrs)
                        err = build_rdma_recv(qhp, wqe, wr, &len16);
                else
                        err = -ENOMEM;
                if (err) {
                        *bad_wr = wr;
                        break;
                }

                qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;

                wqe->recv.opcode = FW_RI_RECV_WR;
                wqe->recv.r1 = 0;
                wqe->recv.wrid = qhp->wq.rq.pidx;
                wqe->recv.r2[0] = 0;
                wqe->recv.r2[1] = 0;
                wqe->recv.r2[2] = 0;
                wqe->recv.len16 = len16;
                PDBG("%s cookie 0x%llx pidx %u\n", __func__,
                     (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
                t4_rq_produce(&qhp->wq, len16);
                idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
                wr = wr->next;
                num_wrs--;
        }
        if (t4_wq_db_enabled(&qhp->wq))
                t4_ring_rq_db(&qhp->wq, idx);
        spin_unlock_irqrestore(&qhp->lock, flag);
        return err;
}

int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)
{
        return -ENOSYS;
}

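/*
 * Derive the TERMINATE message layer/etype and error code from the status
 * and opcode of an error CQE, following the iWARP (RDMAP/DDP/MPA) error
 * code layering.
 */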
static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
                                    u8 *ecode)
{
        int status;
        int tagged;
        int opcode;
        int rqtype;
        int send_inv;

        if (!err_cqe) {
                *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
                *ecode = 0;
                return;
        }

        status = CQE_STATUS(err_cqe);
        opcode = CQE_OPCODE(err_cqe);
        rqtype = RQ_TYPE(err_cqe);
        send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
                   (opcode == FW_RI_SEND_WITH_SE_INV);
        tagged = (opcode == FW_RI_RDMA_WRITE) ||
                 (rqtype && (opcode == FW_RI_READ_RESP));

        switch (status) {
        case T4_ERR_STAG:
                if (send_inv) {
                        *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
                        *ecode = RDMAP_CANT_INV_STAG;
                } else {
                        *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                        *ecode = RDMAP_INV_STAG;
                }
                break;
        case T4_ERR_PDID:
                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                if ((opcode == FW_RI_SEND_WITH_INV) ||
                    (opcode == FW_RI_SEND_WITH_SE_INV))
                        *ecode = RDMAP_CANT_INV_STAG;
                else
                        *ecode = RDMAP_STAG_NOT_ASSOC;
                break;
        case T4_ERR_QPID:
                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                *ecode = RDMAP_STAG_NOT_ASSOC;
                break;
        case T4_ERR_ACCESS:
                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                *ecode = RDMAP_ACC_VIOL;
                break;
        case T4_ERR_WRAP:
                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                *ecode = RDMAP_TO_WRAP;
                break;
        case T4_ERR_BOUND:
                if (tagged) {
                        *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
                        *ecode = DDPT_BASE_BOUNDS;
                } else {
                        *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                        *ecode = RDMAP_BASE_BOUNDS;
                }
                break;
        case T4_ERR_INVALIDATE_SHARED_MR:
        case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
                *ecode = RDMAP_CANT_INV_STAG;
                break;
        case T4_ERR_ECC:
        case T4_ERR_ECC_PSTAG:
        case T4_ERR_INTERNAL_ERR:
                *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
                *ecode = 0;
                break;
        case T4_ERR_OUT_OF_RQE:
                *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
                *ecode = DDPU_INV_MSN_NOBUF;
                break;
        case T4_ERR_PBL_ADDR_BOUND:
                *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
                *ecode = DDPT_BASE_BOUNDS;
                break;
        case T4_ERR_CRC:
                *layer_type = LAYER_MPA|DDP_LLP;
                *ecode = MPA_CRC_ERR;
                break;
        case T4_ERR_MARKER:
                *layer_type = LAYER_MPA|DDP_LLP;
                *ecode = MPA_MARKER_ERR;
                break;
        case T4_ERR_PDU_LEN_ERR:
                *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
                *ecode = DDPU_MSG_TOOBIG;
                break;
        case T4_ERR_DDP_VERSION:
                if (tagged) {
                        *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
                        *ecode = DDPT_INV_VERS;
                } else {
                        *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
                        *ecode = DDPU_INV_VERS;
                }
                break;
        case T4_ERR_RDMA_VERSION:
                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
                *ecode = RDMAP_INV_VERS;
                break;
        case T4_ERR_OPCODE:
                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
                *ecode = RDMAP_INV_OPCODE;
                break;
        case T4_ERR_DDP_QUEUE_NUM:
                *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
                *ecode = DDPU_INV_QN;
                break;
        case T4_ERR_MSN:
        case T4_ERR_MSN_GAP:
        case T4_ERR_MSN_RANGE:
        case T4_ERR_IRD_OVERFLOW:
                *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
                *ecode = DDPU_INV_MSN_RANGE;
                break;
        case T4_ERR_TBIT:
                *layer_type = LAYER_DDP|DDP_LOCAL_CATA;
                *ecode = 0;
                break;
        case T4_ERR_MO:
                *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
                *ecode = DDPU_INV_MO;
                break;
        default:
                *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
                *ecode = 0;
                break;
        }
}

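/*
 * Send a FW_RI_TYPE_TERMINATE WR so the firmware generates a TERMINATE
 * message for the connection.  MPA-layer errors use the layer_etype and
 * ecode saved in the qp attributes; everything else is derived from the
 * error CQE.
 */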
static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
                           gfp_t gfp)
{
        struct fw_ri_wr *wqe;
        struct sk_buff *skb;
        struct terminate_message *term;

        PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
             qhp->ep->hwtid);

        skb = alloc_skb(sizeof *wqe, gfp);
        if (!skb)
                return;
        set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

        wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
        memset(wqe, 0, sizeof *wqe);
        wqe->op_compl = cpu_to_be32(FW_WR_OP(FW_RI_INIT_WR));
        wqe->flowid_len16 = cpu_to_be32(
                FW_WR_FLOWID(qhp->ep->hwtid) |
                FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

        wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
        wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
        term = (struct terminate_message *)wqe->u.terminate.termmsg;
        if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
                term->layer_etype = qhp->attr.layer_etype;
                term->ecode = qhp->attr.ecode;
        } else
                build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
        c4iw_ofld_send(&qhp->rhp->rdev, skb);
}

/*
 * Caller must hold the qhp mutex; this function takes the cq and qp
 * spinlocks itself.
 */
static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
                       struct c4iw_cq *schp)
{
        int count;
        int flushed;
        unsigned long flag;

        PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);

        /* locking hierarchy: cq lock first, then qp lock. */
        spin_lock_irqsave(&rchp->lock, flag);
        spin_lock(&qhp->lock);
        c4iw_flush_hw_cq(&rchp->cq);
        c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
        flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
        spin_unlock(&qhp->lock);
        spin_unlock_irqrestore(&rchp->lock, flag);
        if (flushed) {
                spin_lock_irqsave(&rchp->comp_handler_lock, flag);
                (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
                spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
        }

        /* locking hierarchy: cq lock first, then qp lock. */
        spin_lock_irqsave(&schp->lock, flag);
        spin_lock(&qhp->lock);
        c4iw_flush_hw_cq(&schp->cq);
        c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
        flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
        spin_unlock(&qhp->lock);
        spin_unlock_irqrestore(&schp->lock, flag);
        if (flushed) {
                spin_lock_irqsave(&schp->comp_handler_lock, flag);
                (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
                spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
        }
}

static void flush_qp(struct c4iw_qp *qhp)
{
        struct c4iw_cq *rchp, *schp;
        unsigned long flag;

        rchp = get_chp(qhp->rhp, qhp->attr.rcq);
        schp = get_chp(qhp->rhp, qhp->attr.scq);

        if (qhp->ibqp.uobject) {
                t4_set_wq_in_error(&qhp->wq);
                t4_set_cq_in_error(&rchp->cq);
                spin_lock_irqsave(&rchp->comp_handler_lock, flag);
                (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
                spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
                if (schp != rchp) {
                        t4_set_cq_in_error(&schp->cq);
                        spin_lock_irqsave(&schp->comp_handler_lock, flag);
                        (*schp->ibcq.comp_handler)(&schp->ibcq,
                                        schp->ibcq.cq_context);
                        spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
                }
                return;
        }
        __flush_qp(qhp, rchp, schp);
}

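/*
 * Post a FW_RI_TYPE_FINI WR to take the connection out of RDMA mode and
 * wait for the firmware to complete it.
 */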
static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                     struct c4iw_ep *ep)
{
        struct fw_ri_wr *wqe;
        int ret;
        struct sk_buff *skb;

        PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
             ep->hwtid);

        skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;
        set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

        wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
        memset(wqe, 0, sizeof *wqe);
        wqe->op_compl = cpu_to_be32(
                FW_WR_OP(FW_RI_INIT_WR) |
                FW_WR_COMPL(1));
        wqe->flowid_len16 = cpu_to_be32(
                FW_WR_FLOWID(ep->hwtid) |
                FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
        wqe->cookie = (unsigned long) &ep->com.wr_wait;

        wqe->u.fini.type = FW_RI_TYPE_FINI;
        ret = c4iw_ofld_send(&rhp->rdev, skb);
        if (ret)
                goto out;

        ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid,
                                  qhp->wq.sq.qid, __func__);
out:
        PDBG("%s ret %d\n", __func__, ret);
        return ret;
}

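/*
 * Build the dummy RTR work request (a zero-length RDMA WRITE or RDMA
 * READ, depending on the negotiated p2p type) that the firmware issues
 * at MPA connection setup.
 */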
static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
{
        PDBG("%s p2p_type = %d\n", __func__, p2p_type);
        memset(&init->u, 0, sizeof init->u);
        switch (p2p_type) {
        case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
                init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
                init->u.write.stag_sink = cpu_to_be32(1);
                init->u.write.to_sink = cpu_to_be64(1);
                init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
                init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
                                                   sizeof(struct fw_ri_immd),
                                                   16);
                break;
        case FW_RI_INIT_P2PTYPE_READ_REQ:
                init->u.write.opcode = FW_RI_RDMA_READ_WR;
                init->u.read.stag_src = cpu_to_be32(1);
                init->u.read.to_src_lo = cpu_to_be32(1);
                init->u.read.stag_sink = cpu_to_be32(1);
                init->u.read.to_sink_lo = cpu_to_be32(1);
                init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
                break;
        }
}

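/*
 * Post a FW_RI_TYPE_INIT WR carrying the negotiated MPA and QP attributes
 * to move the qp into RDMA mode, then wait for the firmware reply.
 */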
static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
{
        struct fw_ri_wr *wqe;
        int ret;
        struct sk_buff *skb;

        PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
             qhp->ep->hwtid);

        skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;
        set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

        wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
        memset(wqe, 0, sizeof *wqe);
        wqe->op_compl = cpu_to_be32(
                FW_WR_OP(FW_RI_INIT_WR) |
                FW_WR_COMPL(1));
        wqe->flowid_len16 = cpu_to_be32(
                FW_WR_FLOWID(qhp->ep->hwtid) |
                FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

        wqe->cookie = (unsigned long) &qhp->ep->com.wr_wait;

        wqe->u.init.type = FW_RI_TYPE_INIT;
        wqe->u.init.mpareqbit_p2ptype =
                V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
                V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
        wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
        if (qhp->attr.mpa_attr.recv_marker_enabled)
                wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
        if (qhp->attr.mpa_attr.xmit_marker_enabled)
                wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
        if (qhp->attr.mpa_attr.crc_enabled)
                wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;

        wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
                              FW_RI_QP_RDMA_WRITE_ENABLE |
                              FW_RI_QP_BIND_ENABLE;
        if (!qhp->ibqp.uobject)
                wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
                                       FW_RI_QP_STAG0_ENABLE;
        wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
        wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
        wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
        wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
        wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
        wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
        wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
        wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
        wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
        wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
        wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
        wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
        wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
                                           rhp->rdev.lldi.vr->rq.start);
        if (qhp->attr.mpa_attr.initiator)
                build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);

        ret = c4iw_ofld_send(&rhp->rdev, skb);
        if (ret)
                goto out;

        ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait,
                                  qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
out:
        PDBG("%s ret %d\n", __func__, ret);
        return ret;
}

/*
 * Called by the library when the qp has user dbs disabled due to
 * a DB_FULL condition.  This function will single-thread all user
 * DB rings to avoid overflowing the hw db-fifo.
 */
static int ring_kernel_db(struct c4iw_qp *qhp, u32 qid, u16 inc)
{
        int delay = db_delay_usecs;

        mutex_lock(&qhp->rhp->db_mutex);
        do {
                /*
                 * The interrupt threshold is dbfifo_int_thresh << 6, so
                 * stay below half of that (<< 5) to make sure we never
                 * cross it and generate an interrupt.
                 */
                if (cxgb4_dbfifo_count(qhp->rhp->rdev.lldi.ports[0], 1) <
                    (qhp->rhp->rdev.lldi.dbfifo_int_thresh << 5)) {
                        writel(QID(qid) | PIDX(inc), qhp->wq.db);
                        break;
                }
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(usecs_to_jiffies(delay));
                delay = min(delay << 1, 2000);
        } while (1);
        mutex_unlock(&qhp->rhp->db_mutex);
        return 0;
}

int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                   enum c4iw_qp_attr_mask mask,
                   struct c4iw_qp_attributes *attrs,
                   int internal)
{
        int ret = 0;
        struct c4iw_qp_attributes newattr = qhp->attr;
        int disconnect = 0;
        int terminate = 0;
        int abort = 0;
        int free = 0;
        struct c4iw_ep *ep = NULL;

        PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__,
             qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
             (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

        mutex_lock(&qhp->mutex);

        /* Process attr changes if in IDLE */
        if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
                if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
                        ret = -EIO;
                        goto out;
                }
                if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
                        newattr.enable_rdma_read = attrs->enable_rdma_read;
                if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
                        newattr.enable_rdma_write = attrs->enable_rdma_write;
                if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
                        newattr.enable_bind = attrs->enable_bind;
                if (mask & C4IW_QP_ATTR_MAX_ORD) {
                        if (attrs->max_ord > c4iw_max_read_depth) {
                                ret = -EINVAL;
                                goto out;
                        }
                        newattr.max_ord = attrs->max_ord;
                }
                if (mask & C4IW_QP_ATTR_MAX_IRD) {
                        if (attrs->max_ird > c4iw_max_read_depth) {
                                ret = -EINVAL;
                                goto out;
                        }
                        newattr.max_ird = attrs->max_ird;
                }
                qhp->attr = newattr;
        }

        if (mask & C4IW_QP_ATTR_SQ_DB) {
                ret = ring_kernel_db(qhp, qhp->wq.sq.qid, attrs->sq_db_inc);
                goto out;
        }
        if (mask & C4IW_QP_ATTR_RQ_DB) {
                ret = ring_kernel_db(qhp, qhp->wq.rq.qid, attrs->rq_db_inc);
                goto out;
        }

        if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
                goto out;
        if (qhp->attr.state == attrs->next_state)
                goto out;

        switch (qhp->attr.state) {
        case C4IW_QP_STATE_IDLE:
                switch (attrs->next_state) {
                case C4IW_QP_STATE_RTS:
                        if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
                                ret = -EINVAL;
                                goto out;
                        }
                        if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
                                ret = -EINVAL;
                                goto out;
                        }
                        qhp->attr.mpa_attr = attrs->mpa_attr;
                        qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
                        qhp->ep = qhp->attr.llp_stream_handle;
                        set_state(qhp, C4IW_QP_STATE_RTS);

                        /*
                         * Ref the endpoint here and deref when we
                         * disassociate the endpoint from the QP.  This
                         * happens in CLOSING->IDLE transition or *->ERROR
                         * transition.
                         */
                        c4iw_get_ep(&qhp->ep->com);
                        ret = rdma_init(rhp, qhp);
                        if (ret)
                                goto err;
                        break;
                case C4IW_QP_STATE_ERROR:
                        set_state(qhp, C4IW_QP_STATE_ERROR);
                        flush_qp(qhp);
                        break;
                default:
                        ret = -EINVAL;
                        goto out;
                }
                break;
        case C4IW_QP_STATE_RTS:
                switch (attrs->next_state) {
                case C4IW_QP_STATE_CLOSING:
                        BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
                        set_state(qhp, C4IW_QP_STATE_CLOSING);
                        ep = qhp->ep;
                        if (!internal) {
                                abort = 0;
                                disconnect = 1;
                                c4iw_get_ep(&qhp->ep->com);
                        }
                        if (qhp->ibqp.uobject)
                                t4_set_wq_in_error(&qhp->wq);
                        ret = rdma_fini(rhp, qhp, ep);
                        if (ret)
                                goto err;
                        break;
                case C4IW_QP_STATE_TERMINATE:
                        set_state(qhp, C4IW_QP_STATE_TERMINATE);
                        qhp->attr.layer_etype = attrs->layer_etype;
                        qhp->attr.ecode = attrs->ecode;
                        if (qhp->ibqp.uobject)
                                t4_set_wq_in_error(&qhp->wq);
                        ep = qhp->ep;
                        if (!internal)
                                terminate = 1;
                        disconnect = 1;
                        c4iw_get_ep(&qhp->ep->com);
                        break;
                case C4IW_QP_STATE_ERROR:
                        set_state(qhp, C4IW_QP_STATE_ERROR);
                        if (qhp->ibqp.uobject)
                                t4_set_wq_in_error(&qhp->wq);
                        if (!internal) {
                                abort = 1;
                                disconnect = 1;
                                ep = qhp->ep;
                                c4iw_get_ep(&qhp->ep->com);
                        }
                        goto err;
                        break;
                default:
                        ret = -EINVAL;
                        goto out;
                }
                break;
        case C4IW_QP_STATE_CLOSING:
                if (!internal) {
                        ret = -EINVAL;
                        goto out;
                }
                switch (attrs->next_state) {
                case C4IW_QP_STATE_IDLE:
                        flush_qp(qhp);
                        set_state(qhp, C4IW_QP_STATE_IDLE);
                        qhp->attr.llp_stream_handle = NULL;
                        c4iw_put_ep(&qhp->ep->com);
                        qhp->ep = NULL;
                        wake_up(&qhp->wait);
                        break;
                case C4IW_QP_STATE_ERROR:
                        goto err;
                default:
                        ret = -EINVAL;
                        goto err;
                }
                break;
        case C4IW_QP_STATE_ERROR:
                if (attrs->next_state != C4IW_QP_STATE_IDLE) {
                        ret = -EINVAL;
                        goto out;
                }
                if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
                        ret = -EINVAL;
                        goto out;
                }
                set_state(qhp, C4IW_QP_STATE_IDLE);
                break;
        case C4IW_QP_STATE_TERMINATE:
                if (!internal) {
                        ret = -EINVAL;
                        goto out;
                }
                goto err;
                break;
        default:
                printk(KERN_ERR "%s in a bad state %d\n",
                       __func__, qhp->attr.state);
                ret = -EINVAL;
                goto err;
                break;
        }
        goto out;
err:
        PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
             qhp->wq.sq.qid);

        /* disassociate the LLP connection */
        qhp->attr.llp_stream_handle = NULL;
        if (!ep)
                ep = qhp->ep;
        qhp->ep = NULL;
        set_state(qhp, C4IW_QP_STATE_ERROR);
        free = 1;
        abort = 1;
        wake_up(&qhp->wait);
        BUG_ON(!ep);
        flush_qp(qhp);
out:
        mutex_unlock(&qhp->mutex);

        if (terminate)
                post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);

        /*
         * If disconnect is 1, then we need to initiate a disconnect
         * on the EP.  This can be a normal close (RTS->CLOSING) or
         * an abnormal close (RTS/CLOSING->ERROR).
         */
        if (disconnect) {
                c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
                                                         GFP_KERNEL);
                c4iw_put_ep(&ep->com);
        }

        /*
         * If free is 1, then we've disassociated the EP from the QP
         * and we need to dereference the EP.
         */
        if (free)
                c4iw_put_ep(&ep->com);
        PDBG("%s exit state %d\n", __func__, qhp->attr.state);
        return ret;
}

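/*
 * idr_for_each() callback, used with its counterpart disable_qp_db()
 * below to re-enable or disable user doorbells when the device leaves
 * or enters doorbell flow-control mode.
 */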
static int enable_qp_db(int id, void *p, void *data)
{
        struct c4iw_qp *qp = p;

        t4_enable_wq_db(&qp->wq);
        return 0;
}

Steve Wisecfdda9d2010-04-21 15:30:06 -07001462int c4iw_destroy_qp(struct ib_qp *ib_qp)
1463{
1464 struct c4iw_dev *rhp;
1465 struct c4iw_qp *qhp;
1466 struct c4iw_qp_attributes attrs;
1467 struct c4iw_ucontext *ucontext;
1468
1469 qhp = to_c4iw_qp(ib_qp);
1470 rhp = qhp->rhp;
1471
1472 attrs.next_state = C4IW_QP_STATE_ERROR;
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05301473 if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
1474 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1475 else
1476 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001477 wait_event(qhp->wait, !qhp->ep);
1478
Vipul Pandya422eea02012-05-18 15:29:30 +05301479 spin_lock_irq(&rhp->lock);
1480 remove_handle_nolock(rhp, &rhp->qpidr, qhp->wq.sq.qid);
1481 rhp->qpcnt--;
1482 BUG_ON(rhp->qpcnt < 0);
1483 if (rhp->qpcnt <= db_fc_threshold && rhp->db_state == FLOW_CONTROL) {
1484 rhp->rdev.stats.db_state_transitions++;
1485 rhp->db_state = NORMAL;
1486 idr_for_each(&rhp->qpidr, enable_qp_db, NULL);
1487 }
Vipul Pandya80ccdd62013-03-14 05:09:00 +00001488 if (db_coalescing_threshold >= 0)
1489 if (rhp->qpcnt <= db_coalescing_threshold)
1490 cxgb4_enable_db_coalescing(rhp->rdev.lldi.ports[0]);
Vipul Pandya422eea02012-05-18 15:29:30 +05301491 spin_unlock_irq(&rhp->lock);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001492 atomic_dec(&qhp->refcnt);
1493 wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
1494
	ucontext = ib_qp->uobject ?
		   to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
	kfree(qhp);
	return 0;
}

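/*
 * idr_for_each() callback: mask the user doorbell of one QP.  Called
 * under rhp->lock when the device enters db flow-control mode.
 */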
static int disable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_disable_wq_db(&qp->wq);
	return 0;
}

struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_pd *php;
	struct c4iw_cq *schp;
	struct c4iw_cq *rchp;
	struct c4iw_create_qp_resp uresp;
	int sqsize, rqsize;
	struct c4iw_ucontext *ucontext;
	int ret;
	struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4, *mm5 = NULL;

	PDBG("%s ib_pd %p\n", __func__, pd);

	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
		return ERR_PTR(-EINVAL);

	rqsize = roundup(attrs->cap.max_recv_wr + 1, 16);
	if (rqsize > T4_MAX_RQ_SIZE)
		return ERR_PTR(-E2BIG);

	sqsize = roundup(attrs->cap.max_send_wr + 1, 16);
	if (sqsize > T4_MAX_SQ_SIZE)
		return ERR_PTR(-E2BIG);

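	/*
	 * Queue depths are rounded up to a multiple of 16 entries, with
	 * one slot of headroom over what the caller requested.  The
	 * memsize computed below reserves one additional entry; in this
	 * driver's queue layout that tail space holds the hardware
	 * status page.
	 */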
	ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;

	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.sq.size = sqsize;
	qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue;
	qhp->wq.rq.size = rqsize;
	qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue;

	if (ucontext) {
		qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
		qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
	}

	PDBG("%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu\n",
	     __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);

	ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize - 1;
	attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct c4iw_cq *)attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = C4IW_QP_STATE_IDLE;
	qhp->attr.next_state = C4IW_QP_STATE_IDLE;
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;
	spin_lock_init(&qhp->lock);
	mutex_init(&qhp->mutex);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);

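	/*
	 * Account for the new QP under rhp->lock.  A QP created while
	 * the device is already in flow-control mode starts with its
	 * doorbell masked; if this QP pushes the count past
	 * db_fc_threshold, enter flow-control mode and mask every
	 * queue's doorbell, and past db_coalescing_threshold, disable
	 * doorbell coalescing.
	 */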
	spin_lock_irq(&rhp->lock);
	if (rhp->db_state != NORMAL)
		t4_disable_wq_db(&qhp->wq);
	rhp->qpcnt++;
	if (rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
		rhp->rdev.stats.db_state_transitions++;
		rhp->db_state = FLOW_CONTROL;
		idr_for_each(&rhp->qpidr, disable_qp_db, NULL);
	}
	if (db_coalescing_threshold >= 0 &&
	    rhp->qpcnt > db_coalescing_threshold)
		cxgb4_disable_db_coalescing(rhp->rdev.lldi.ports[0]);
	ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
	spin_unlock_irq(&rhp->lock);
	if (ret)
		goto err2;

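	/*
	 * For userspace QPs, hand back mmap keys so the library can map
	 * the SQ and RQ memory, the doorbell/GTS page of each queue,
	 * and, for on-chip SQs, the MA sync register page.  The keys are
	 * allocated under mmap_lock and consumed by insert_mmap() below.
	 */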
	if (udata) {
		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
		if (!mm1) {
			ret = -ENOMEM;
			goto err3;
		}
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			ret = -ENOMEM;
			goto err4;
		}
		mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
		if (!mm3) {
			ret = -ENOMEM;
			goto err5;
		}
		mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
		if (!mm4) {
			ret = -ENOMEM;
			goto err6;
		}
		if (t4_sq_onchip(&qhp->wq.sq)) {
			mm5 = kmalloc(sizeof *mm5, GFP_KERNEL);
			if (!mm5) {
				ret = -ENOMEM;
				goto err7;
			}
			uresp.flags = C4IW_QPF_ONCHIP;
		} else
			uresp.flags = 0;
		uresp.qid_mask = rhp->rdev.qpmask;
		uresp.sqid = qhp->wq.sq.qid;
		uresp.sq_size = qhp->wq.sq.size;
		uresp.sq_memsize = qhp->wq.sq.memsize;
		uresp.rqid = qhp->wq.rq.qid;
		uresp.rq_size = qhp->wq.rq.size;
		uresp.rq_memsize = qhp->wq.rq.memsize;
		spin_lock(&ucontext->mmap_lock);
		if (mm5) {
			uresp.ma_sync_key = ucontext->key;
			ucontext->key += PAGE_SIZE;
		}
		uresp.sq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.sq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (ret)
			goto err8;
		mm1->key = uresp.sq_key;
		mm1->addr = qhp->wq.sq.phys_addr;
		mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.rq_key;
		mm2->addr = virt_to_phys(qhp->wq.rq.queue);
		mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
		insert_mmap(ucontext, mm2);
		mm3->key = uresp.sq_db_gts_key;
		mm3->addr = qhp->wq.sq.udb;
		mm3->len = PAGE_SIZE;
		insert_mmap(ucontext, mm3);
		mm4->key = uresp.rq_db_gts_key;
		mm4->addr = qhp->wq.rq.udb;
		mm4->len = PAGE_SIZE;
		insert_mmap(ucontext, mm4);
		if (mm5) {
			mm5->key = uresp.ma_sync_key;
			mm5->addr = (pci_resource_start(rhp->rdev.lldi.pdev, 0)
				     + A_PCIE_MA_SYNC) & PAGE_MASK;
			mm5->len = PAGE_SIZE;
			insert_mmap(ucontext, mm5);
		}
	}
	qhp->ibqp.qp_num = qhp->wq.sq.qid;
	init_timer(&(qhp->timer));
	PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
	     __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	     qhp->wq.sq.qid);
	return &qhp->ibqp;
err8:
	kfree(mm5);
err7:
	kfree(mm4);
err6:
	kfree(mm3);
err5:
	kfree(mm2);
err4:
	kfree(mm1);
err3:
	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
err2:
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(qhp);
	return ERR_PTR(ret);
}

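/*
 * Translate an ib_modify_qp() request into the driver's internal
 * attribute mask and hand it off to c4iw_modify_qp().  Note that the
 * PSN fields are repurposed below to carry doorbell index increments;
 * see the comment at the SQ_PSN/RQ_PSN handling.
 */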
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	enum c4iw_qp_attr_mask mask = 0;
	struct c4iw_qp_attributes attrs;

	PDBG("%s ib_qp %p\n", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_c4iw_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = c4iw_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
				  IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				   IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
		(C4IW_QP_ATTR_ENABLE_RDMA_READ |
		 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
		 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	/*
	 * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
	 * ringing the queue db when we're in DB_FULL mode.
	 */
	attrs.sq_db_inc = attr->sq_psn;
	attrs.rq_db_inc = attr->rq_psn;
	mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
	mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;

	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
}

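/*
 * A minimal sketch (hypothetical kernel caller, not part of this file)
 * of ringing a masked doorbell through the PSN overload above.  The
 * index increments sq_inc/rq_inc are assumed to come from the caller's
 * own accounting of pending work requests:
 *
 *	struct ib_qp_attr attr = {
 *		.sq_psn = sq_inc,
 *		.rq_psn = rq_inc,
 *	};
 *
 *	ret = ib_modify_qp(qp, &attr, IB_QP_SQ_PSN | IB_QP_RQ_PSN);
 */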
struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
{
	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}

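/*
 * Only the current QP state is reported; every other attribute is
 * returned zeroed.
 */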
int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);

	memset(attr, 0, sizeof *attr);
	memset(init_attr, 0, sizeof *init_attr);
	attr->qp_state = to_ib_qp_state(qhp->attr.state);
	return 0;
}