/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>

#include "iw_cxgb4.h"

static int db_delay_usecs = 1;
module_param(db_delay_usecs, int, 0644);
MODULE_PARM_DESC(db_delay_usecs, "Usecs to delay awaiting db fifo to drain");

static int ocqp_support = 1;
module_param(ocqp_support, int, 0644);
MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");

int db_fc_threshold = 1000;
module_param(db_fc_threshold, int, 0644);
MODULE_PARM_DESC(db_fc_threshold,
		 "QP count/threshold that triggers"
		 " automatic db flow control mode (default = 1000)");

int db_coalescing_threshold;
module_param(db_coalescing_threshold, int, 0644);
MODULE_PARM_DESC(db_coalescing_threshold,
		 "QP count/threshold that triggers"
		 " disabling db coalescing (default = 0)");

static int max_fr_immd = T4_MAX_FR_IMMD;
module_param(max_fr_immd, int, 0644);
MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");

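/* Update the software QP state, serialized by the qhp lock. */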
static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
{
	unsigned long flag;
	spin_lock_irqsave(&qhp->lock, flag);
	qhp->attr.state = state;
	spin_unlock_irqrestore(&qhp->lock, flag);
}

static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);
}

static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
			  pci_unmap_addr(sq, mapping));
}

static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	if (t4_sq_onchip(sq))
		dealloc_oc_sq(rdev, sq);
	else
		dealloc_host_sq(rdev, sq);
}

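/*
 * Carve the SQ out of on-chip queue (OCQP) memory.  Fails with -ENOSYS
 * when on-chip queues are disabled via ocqp_support or not offered by
 * the device, in which case the caller falls back to host memory.
 */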
static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	if (!ocqp_support || !ocqp_supported(&rdev->lldi))
		return -ENOSYS;
	sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
	if (!sq->dma_addr)
		return -ENOMEM;
	sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr -
			rdev->lldi.vr->ocq.start;
	sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr -
					    rdev->lldi.vr->ocq.start);
	sq->flags |= T4_SQ_ONCHIP;
	return 0;
}

static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize,
				       &(sq->dma_addr), GFP_KERNEL);
	if (!sq->queue)
		return -ENOMEM;
	sq->phys_addr = virt_to_phys(sq->queue);
	pci_unmap_addr_set(sq, mapping, sq->dma_addr);
	return 0;
}

static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
{
	int ret = -ENOSYS;
	if (user)
		ret = alloc_oc_sq(rdev, sq);
	if (ret)
		ret = alloc_host_sq(rdev, sq);
	return ret;
}

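/*
 * Free the RQ and SQ queue memory, the RQT entries, and the software
 * shadow queues, and hand both qids back to the resource allocator.
 */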
static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		      struct c4iw_dev_ucontext *uctx)
{
	/*
	 * uP clears EQ contexts when the connection exits rdma mode,
	 * so no need to post a RESET WR for these EQs.
	 */
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
	dealloc_sq(rdev, &wq->sq);
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
	kfree(wq->rq.sw_rq);
	kfree(wq->sq.sw_sq);
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return 0;
}

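/*
 * Allocate the HW resources for an SQ/RQ pair: qids, queue memory, the
 * RQT, and (for kernel QPs) the software shadow queues, then program
 * both egress queue contexts with a single FW_RI_RES_WR and wait for
 * the firmware reply.
 */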
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx)
{
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret = 0;
	int eqsize;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid) {
		ret = -ENOMEM;
		goto free_sq_qid;
	}

	if (!user) {
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
				       GFP_KERNEL);
		if (!wq->sq.sw_sq) {
			ret = -ENOMEM;
			goto free_rq_qid;
		}

		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
				       GFP_KERNEL);
		if (!wq->rq.sw_rq) {
			ret = -ENOMEM;
			goto free_sw_sq;
		}
	}

	/*
	 * RQT must be a power of 2.
	 */
	wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr) {
		ret = -ENOMEM;
		goto free_sw_rq;
	}

	ret = alloc_sq(rdev, &wq->sq, user);
	if (ret)
		goto free_hwaddr;
	memset(wq->sq.queue, 0, wq->sq.memsize);
	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
					  wq->rq.memsize, &(wq->rq.dma_addr),
					  GFP_KERNEL);
	if (!wq->rq.queue) {
		ret = -ENOMEM;
		goto free_sq;
	}
	PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
	     __func__, wq->sq.queue,
	     (unsigned long long)virt_to_phys(wq->sq.queue),
	     wq->rq.queue,
	     (unsigned long long)virt_to_phys(wq->rq.queue));
	memset(wq->rq.queue, 0, wq->rq.memsize);
	dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

	wq->db = rdev->lldi.db_reg;
	wq->gts = rdev->lldi.gts_reg;
	if (user) {
		wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(wq->sq.qid << rdev->qpshift);
		wq->sq.udb &= PAGE_MASK;
		wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(wq->rq.qid << rdev->qpshift);
		wq->rq.udb &= PAGE_MASK;
	}
	wq->rdev = rdev;
	wq->rq.msn = 1;

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto free_dma;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(2) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		(t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) |
		V_FW_RI_RES_WR_IQID(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto free_dma;
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
	if (ret)
		goto free_dma;

	PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
	     __func__, wq->sq.qid, wq->rq.qid, wq->db,
	     (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);

	return 0;
free_dma:
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
free_sq:
	dealloc_sq(rdev, &wq->sq);
free_hwaddr:
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
free_sw_rq:
	kfree(wq->rq.sw_rq);
free_sw_sq:
	kfree(wq->sq.sw_sq);
free_rq_qid:
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
free_sq_qid:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return ret;
}

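/*
 * Copy the gather list inline into the WQE as immediate data, wrapping
 * at the end of the SQ and zero-padding out to a 16-byte boundary.
 */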
static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
		      struct ib_send_wr *wr, int max, u32 *plenp)
{
	u8 *dstp, *srcp;
	u32 plen = 0;
	int i;
	int rem, len;

	dstp = (u8 *)immdp->data;
	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) > max)
			return -EMSGSIZE;
		srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
		plen += wr->sg_list[i].length;
		rem = wr->sg_list[i].length;
		while (rem) {
			if (dstp == (u8 *)&sq->queue[sq->size])
				dstp = (u8 *)sq->queue;
			if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
				len = rem;
			else
				len = (u8 *)&sq->queue[sq->size] - dstp;
			memcpy(dstp, srcp, len);
			dstp += len;
			srcp += len;
			rem -= len;
		}
	}
	len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
	if (len)
		memset(dstp, 0, len);
	immdp->op = FW_RI_DATA_IMMD;
	immdp->r1 = 0;
	immdp->r2 = 0;
	immdp->immdlen = cpu_to_be32(plen);
	*plenp = plen;
	return 0;
}

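/*
 * Build an immediate scatter/gather list of (lkey, length) and address
 * flits in the WQE, wrapping from queue_end back to queue_start.
 */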
static int build_isgl(__be64 *queue_start, __be64 *queue_end,
		      struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
		      int num_sge, u32 *plenp)
{
	int i;
	u32 plen = 0;
	__be64 *flitp = (__be64 *)isglp->sge;

	for (i = 0; i < num_sge; i++) {
		if ((plen + sg_list[i].length) < plen)
			return -EMSGSIZE;
		plen += sg_list[i].length;
		*flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
				     sg_list[i].length);
		if (++flitp == queue_end)
			flitp = queue_start;
		*flitp = cpu_to_be64(sg_list[i].addr);
		if (++flitp == queue_end)
			flitp = queue_start;
	}
	*flitp = (__force __be64)0;
	isglp->op = FW_RI_DATA_ISGL;
	isglp->r1 = 0;
	isglp->nsge = cpu_to_be16(num_sge);
	isglp->r2 = 0;
	if (plenp)
		*plenp = plen;
	return 0;
}

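/*
 * Fill in a FW_RI_SEND_WR for SEND/SEND_WITH_INV, inlining the payload
 * when IB_SEND_INLINE is set and building an ISGL otherwise.
 */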
static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
			   struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	switch (wr->opcode) {
	case IB_WR_SEND:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
		wqe->send.stag_inv = 0;
		break;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
		wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
		break;

	default:
		return -EINVAL;
	}

	plen = 0;
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->send.u.immd_src, wr,
					 T4_MAX_SEND_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->send.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->send.u.immd_src[0].r1 = 0;
		wqe->send.u.immd_src[0].r2 = 0;
		wqe->send.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}

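/*
 * Fill in a FW_RI_RDMA_WRITE_WR; like build_rdma_send(), the payload is
 * either inlined or described by an ISGL.
 */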
static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
			    struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	wqe->write.r2 = 0;
	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->write.u.immd_src, wr,
					 T4_MAX_WRITE_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->write.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->write.u.immd_src[0].r1 = 0;
		wqe->write.u.immd_src[0].r2 = 0;
		wqe->write.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}

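/*
 * Fill in a FW_RI_RDMA_READ_WR.  Only a single SGE is supported; a
 * zero-length read is posted with a dummy stag of 2.
 */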
static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
	if (wr->num_sge > 1)
		return -EINVAL;
	if (wr->num_sge) {
		wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
		wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr
							>> 32));
		wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
		wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
		wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
		wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
							 >> 32));
		wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
	} else {
		wqe->read.stag_src = cpu_to_be32(2);
		wqe->read.to_src_hi = 0;
		wqe->read.to_src_lo = 0;
		wqe->read.stag_sink = cpu_to_be32(2);
		wqe->read.plen = 0;
		wqe->read.to_sink_hi = 0;
		wqe->read.to_sink_lo = 0;
	}
	wqe->read.r2 = 0;
	wqe->read.r5 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
	return 0;
}

static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
			   struct ib_recv_wr *wr, u8 *len16)
{
	int ret;

	ret = build_isgl((__be64 *)qhp->wq.rq.queue,
			 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
			 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
	if (ret)
		return ret;
	*len16 = DIV_ROUND_UP(sizeof wqe->recv +
			      wr->num_sge * sizeof(struct fw_ri_sge), 16);
	return 0;
}

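/*
 * Build a fastreg WR.  On T5 devices with use_dsgl set, page lists
 * larger than max_fr_immd are fetched by the hardware via a DSGL;
 * otherwise the PBL is copied into the WQE as immediate data.
 */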
static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
			 struct ib_send_wr *wr, u8 *len16, u8 t5dev)
{
	struct fw_ri_immd *imdp;
	__be64 *p;
	int i;
	int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
	int rem;

	if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
		return -EINVAL;

	wqe->fr.qpbinde_to_dcacpu = 0;
	wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12;
	wqe->fr.addr_type = FW_RI_VA_BASED_TO;
	wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags);
	wqe->fr.len_hi = 0;
	wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length);
	wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
	wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
	wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
					0xffffffff);

	if (t5dev && use_dsgl && (pbllen > max_fr_immd)) {
		struct c4iw_fr_page_list *c4pl =
			to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
		struct fw_ri_dsgl *sglp;

		for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
			wr->wr.fast_reg.page_list->page_list[i] = (__force u64)
				cpu_to_be64((u64)
				wr->wr.fast_reg.page_list->page_list[i]);
		}

		sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
		sglp->op = FW_RI_DATA_DSGL;
		sglp->r1 = 0;
		sglp->nsge = cpu_to_be16(1);
		sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
		sglp->len0 = cpu_to_be32(pbllen);

		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
	} else {
		imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
		imdp->op = FW_RI_DATA_IMMD;
		imdp->r1 = 0;
		imdp->r2 = 0;
		imdp->immdlen = cpu_to_be32(pbllen);
		p = (__be64 *)(imdp + 1);
		rem = pbllen;
		for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
			*p = cpu_to_be64(
				(u64)wr->wr.fast_reg.page_list->page_list[i]);
			rem -= sizeof(*p);
			if (++p == (__be64 *)&sq->queue[sq->size])
				p = (__be64 *)sq->queue;
		}
		BUG_ON(rem < 0);
		while (rem) {
			*p = 0;
			rem -= sizeof(*p);
			if (++p == (__be64 *)&sq->queue[sq->size])
				p = (__be64 *)sq->queue;
		}
		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)
				      + pbllen, 16);
	}
	return 0;
}

static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
			  u8 *len16)
{
	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
	wqe->inv.r2 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
	return 0;
}

void c4iw_qp_add_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	atomic_inc(&(to_c4iw_qp(qp)->refcnt));
}

void c4iw_qp_rem_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
		wake_up(&(to_c4iw_qp(qp)->wait));
}

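/*
 * Post a chain of send work requests.  Each WR is translated into a
 * T4 WQE at the SQ producer index, and the doorbell is rung once for
 * the whole chain, all under the qhp lock.
 */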
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr)
{
	int err = 0;
	u8 len16 = 0;
	enum fw_wr_opcodes fw_opcode = 0;
	enum fw_ri_wr_flags fw_flags;
	struct c4iw_qp *qhp;
	union t4_wr *wqe;
	u32 num_wrs;
	struct t4_swsqe *swsqe;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = t4_sq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (num_wrs == 0) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
		      qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);

		fw_flags = 0;
		if (wr->send_flags & IB_SEND_SOLICITED)
			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all)
			fw_flags |= FW_RI_COMPLETION_FLAG;
		swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
		switch (wr->opcode) {
		case IB_WR_SEND_WITH_INV:
		case IB_WR_SEND:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_READ_FENCE_FLAG;
			fw_opcode = FW_RI_SEND_WR;
			if (wr->opcode == IB_WR_SEND)
				swsqe->opcode = FW_RI_SEND;
			else
				swsqe->opcode = FW_RI_SEND_WITH_INV;
			err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_WRITE:
			fw_opcode = FW_RI_RDMA_WRITE_WR;
			swsqe->opcode = FW_RI_RDMA_WRITE;
			err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_READ_WITH_INV:
			fw_opcode = FW_RI_RDMA_READ_WR;
			swsqe->opcode = FW_RI_READ_REQ;
			if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
				fw_flags = FW_RI_RDMA_READ_INVALIDATE;
			else
				fw_flags = 0;
			err = build_rdma_read(wqe, wr, &len16);
			if (err)
				break;
			swsqe->read_len = wr->sg_list[0].length;
			if (!qhp->wq.sq.oldest_read)
				qhp->wq.sq.oldest_read = swsqe;
			break;
		case IB_WR_FAST_REG_MR:
			fw_opcode = FW_RI_FR_NSMR_WR;
			swsqe->opcode = FW_RI_FAST_REGISTER;
			err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16,
					    is_t5(qhp->rhp->rdev.lldi.adapter_type) ?
					    1 : 0);
			break;
		case IB_WR_LOCAL_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
			fw_opcode = FW_RI_INV_LSTAG_WR;
			swsqe->opcode = FW_RI_LOCAL_INV;
			err = build_inv_stag(wqe, wr, &len16);
			break;
		default:
			PDBG("%s post of type=%d TBD!\n", __func__,
			     wr->opcode);
			err = -EINVAL;
		}
		if (err) {
			*bad_wr = wr;
			break;
		}
		swsqe->idx = qhp->wq.sq.pidx;
		swsqe->complete = 0;
		swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
				  qhp->sq_sig_all;
		swsqe->flushed = 0;
		swsqe->wr_id = wr->wr_id;

		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);

		PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
		     __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
		     swsqe->opcode, swsqe->read_len);
		wr = wr->next;
		num_wrs--;
		t4_sq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
	}
	if (t4_wq_db_enabled(&qhp->wq))
		t4_ring_sq_db(&qhp->wq, idx);
	spin_unlock_irqrestore(&qhp->lock, flag);
	return err;
}

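/*
 * Post a chain of receive work requests, mirroring c4iw_post_send():
 * build an RQ WQE per WR and ring the doorbell once at the end.
 */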
int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct c4iw_qp *qhp;
	union t4_recv_wr *wqe;
	u32 num_wrs;
	u8 len16 = 0;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = t4_rq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (wr->num_sge > T4_MAX_RECV_SGE) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
					   qhp->wq.rq.wq_pidx *
					   T4_EQ_ENTRY_SIZE);
		if (num_wrs)
			err = build_rdma_recv(qhp, wqe, wr, &len16);
		else
			err = -ENOMEM;
		if (err) {
			*bad_wr = wr;
			break;
		}

		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;

		wqe->recv.opcode = FW_RI_RECV_WR;
		wqe->recv.r1 = 0;
		wqe->recv.wrid = qhp->wq.rq.pidx;
		wqe->recv.r2[0] = 0;
		wqe->recv.r2[1] = 0;
		wqe->recv.r2[2] = 0;
		wqe->recv.len16 = len16;
		PDBG("%s cookie 0x%llx pidx %u\n", __func__,
		     (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
		t4_rq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
		wr = wr->next;
		num_wrs--;
	}
	if (t4_wq_db_enabled(&qhp->wq))
		t4_ring_rq_db(&qhp->wq, idx);
	spin_unlock_irqrestore(&qhp->lock, flag);
	return err;
}

int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)
{
	return -ENOSYS;
}

static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
				    u8 *ecode)
{
	int status;
	int tagged;
	int opcode;
	int rqtype;
	int send_inv;

	if (!err_cqe) {
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		return;
	}

	status = CQE_STATUS(err_cqe);
	opcode = CQE_OPCODE(err_cqe);
	rqtype = RQ_TYPE(err_cqe);
	send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
		   (opcode == FW_RI_SEND_WITH_SE_INV);
	tagged = (opcode == FW_RI_RDMA_WRITE) ||
		 (rqtype && (opcode == FW_RI_READ_RESP));

	switch (status) {
	case T4_ERR_STAG:
		if (send_inv) {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
			*ecode = RDMAP_CANT_INV_STAG;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_INV_STAG;
		}
		break;
	case T4_ERR_PDID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		if ((opcode == FW_RI_SEND_WITH_INV) ||
		    (opcode == FW_RI_SEND_WITH_SE_INV))
			*ecode = RDMAP_CANT_INV_STAG;
		else
			*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_QPID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_ACCESS:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_ACC_VIOL;
		break;
	case T4_ERR_WRAP:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_TO_WRAP;
		break;
	case T4_ERR_BOUND:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_BASE_BOUNDS;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_BASE_BOUNDS;
		}
		break;
	case T4_ERR_INVALIDATE_SHARED_MR:
	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_CANT_INV_STAG;
		break;
	case T4_ERR_ECC:
	case T4_ERR_ECC_PSTAG:
	case T4_ERR_INTERNAL_ERR:
		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_OUT_OF_RQE:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_NOBUF;
		break;
	case T4_ERR_PBL_ADDR_BOUND:
		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
		*ecode = DDPT_BASE_BOUNDS;
		break;
	case T4_ERR_CRC:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_CRC_ERR;
		break;
	case T4_ERR_MARKER:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_MARKER_ERR;
		break;
	case T4_ERR_PDU_LEN_ERR:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_MSG_TOOBIG;
		break;
	case T4_ERR_DDP_VERSION:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_INV_VERS;
		} else {
			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
			*ecode = DDPU_INV_VERS;
		}
		break;
	case T4_ERR_RDMA_VERSION:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_VERS;
		break;
	case T4_ERR_OPCODE:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_OPCODE;
		break;
	case T4_ERR_DDP_QUEUE_NUM:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_QN;
		break;
	case T4_ERR_MSN:
	case T4_ERR_MSN_GAP:
	case T4_ERR_MSN_RANGE:
	case T4_ERR_IRD_OVERFLOW:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_RANGE;
		break;
	case T4_ERR_TBIT:
		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_MO:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MO;
		break;
	default:
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	}
}

static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
			   gfp_t gfp)
{
	struct fw_ri_wr *wqe;
	struct sk_buff *skb;
	struct terminate_message *term;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     qhp->ep->hwtid);

	skb = alloc_skb(sizeof *wqe, gfp);
	if (!skb)
		return;
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(FW_WR_OP(FW_RI_INIT_WR));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(qhp->ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
	wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
	term = (struct terminate_message *)wqe->u.terminate.termmsg;
	if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
		term->layer_etype = qhp->attr.layer_etype;
		term->ecode = qhp->attr.ecode;
	} else
		build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
	c4iw_ofld_send(&qhp->rhp->rdev, skb);
}

/*
 * Assumes qhp lock is held.
 */
static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
		       struct c4iw_cq *schp)
{
	int count;
	int flushed;
	unsigned long flag;

	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, flag);
	spin_lock(&qhp->lock);

	if (qhp->wq.flushed) {
		spin_unlock(&qhp->lock);
		spin_unlock_irqrestore(&rchp->lock, flag);
		return;
	}
	qhp->wq.flushed = 1;

	c4iw_flush_hw_cq(rchp);
	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
	flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&rchp->lock, flag);
	if (flushed) {
		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
	}

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&schp->lock, flag);
	spin_lock(&qhp->lock);
	if (schp != rchp)
		c4iw_flush_hw_cq(schp);
	flushed = c4iw_flush_sq(qhp);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&schp->lock, flag);
	if (flushed) {
		spin_lock_irqsave(&schp->comp_handler_lock, flag);
		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
		spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
	}
}

static void flush_qp(struct c4iw_qp *qhp)
{
	struct c4iw_cq *rchp, *schp;
	unsigned long flag;

	rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
	schp = to_c4iw_cq(qhp->ibqp.send_cq);

	t4_set_wq_in_error(&qhp->wq);
	if (qhp->ibqp.uobject) {
		t4_set_cq_in_error(&rchp->cq);
		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
		if (schp != rchp) {
			t4_set_cq_in_error(&schp->cq);
			spin_lock_irqsave(&schp->comp_handler_lock, flag);
			(*schp->ibcq.comp_handler)(&schp->ibcq,
					schp->ibcq.cq_context);
			spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
		}
		return;
	}
	__flush_qp(qhp, rchp, schp);
}

static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		     struct c4iw_ep *ep)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct sk_buff *skb;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     ep->hwtid);

	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP(FW_RI_INIT_WR) |
		FW_WR_COMPL(1));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
	wqe->cookie = (unsigned long) &ep->com.wr_wait;

	wqe->u.fini.type = FW_RI_TYPE_FINI;
	ret = c4iw_ofld_send(&rhp->rdev, skb);
	if (ret)
		goto out;

	ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid,
				  qhp->wq.sq.qid, __func__);
out:
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
}

static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
{
	PDBG("%s p2p_type = %d\n", __func__, p2p_type);
	memset(&init->u, 0, sizeof init->u);
	switch (p2p_type) {
	case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
		init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
		init->u.write.stag_sink = cpu_to_be32(1);
		init->u.write.to_sink = cpu_to_be64(1);
		init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
						   sizeof(struct fw_ri_immd),
						   16);
		break;
	case FW_RI_INIT_P2PTYPE_READ_REQ:
		init->u.write.opcode = FW_RI_RDMA_READ_WR;
		init->u.read.stag_src = cpu_to_be32(1);
		init->u.read.to_src_lo = cpu_to_be32(1);
		init->u.read.stag_sink = cpu_to_be32(1);
		init->u.read.to_sink_lo = cpu_to_be32(1);
		init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
		break;
	}
}

static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct sk_buff *skb;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     qhp->ep->hwtid);

	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP(FW_RI_INIT_WR) |
		FW_WR_COMPL(1));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(qhp->ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	wqe->cookie = (unsigned long) &qhp->ep->com.wr_wait;

	wqe->u.init.type = FW_RI_TYPE_INIT;
	wqe->u.init.mpareqbit_p2ptype =
		V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
		V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
	wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
	if (qhp->attr.mpa_attr.recv_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.xmit_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.crc_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;

	wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
			      FW_RI_QP_RDMA_WRITE_ENABLE |
			      FW_RI_QP_BIND_ENABLE;
	if (!qhp->ibqp.uobject)
		wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
				       FW_RI_QP_STAG0_ENABLE;
	wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
	wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
	wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
	wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
	wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
	wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
	wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
	wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
	wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
	wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
	wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
					   rhp->rdev.lldi.vr->rq.start);
	if (qhp->attr.mpa_attr.initiator)
		build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);

	ret = c4iw_ofld_send(&rhp->rdev, skb);
	if (ret)
		goto out;

	ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait,
				  qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
out:
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
}

/*
 * Called by the library when the qp has user dbs disabled due to
 * a DB_FULL condition. This function will single-thread all user
 * DB rings to avoid overflowing the hw db-fifo.
 */
static int ring_kernel_db(struct c4iw_qp *qhp, u32 qid, u16 inc)
{
	int delay = db_delay_usecs;

	mutex_lock(&qhp->rhp->db_mutex);
	do {

		/*
		 * The interrupt threshold is dbfifo_int_thresh << 6. So
		 * make sure we don't cross that and generate an interrupt.
		 */
		if (cxgb4_dbfifo_count(qhp->rhp->rdev.lldi.ports[0], 1) <
		    (qhp->rhp->rdev.lldi.dbfifo_int_thresh << 5)) {
			writel(QID(qid) | PIDX(inc), qhp->wq.db);
			break;
		}
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(delay));
		delay = min(delay << 1, 2000);
	} while (1);
	mutex_unlock(&qhp->rhp->db_mutex);
	return 0;
}

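/*
 * Drive the QP state machine (IDLE/RTS/CLOSING/TERMINATE/ERROR).  On an
 * error transition the LLP connection is disassociated and the QP is
 * flushed; terminate and disconnect actions are issued only after the
 * qhp mutex has been dropped.
 */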
int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		   enum c4iw_qp_attr_mask mask,
		   struct c4iw_qp_attributes *attrs,
		   int internal)
{
	int ret = 0;
	struct c4iw_qp_attributes newattr = qhp->attr;
	int disconnect = 0;
	int terminate = 0;
	int abort = 0;
	int free = 0;
	struct c4iw_ep *ep = NULL;

	PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__,
	     qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
	     (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

	mutex_lock(&qhp->mutex);

	/* Process attr changes if in IDLE */
	if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
		if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
			ret = -EIO;
			goto out;
		}
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
			newattr.enable_rdma_read = attrs->enable_rdma_read;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
			newattr.enable_rdma_write = attrs->enable_rdma_write;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
			newattr.enable_bind = attrs->enable_bind;
		if (mask & C4IW_QP_ATTR_MAX_ORD) {
			if (attrs->max_ord > c4iw_max_read_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ord = attrs->max_ord;
		}
		if (mask & C4IW_QP_ATTR_MAX_IRD) {
			if (attrs->max_ird > c4iw_max_read_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ird = attrs->max_ird;
		}
		qhp->attr = newattr;
	}

	if (mask & C4IW_QP_ATTR_SQ_DB) {
		ret = ring_kernel_db(qhp, qhp->wq.sq.qid, attrs->sq_db_inc);
		goto out;
	}
	if (mask & C4IW_QP_ATTR_RQ_DB) {
		ret = ring_kernel_db(qhp, qhp->wq.rq.qid, attrs->rq_db_inc);
		goto out;
	}

	if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
		goto out;
	if (qhp->attr.state == attrs->next_state)
		goto out;

	switch (qhp->attr.state) {
	case C4IW_QP_STATE_IDLE:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_RTS:
			if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
				ret = -EINVAL;
				goto out;
			}
			if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
				ret = -EINVAL;
				goto out;
			}
			qhp->attr.mpa_attr = attrs->mpa_attr;
			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
			qhp->ep = qhp->attr.llp_stream_handle;
			set_state(qhp, C4IW_QP_STATE_RTS);

			/*
			 * Ref the endpoint here and deref when we
			 * disassociate the endpoint from the QP.  This
			 * happens in CLOSING->IDLE transition or *->ERROR
			 * transition.
			 */
			c4iw_get_ep(&qhp->ep->com);
			ret = rdma_init(rhp, qhp);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_ERROR:
			set_state(qhp, C4IW_QP_STATE_ERROR);
			flush_qp(qhp);
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_RTS:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_CLOSING:
			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
			set_state(qhp, C4IW_QP_STATE_CLOSING);
			ep = qhp->ep;
			if (!internal) {
				abort = 0;
				disconnect = 1;
				c4iw_get_ep(&qhp->ep->com);
			}
			t4_set_wq_in_error(&qhp->wq);
			ret = rdma_fini(rhp, qhp, ep);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_TERMINATE:
			set_state(qhp, C4IW_QP_STATE_TERMINATE);
			qhp->attr.layer_etype = attrs->layer_etype;
			qhp->attr.ecode = attrs->ecode;
			t4_set_wq_in_error(&qhp->wq);
			ep = qhp->ep;
			disconnect = 1;
			if (!internal)
				terminate = 1;
			else {
				ret = rdma_fini(rhp, qhp, ep);
				if (ret)
					goto err;
			}
			c4iw_get_ep(&qhp->ep->com);
			break;
		case C4IW_QP_STATE_ERROR:
			set_state(qhp, C4IW_QP_STATE_ERROR);
			t4_set_wq_in_error(&qhp->wq);
			if (!internal) {
				abort = 1;
				disconnect = 1;
				ep = qhp->ep;
				c4iw_get_ep(&qhp->ep->com);
			}
			goto err;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_CLOSING:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		switch (attrs->next_state) {
		case C4IW_QP_STATE_IDLE:
			flush_qp(qhp);
			set_state(qhp, C4IW_QP_STATE_IDLE);
			qhp->attr.llp_stream_handle = NULL;
			c4iw_put_ep(&qhp->ep->com);
			qhp->ep = NULL;
			wake_up(&qhp->wait);
			break;
		case C4IW_QP_STATE_ERROR:
			goto err;
		default:
			ret = -EINVAL;
			goto err;
		}
		break;
	case C4IW_QP_STATE_ERROR:
		if (attrs->next_state != C4IW_QP_STATE_IDLE) {
			ret = -EINVAL;
			goto out;
		}
		if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
			ret = -EINVAL;
			goto out;
		}
		set_state(qhp, C4IW_QP_STATE_IDLE);
		break;
	case C4IW_QP_STATE_TERMINATE:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		goto err;
		break;
	default:
		printk(KERN_ERR "%s in a bad state %d\n",
		       __func__, qhp->attr.state);
		ret = -EINVAL;
		goto err;
		break;
	}
	goto out;
err:
	PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
	     qhp->wq.sq.qid);

	/* disassociate the LLP connection */
	qhp->attr.llp_stream_handle = NULL;
	if (!ep)
		ep = qhp->ep;
	qhp->ep = NULL;
	set_state(qhp, C4IW_QP_STATE_ERROR);
	free = 1;
	abort = 1;
	wake_up(&qhp->wait);
	BUG_ON(!ep);
	flush_qp(qhp);
out:
	mutex_unlock(&qhp->mutex);

	if (terminate)
		post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);

	/*
	 * If disconnect is 1, then we need to initiate a disconnect
	 * on the EP.  This can be a normal close (RTS->CLOSING) or
	 * an abnormal close (RTS/CLOSING->ERROR).
	 */
	if (disconnect) {
		c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
				   GFP_KERNEL);
		c4iw_put_ep(&ep->com);
	}

	/*
	 * If free is 1, then we've disassociated the EP from the QP
	 * and we need to dereference the EP.
	 */
	if (free)
		c4iw_put_ep(&ep->com);
	PDBG("%s exit state %d\n", __func__, qhp->attr.state);
	return ret;
}

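/* idr_for_each() callback: re-enable user doorbells for one QP. */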
Vipul Pandya422eea02012-05-18 15:29:30 +05301469static int enable_qp_db(int id, void *p, void *data)
1470{
1471 struct c4iw_qp *qp = p;
1472
1473 t4_enable_wq_db(&qp->wq);
1474 return 0;
1475}
1476
Steve Wisecfdda9d2010-04-21 15:30:06 -07001477int c4iw_destroy_qp(struct ib_qp *ib_qp)
1478{
1479 struct c4iw_dev *rhp;
1480 struct c4iw_qp *qhp;
1481 struct c4iw_qp_attributes attrs;
1482 struct c4iw_ucontext *ucontext;
1483
1484 qhp = to_c4iw_qp(ib_qp);
1485 rhp = qhp->rhp;
1486
1487 attrs.next_state = C4IW_QP_STATE_ERROR;
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05301488 if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
1489 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1490 else
1491 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001492 wait_event(qhp->wait, !qhp->ep);
1493
	spin_lock_irq(&rhp->lock);
	remove_handle_nolock(rhp, &rhp->qpidr, qhp->wq.sq.qid);
	rhp->qpcnt--;
	BUG_ON(rhp->qpcnt < 0);
	if (rhp->qpcnt <= db_fc_threshold && rhp->db_state == FLOW_CONTROL) {
		rhp->rdev.stats.db_state_transitions++;
		rhp->db_state = NORMAL;
		idr_for_each(&rhp->qpidr, enable_qp_db, NULL);
	}
	if (db_coalescing_threshold >= 0)
		if (rhp->qpcnt <= db_coalescing_threshold)
			cxgb4_enable_db_coalescing(rhp->rdev.lldi.ports[0]);
	spin_unlock_irq(&rhp->lock);
	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	ucontext = ib_qp->uobject ?
		   to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
	kfree(qhp);
	return 0;
}

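/*
 * idr_for_each() callback: stop one QP from ringing its user doorbell
 * while the device is in doorbell flow-control mode.
 */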
static int disable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_disable_wq_db(&qp->wq);
	return 0;
}

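/*
 * Allocate and initialize an RC QP: size and create the hardware work
 * queues, register the QP id, and, for user QPs, hand the queue and
 * doorbell mappings back to the provider via udata.
 */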
struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_pd *php;
	struct c4iw_cq *schp;
	struct c4iw_cq *rchp;
	struct c4iw_create_qp_resp uresp;
	unsigned int sqsize, rqsize;
	struct c4iw_ucontext *ucontext;
	int ret;
	struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4, *mm5 = NULL;

	PDBG("%s ib_pd %p\n", __func__, pd);

	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
		return ERR_PTR(-EINVAL);

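	/*
	 * Round the requested depths up to a multiple of 16 entries and
	 * reserve one slot beyond what the caller asked for.  The
	 * granularity presumably matches a hardware requirement and the
	 * spare slot likely keeps a full ring distinguishable from an
	 * empty one -- both assumptions inferred from the arithmetic below.
	 */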
	rqsize = roundup(attrs->cap.max_recv_wr + 1, 16);
	if (rqsize > T4_MAX_RQ_SIZE)
		return ERR_PTR(-E2BIG);

	sqsize = roundup(attrs->cap.max_send_wr + 1, 16);
	if (sqsize > T4_MAX_SQ_SIZE)
		return ERR_PTR(-E2BIG);

	ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;

	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.sq.size = sqsize;
	qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue;
	qhp->wq.sq.flush_cidx = -1;
	qhp->wq.rq.size = rqsize;
	qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue;

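	/*
	 * User queues are mmap()ed into the process, so their sizes must
	 * be rounded up to whole pages.
	 */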
	if (ucontext) {
		qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
		qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
	}

	PDBG("%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu\n",
	     __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);

	ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize - 1;
	attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = C4IW_QP_STATE_IDLE;
	qhp->attr.next_state = C4IW_QP_STATE_IDLE;
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;
	qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
	spin_lock_init(&qhp->lock);
	mutex_init(&qhp->mutex);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);

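	/*
	 * Account for the new QP under the device lock: if we are already
	 * in doorbell flow-control mode the new QP starts with its
	 * doorbell disabled, crossing db_fc_threshold moves every QP into
	 * that mode, and crossing db_coalescing_threshold (when enabled)
	 * turns adapter doorbell coalescing off.
	 */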
	spin_lock_irq(&rhp->lock);
	if (rhp->db_state != NORMAL)
		t4_disable_wq_db(&qhp->wq);
	rhp->qpcnt++;
	if (rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
		rhp->rdev.stats.db_state_transitions++;
		rhp->db_state = FLOW_CONTROL;
		idr_for_each(&rhp->qpidr, disable_qp_db, NULL);
	}
	if (db_coalescing_threshold >= 0)
		if (rhp->qpcnt > db_coalescing_threshold)
			cxgb4_disable_db_coalescing(rhp->rdev.lldi.ports[0]);
	ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
	spin_unlock_irq(&rhp->lock);
	if (ret)
		goto err2;

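	/*
	 * For user QPs, hand back mmap keys for the raw queue memory and
	 * the doorbell/GTS pages; mm5 (the PCIe MA sync register) is only
	 * needed when the SQ lives in on-chip memory.
	 */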
	if (udata) {
		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
		if (!mm1) {
			ret = -ENOMEM;
			goto err3;
		}
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			ret = -ENOMEM;
			goto err4;
		}
		mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
		if (!mm3) {
			ret = -ENOMEM;
			goto err5;
		}
		mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
		if (!mm4) {
			ret = -ENOMEM;
			goto err6;
		}
		if (t4_sq_onchip(&qhp->wq.sq)) {
			mm5 = kmalloc(sizeof *mm5, GFP_KERNEL);
			if (!mm5) {
				ret = -ENOMEM;
				goto err7;
			}
			uresp.flags = C4IW_QPF_ONCHIP;
		} else
			uresp.flags = 0;
		uresp.qid_mask = rhp->rdev.qpmask;
		uresp.sqid = qhp->wq.sq.qid;
		uresp.sq_size = qhp->wq.sq.size;
		uresp.sq_memsize = qhp->wq.sq.memsize;
		uresp.rqid = qhp->wq.rq.qid;
		uresp.rq_size = qhp->wq.rq.size;
		uresp.rq_memsize = qhp->wq.rq.memsize;
		spin_lock(&ucontext->mmap_lock);
		if (mm5) {
			uresp.ma_sync_key = ucontext->key;
			ucontext->key += PAGE_SIZE;
		} else {
			uresp.ma_sync_key = 0;
		}
		uresp.sq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.sq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (ret)
			goto err8;
		mm1->key = uresp.sq_key;
		mm1->addr = qhp->wq.sq.phys_addr;
		mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.rq_key;
		mm2->addr = virt_to_phys(qhp->wq.rq.queue);
		mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
		insert_mmap(ucontext, mm2);
		mm3->key = uresp.sq_db_gts_key;
		mm3->addr = qhp->wq.sq.udb;
		mm3->len = PAGE_SIZE;
		insert_mmap(ucontext, mm3);
		mm4->key = uresp.rq_db_gts_key;
		mm4->addr = qhp->wq.rq.udb;
		mm4->len = PAGE_SIZE;
		insert_mmap(ucontext, mm4);
		if (mm5) {
			mm5->key = uresp.ma_sync_key;
			mm5->addr = (pci_resource_start(rhp->rdev.lldi.pdev, 0)
				    + A_PCIE_MA_SYNC) & PAGE_MASK;
			mm5->len = PAGE_SIZE;
			insert_mmap(ucontext, mm5);
		}
	}
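	/*
	 * A userspace provider is expected to mmap() the returned keys as
	 * offsets on the verbs command fd to reach its queues.  Roughly
	 * (a sketch, not the actual libcxgb4 code):
	 *
	 *	sq = mmap(NULL, uresp.sq_memsize, PROT_READ | PROT_WRITE,
	 *		  MAP_SHARED, cmd_fd, uresp.sq_key);
	 *	db = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED,
	 *		  cmd_fd, uresp.sq_db_gts_key);
	 */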
	qhp->ibqp.qp_num = qhp->wq.sq.qid;
	init_timer(&(qhp->timer));
	PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
	     __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	     qhp->wq.sq.qid);
	return &qhp->ibqp;
err8:
	kfree(mm5);
err7:
	kfree(mm4);
err6:
	kfree(mm3);
err5:
	kfree(mm2);
err4:
	kfree(mm1);
err3:
	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
err2:
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(qhp);
	return ERR_PTR(ret);
}

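/*
 * Translate the generic ib_qp_attr bits into the c4iw-internal attribute
 * mask and values before calling c4iw_modify_qp().  Note the non-standard
 * use of the PSN fields below: a user provider drains its doorbell backlog
 * with something like (a sketch, not actual library code):
 *
 *	attr.sq_psn = sq_idx_inc;
 *	attr.rq_psn = rq_idx_inc;
 *	ibv_modify_qp(qp, &attr, IBV_QP_SQ_PSN | IBV_QP_RQ_PSN);
 */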
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	enum c4iw_qp_attr_mask mask = 0;
	struct c4iw_qp_attributes attrs;

	PDBG("%s ib_qp %p\n", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_c4iw_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = c4iw_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
				  IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				   IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
		(C4IW_QP_ATTR_ENABLE_RDMA_READ |
		 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
		 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	/*
	 * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
	 * ringing the queue db when we're in DB_FULL mode.
	 */
	attrs.sq_db_inc = attr->sq_psn;
	attrs.rq_db_inc = attr->rq_psn;
	mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
	mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;

	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
}

struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
{
	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}

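/*
 * Minimal query support: only the current QP state is reported; all other
 * attributes come back zeroed.
 */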
int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);

	memset(attr, 0, sizeof *attr);
	memset(init_attr, 0, sizeof *init_attr);
	attr->qp_state = to_ib_qp_state(qhp->attr.state);
	return 0;
}