/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>

#include "iw_cxgb4.h"

static int db_delay_usecs = 1;
module_param(db_delay_usecs, int, 0644);
MODULE_PARM_DESC(db_delay_usecs, "Usecs to delay awaiting db fifo to drain");

static int ocqp_support = 1;
module_param(ocqp_support, int, 0644);
MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");

int db_fc_threshold = 1000;
module_param(db_fc_threshold, int, 0644);
MODULE_PARM_DESC(db_fc_threshold,
		 "QP count/threshold that triggers"
		 " automatic db flow control mode (default = 1000)");

int db_coalescing_threshold;
module_param(db_coalescing_threshold, int, 0644);
MODULE_PARM_DESC(db_coalescing_threshold,
		 "QP count/threshold that triggers"
		 " disabling db coalescing (default = 0)");

static int max_fr_immd = T4_MAX_FR_IMMD;
module_param(max_fr_immd, int, 0644);
MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");

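/*
 * IRD (incoming RDMA read) resources are a shared, device-wide pool.
 * A QP reserves its max_ird worth of entries from the pool before the
 * RI is initialized and returns them with free_ird() if that fails.
 */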
static int alloc_ird(struct c4iw_dev *dev, u32 ird)
{
	int ret = 0;

	spin_lock_irq(&dev->lock);
	if (ird <= dev->avail_ird)
		dev->avail_ird -= ird;
	else
		ret = -ENOMEM;
	spin_unlock_irq(&dev->lock);

	if (ret)
		dev_warn(&dev->rdev.lldi.pdev->dev,
			 "device IRD resources exhausted\n");

	return ret;
}

static void free_ird(struct c4iw_dev *dev, int ird)
{
	spin_lock_irq(&dev->lock);
	dev->avail_ird += ird;
	spin_unlock_irq(&dev->lock);
}

static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
{
	unsigned long flag;
	spin_lock_irqsave(&qhp->lock, flag);
	qhp->attr.state = state;
	spin_unlock_irqrestore(&qhp->lock, flag);
}

static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);
}

static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
			  pci_unmap_addr(sq, mapping));
}

static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	if (t4_sq_onchip(sq))
		dealloc_oc_sq(rdev, sq);
	else
		dealloc_host_sq(rdev, sq);
}

static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	if (!ocqp_support || !ocqp_supported(&rdev->lldi))
		return -ENOSYS;
	sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
	if (!sq->dma_addr)
		return -ENOMEM;
	sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr -
			rdev->lldi.vr->ocq.start;
	sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr -
					    rdev->lldi.vr->ocq.start);
	sq->flags |= T4_SQ_ONCHIP;
	return 0;
}

static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize,
				       &(sq->dma_addr), GFP_KERNEL);
	if (!sq->queue)
		return -ENOMEM;
	sq->phys_addr = virt_to_phys(sq->queue);
	pci_unmap_addr_set(sq, mapping, sq->dma_addr);
	return 0;
}

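/*
 * SQ allocation strategy: user QPs first try the on-chip queue pool
 * (if enabled and supported by the adapter) and fall back to host DMA
 * memory; kernel QPs always use host memory.
 */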
static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
{
	int ret = -ENOSYS;
	if (user)
		ret = alloc_oc_sq(rdev, sq);
	if (ret)
		ret = alloc_host_sq(rdev, sq);
	return ret;
}

static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		      struct c4iw_dev_ucontext *uctx)
{
	/*
	 * uP clears EQ contexts when the connection exits rdma mode,
	 * so no need to post a RESET WR for these EQs.
	 */
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
	dealloc_sq(rdev, &wq->sq);
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
	kfree(wq->rq.sw_rq);
	kfree(wq->sq.sw_sq);
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return 0;
}

/*
 * Determine the BAR2 virtual address and qid. If pbar2_pa is not NULL,
 * then this is a user mapping so compute the page-aligned physical
 * address for mapping.
 */
void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
			      enum cxgb4_bar2_qtype qtype,
			      unsigned int *pbar2_qid, u64 *pbar2_pa)
{
	u64 bar2_qoffset;
	int ret;

	ret = cxgb4_bar2_sge_qregs(rdev->lldi.ports[0], qid, qtype,
				   pbar2_pa ? 1 : 0,
				   &bar2_qoffset, pbar2_qid);
	if (ret)
		return NULL;

	if (pbar2_pa)
		*pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;
	return rdev->bar2_kva + bar2_qoffset;
}

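/*
 * Allocate host/on-chip memory for the SQ, RQ and RQT, then pass both
 * egress queues to the firmware in a single FW_RI_RES_WR carrying two
 * resource entries, waiting synchronously for the reply via wr_wait.
 */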
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx)
{
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret = 0;
	int eqsize;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid) {
		ret = -ENOMEM;
		goto free_sq_qid;
	}

	if (!user) {
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
				       GFP_KERNEL);
		if (!wq->sq.sw_sq) {
			ret = -ENOMEM;
			goto free_rq_qid;
		}

		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
				       GFP_KERNEL);
		if (!wq->rq.sw_rq) {
			ret = -ENOMEM;
			goto free_sw_sq;
		}
	}

	/*
	 * RQT must be a power of 2 and at least 16 deep.
	 */
	wq->rq.rqt_size = roundup_pow_of_two(max_t(u16, wq->rq.size, 16));
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr) {
		ret = -ENOMEM;
		goto free_sw_rq;
	}

	ret = alloc_sq(rdev, &wq->sq, user);
	if (ret)
		goto free_hwaddr;
	memset(wq->sq.queue, 0, wq->sq.memsize);
	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
					  wq->rq.memsize, &(wq->rq.dma_addr),
					  GFP_KERNEL);
	if (!wq->rq.queue) {
		ret = -ENOMEM;
		goto free_sq;
	}
	PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
	     __func__, wq->sq.queue,
	     (unsigned long long)virt_to_phys(wq->sq.queue),
	     wq->rq.queue,
	     (unsigned long long)virt_to_phys(wq->rq.queue));
	memset(wq->rq.queue, 0, wq->rq.memsize);
	dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

	wq->db = rdev->lldi.db_reg;

	wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid, T4_BAR2_QTYPE_EGRESS,
					 &wq->sq.bar2_qid,
					 user ? &wq->sq.bar2_pa : NULL);
	wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid, T4_BAR2_QTYPE_EGRESS,
					 &wq->rq.bar2_qid,
					 user ? &wq->rq.bar2_pa : NULL);

	/*
	 * User mode must have bar2 access.
	 */
	if (user && (!wq->sq.bar2_va || !wq->rq.bar2_va)) {
		pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n",
			pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
		ret = -EINVAL;
		goto free_dma;
	}

	wq->rdev = rdev;
	wq->rq.msn = 1;

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto free_dma;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			FW_RI_RES_WR_NRES_V(2) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)&wr_wait;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS +
		 rdev->hw_queue.t4_eq_status_entries;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		FW_RI_RES_WR_HOSTFCMODE_V(0) |	/* no host cidx updates */
		FW_RI_RES_WR_CPRIO_V(0) |	/* don't keep in chip cache */
		FW_RI_RES_WR_PCIECHN_V(0) |	/* set by uP at ri_init time */
		(t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_ONCHIP_F : 0) |
		FW_RI_RES_WR_IQID_V(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		FW_RI_RES_WR_DCAEN_V(0) |
		FW_RI_RES_WR_DCACPU_V(0) |
		FW_RI_RES_WR_FBMIN_V(2) |
		FW_RI_RES_WR_FBMAX_V(2) |
		FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
		FW_RI_RES_WR_CIDXFTHRESH_V(0) |
		FW_RI_RES_WR_EQSIZE_V(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
		 rdev->hw_queue.t4_eq_status_entries;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		FW_RI_RES_WR_HOSTFCMODE_V(0) |	/* no host cidx updates */
		FW_RI_RES_WR_CPRIO_V(0) |	/* don't keep in chip cache */
		FW_RI_RES_WR_PCIECHN_V(0) |	/* set by uP at ri_init time */
		FW_RI_RES_WR_IQID_V(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		FW_RI_RES_WR_DCAEN_V(0) |
		FW_RI_RES_WR_DCACPU_V(0) |
		FW_RI_RES_WR_FBMIN_V(2) |
		FW_RI_RES_WR_FBMAX_V(2) |
		FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
		FW_RI_RES_WR_CIDXFTHRESH_V(0) |
		FW_RI_RES_WR_EQSIZE_V(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto free_dma;
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
	if (ret)
		goto free_dma;

	PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
	     __func__, wq->sq.qid, wq->rq.qid, wq->db,
	     wq->sq.bar2_va, wq->rq.bar2_va);

	return 0;
free_dma:
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
free_sq:
	dealloc_sq(rdev, &wq->sq);
free_hwaddr:
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
free_sw_rq:
	kfree(wq->rq.sw_rq);
free_sw_sq:
	kfree(wq->sq.sw_sq);
free_rq_qid:
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
free_sq_qid:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return ret;
}

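/*
 * Copy inline send data directly into the WR, wrapping at the end of
 * the SQ ring and zero-padding so the immediate chunk ends on the 16B
 * boundary the len16 accounting assumes.
 */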
static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
		      struct ib_send_wr *wr, int max, u32 *plenp)
{
	u8 *dstp, *srcp;
	u32 plen = 0;
	int i;
	int rem, len;

	dstp = (u8 *)immdp->data;
	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) > max)
			return -EMSGSIZE;
		srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
		plen += wr->sg_list[i].length;
		rem = wr->sg_list[i].length;
		while (rem) {
			if (dstp == (u8 *)&sq->queue[sq->size])
				dstp = (u8 *)sq->queue;
			if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
				len = rem;
			else
				len = (u8 *)&sq->queue[sq->size] - dstp;
			memcpy(dstp, srcp, len);
			dstp += len;
			srcp += len;
			rem -= len;
		}
	}
	len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
	if (len)
		memset(dstp, 0, len);
	immdp->op = FW_RI_DATA_IMMD;
	immdp->r1 = 0;
	immdp->r2 = 0;
	immdp->immdlen = cpu_to_be32(plen);
	*plenp = plen;
	return 0;
}

static int build_isgl(__be64 *queue_start, __be64 *queue_end,
		      struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
		      int num_sge, u32 *plenp)
{
	int i;
	u32 plen = 0;
	__be64 *flitp = (__be64 *)isglp->sge;

	for (i = 0; i < num_sge; i++) {
		if ((plen + sg_list[i].length) < plen)
			return -EMSGSIZE;
		plen += sg_list[i].length;
		*flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
				     sg_list[i].length);
		if (++flitp == queue_end)
			flitp = queue_start;
		*flitp = cpu_to_be64(sg_list[i].addr);
		if (++flitp == queue_end)
			flitp = queue_start;
	}
	*flitp = (__force __be64)0;
	isglp->op = FW_RI_DATA_ISGL;
	isglp->r1 = 0;
	isglp->nsge = cpu_to_be16(num_sge);
	isglp->r2 = 0;
	if (plenp)
		*plenp = plen;
	return 0;
}

static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
			   struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	switch (wr->opcode) {
	case IB_WR_SEND:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND));
		wqe->send.stag_inv = 0;
		break;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE_INV));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_INV));
		wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
		break;

	default:
		return -EINVAL;
	}
	wqe->send.r3 = 0;
	wqe->send.r4 = 0;

	plen = 0;
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->send.u.immd_src, wr,
					 T4_MAX_SEND_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->send.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->send.u.immd_src[0].r1 = 0;
		wqe->send.u.immd_src[0].r2 = 0;
		wqe->send.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}

static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
			    struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	wqe->write.r2 = 0;
	wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
	wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->write.u.immd_src, wr,
					 T4_MAX_WRITE_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->write.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->write.u.immd_src[0].r1 = 0;
		wqe->write.u.immd_src[0].r2 = 0;
		wqe->write.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}

static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
	if (wr->num_sge > 1)
		return -EINVAL;
	if (wr->num_sge) {
		wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey);
		wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr
							>> 32));
		wqe->read.to_src_lo = cpu_to_be32((u32)rdma_wr(wr)->remote_addr);
		wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
		wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
		wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
							 >> 32));
		wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
	} else {
		wqe->read.stag_src = cpu_to_be32(2);
		wqe->read.to_src_hi = 0;
		wqe->read.to_src_lo = 0;
		wqe->read.stag_sink = cpu_to_be32(2);
		wqe->read.plen = 0;
		wqe->read.to_sink_hi = 0;
		wqe->read.to_sink_lo = 0;
	}
	wqe->read.r2 = 0;
	wqe->read.r5 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
	return 0;
}

static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
			   struct ib_recv_wr *wr, u8 *len16)
{
	int ret;

	ret = build_isgl((__be64 *)qhp->wq.rq.queue,
			 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
			 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
	if (ret)
		return ret;
	*len16 = DIV_ROUND_UP(sizeof wqe->recv +
			      wr->num_sge * sizeof(struct fw_ri_sge), 16);
	return 0;
}

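/*
 * Build an FW_RI_FR_NSMR_WR for an IB_WR_REG_MR.  The page list is
 * handed to the hardware either by reference as a DSGL (T5 and later,
 * when it exceeds max_fr_immd) or inline as byte-swapped immediate
 * data padded out to the 32B-rounded pbllen.
 */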
static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
			struct ib_reg_wr *wr, u8 *len16, u8 t5dev)
{
	struct c4iw_mr *mhp = to_c4iw_mr(wr->mr);
	struct fw_ri_immd *imdp;
	__be64 *p;
	int i;
	int pbllen = roundup(mhp->mpl_len * sizeof(u64), 32);
	int rem;

	if (mhp->mpl_len > t4_max_fr_depth(use_dsgl))
		return -EINVAL;

	wqe->fr.qpbinde_to_dcacpu = 0;
	wqe->fr.pgsz_shift = ilog2(wr->mr->page_size) - 12;
	wqe->fr.addr_type = FW_RI_VA_BASED_TO;
	wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->access);
	wqe->fr.len_hi = 0;
	wqe->fr.len_lo = cpu_to_be32(mhp->ibmr.length);
	wqe->fr.stag = cpu_to_be32(wr->key);
	wqe->fr.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
	wqe->fr.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova &
					0xffffffff);

	if (t5dev && use_dsgl && (pbllen > max_fr_immd)) {
		struct fw_ri_dsgl *sglp;

		for (i = 0; i < mhp->mpl_len; i++)
			mhp->mpl[i] = (__force u64)cpu_to_be64((u64)mhp->mpl[i]);

		sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
		sglp->op = FW_RI_DATA_DSGL;
		sglp->r1 = 0;
		sglp->nsge = cpu_to_be16(1);
		sglp->addr0 = cpu_to_be64(mhp->mpl_addr);
		sglp->len0 = cpu_to_be32(pbllen);

		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
	} else {
		imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
		imdp->op = FW_RI_DATA_IMMD;
		imdp->r1 = 0;
		imdp->r2 = 0;
		imdp->immdlen = cpu_to_be32(pbllen);
		p = (__be64 *)(imdp + 1);
		rem = pbllen;
		for (i = 0; i < mhp->mpl_len; i++) {
			*p = cpu_to_be64((u64)mhp->mpl[i]);
			rem -= sizeof(*p);
			if (++p == (__be64 *)&sq->queue[sq->size])
				p = (__be64 *)sq->queue;
		}
		BUG_ON(rem < 0);
		while (rem) {
			*p = 0;
			rem -= sizeof(*p);
			if (++p == (__be64 *)&sq->queue[sq->size])
				p = (__be64 *)sq->queue;
		}
		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)
				      + pbllen, 16);
	}
	return 0;
}

static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
			 struct ib_send_wr *send_wr, u8 *len16, u8 t5dev)
{
	struct ib_fast_reg_wr *wr = fast_reg_wr(send_wr);
	struct fw_ri_immd *imdp;
	__be64 *p;
	int i;
	int pbllen = roundup(wr->page_list_len * sizeof(u64), 32);
	int rem;

	if (wr->page_list_len > t4_max_fr_depth(use_dsgl))
		return -EINVAL;

	wqe->fr.qpbinde_to_dcacpu = 0;
	wqe->fr.pgsz_shift = wr->page_shift - 12;
	wqe->fr.addr_type = FW_RI_VA_BASED_TO;
	wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->access_flags);
	wqe->fr.len_hi = 0;
	wqe->fr.len_lo = cpu_to_be32(wr->length);
	wqe->fr.stag = cpu_to_be32(wr->rkey);
	wqe->fr.va_hi = cpu_to_be32(wr->iova_start >> 32);
	wqe->fr.va_lo_fbo = cpu_to_be32(wr->iova_start & 0xffffffff);

	if (t5dev && use_dsgl && (pbllen > max_fr_immd)) {
		struct c4iw_fr_page_list *c4pl =
			to_c4iw_fr_page_list(wr->page_list);
		struct fw_ri_dsgl *sglp;

		for (i = 0; i < wr->page_list_len; i++) {
			wr->page_list->page_list[i] = (__force u64)
				cpu_to_be64((u64)wr->page_list->page_list[i]);
		}

		sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
		sglp->op = FW_RI_DATA_DSGL;
		sglp->r1 = 0;
		sglp->nsge = cpu_to_be16(1);
		sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
		sglp->len0 = cpu_to_be32(pbllen);

		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
	} else {
		imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
		imdp->op = FW_RI_DATA_IMMD;
		imdp->r1 = 0;
		imdp->r2 = 0;
		imdp->immdlen = cpu_to_be32(pbllen);
		p = (__be64 *)(imdp + 1);
		rem = pbllen;
		for (i = 0; i < wr->page_list_len; i++) {
			*p = cpu_to_be64((u64)wr->page_list->page_list[i]);
			rem -= sizeof(*p);
			if (++p == (__be64 *)&sq->queue[sq->size])
				p = (__be64 *)sq->queue;
		}
		BUG_ON(rem < 0);
		while (rem) {
			*p = 0;
			rem -= sizeof(*p);
			if (++p == (__be64 *)&sq->queue[sq->size])
				p = (__be64 *)sq->queue;
		}
		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)
				      + pbllen, 16);
	}
	return 0;
}

static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
			  u8 *len16)
{
	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
	wqe->inv.r2 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
	return 0;
}

void c4iw_qp_add_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	atomic_inc(&(to_c4iw_qp(qp)->refcnt));
}

void c4iw_qp_rem_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
		wake_up(&(to_c4iw_qp(qp)->wait));
}

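/*
 * Doorbell flow control: while the LLD has flagged doorbell overflow
 * (db_state != NORMAL), kernel QPs bank their producer-index
 * increments in wq_pidx_inc and park themselves on the device
 * db_fc_list instead of ringing the doorbell, so the increments can
 * be replayed once the state returns to NORMAL.
 */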
static void add_to_fc_list(struct list_head *head, struct list_head *entry)
{
	if (list_empty(entry))
		list_add_tail(entry, head);
}

static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
{
	unsigned long flags;

	spin_lock_irqsave(&qhp->rhp->lock, flags);
	spin_lock(&qhp->lock);
	if (qhp->rhp->db_state == NORMAL)
		t4_ring_sq_db(&qhp->wq, inc, NULL);
	else {
		add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
		qhp->wq.sq.wq_pidx_inc += inc;
	}
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&qhp->rhp->lock, flags);
	return 0;
}

static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
{
	unsigned long flags;

	spin_lock_irqsave(&qhp->rhp->lock, flags);
	spin_lock(&qhp->lock);
	if (qhp->rhp->db_state == NORMAL)
		t4_ring_rq_db(&qhp->wq, inc, NULL);
	else {
		add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
		qhp->wq.rq.wq_pidx_inc += inc;
	}
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&qhp->rhp->lock, flags);
	return 0;
}

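/*
 * Translate a chain of ib_send_wrs into T4 WRs at the SQ producer
 * index, recording a software SQE for each for completion processing,
 * then ring the doorbell once for the whole chain (or defer through
 * ring_kernel_sq_db() while doorbell flow control is active).
 */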
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr)
{
	int err = 0;
	u8 len16 = 0;
	enum fw_wr_opcodes fw_opcode = 0;
	enum fw_ri_wr_flags fw_flags;
	struct c4iw_qp *qhp;
	union t4_wr *wqe = NULL;
	u32 num_wrs;
	struct t4_swsqe *swsqe;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = t4_sq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (num_wrs == 0) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
		      qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);

		fw_flags = 0;
		if (wr->send_flags & IB_SEND_SOLICITED)
			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all)
			fw_flags |= FW_RI_COMPLETION_FLAG;
		swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
		switch (wr->opcode) {
		case IB_WR_SEND_WITH_INV:
		case IB_WR_SEND:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_READ_FENCE_FLAG;
			fw_opcode = FW_RI_SEND_WR;
			if (wr->opcode == IB_WR_SEND)
				swsqe->opcode = FW_RI_SEND;
			else
				swsqe->opcode = FW_RI_SEND_WITH_INV;
			err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_WRITE:
			fw_opcode = FW_RI_RDMA_WRITE_WR;
			swsqe->opcode = FW_RI_RDMA_WRITE;
			err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_READ_WITH_INV:
			fw_opcode = FW_RI_RDMA_READ_WR;
			swsqe->opcode = FW_RI_READ_REQ;
			if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
				fw_flags = FW_RI_RDMA_READ_INVALIDATE;
			else
				fw_flags = 0;
			err = build_rdma_read(wqe, wr, &len16);
			if (err)
				break;
			swsqe->read_len = wr->sg_list[0].length;
			if (!qhp->wq.sq.oldest_read)
				qhp->wq.sq.oldest_read = swsqe;
			break;
		case IB_WR_FAST_REG_MR:
			fw_opcode = FW_RI_FR_NSMR_WR;
			swsqe->opcode = FW_RI_FAST_REGISTER;
			err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16,
					    !is_t4(qhp->rhp->rdev.lldi.adapter_type) ?
					    1 : 0);
			break;
		case IB_WR_REG_MR:
			fw_opcode = FW_RI_FR_NSMR_WR;
			swsqe->opcode = FW_RI_FAST_REGISTER;
			err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr), &len16,
					   is_t5(qhp->rhp->rdev.lldi.adapter_type) ?
					   1 : 0);
			break;
		case IB_WR_LOCAL_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
			fw_opcode = FW_RI_INV_LSTAG_WR;
			swsqe->opcode = FW_RI_LOCAL_INV;
			err = build_inv_stag(wqe, wr, &len16);
			break;
		default:
			PDBG("%s post of type=%d TBD!\n", __func__,
			     wr->opcode);
			err = -EINVAL;
		}
		if (err) {
			*bad_wr = wr;
			break;
		}
		swsqe->idx = qhp->wq.sq.pidx;
		swsqe->complete = 0;
		swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
				  qhp->sq_sig_all;
		swsqe->flushed = 0;
		swsqe->wr_id = wr->wr_id;
		if (c4iw_wr_log) {
			swsqe->sge_ts = cxgb4_read_sge_timestamp(
					qhp->rhp->rdev.lldi.ports[0]);
			getnstimeofday(&swsqe->host_ts);
		}

		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);

		PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
		     __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
		     swsqe->opcode, swsqe->read_len);
		wr = wr->next;
		num_wrs--;
		t4_sq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
	}
	if (!qhp->rhp->rdev.status_page->db_off) {
		t4_ring_sq_db(&qhp->wq, idx, wqe);
		spin_unlock_irqrestore(&qhp->lock, flag);
	} else {
		spin_unlock_irqrestore(&qhp->lock, flag);
		ring_kernel_sq_db(qhp, idx);
	}
	return err;
}

int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct c4iw_qp *qhp;
	union t4_recv_wr *wqe = NULL;
	u32 num_wrs;
	u8 len16 = 0;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = t4_rq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (wr->num_sge > T4_MAX_RECV_SGE) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
					   qhp->wq.rq.wq_pidx *
					   T4_EQ_ENTRY_SIZE);
		if (num_wrs)
			err = build_rdma_recv(qhp, wqe, wr, &len16);
		else
			err = -ENOMEM;
		if (err) {
			*bad_wr = wr;
			break;
		}

		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
		if (c4iw_wr_log) {
			qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts =
				cxgb4_read_sge_timestamp(
						qhp->rhp->rdev.lldi.ports[0]);
			getnstimeofday(
				&qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_ts);
		}

		wqe->recv.opcode = FW_RI_RECV_WR;
		wqe->recv.r1 = 0;
		wqe->recv.wrid = qhp->wq.rq.pidx;
		wqe->recv.r2[0] = 0;
		wqe->recv.r2[1] = 0;
		wqe->recv.r2[2] = 0;
		wqe->recv.len16 = len16;
		PDBG("%s cookie 0x%llx pidx %u\n", __func__,
		     (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
		t4_rq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
		wr = wr->next;
		num_wrs--;
	}
	if (!qhp->rhp->rdev.status_page->db_off) {
		t4_ring_rq_db(&qhp->wq, idx, wqe);
		spin_unlock_irqrestore(&qhp->lock, flag);
	} else {
		spin_unlock_irqrestore(&qhp->lock, flag);
		ring_kernel_rq_db(qhp, idx);
	}
	return err;
}

int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)
{
	return -ENOSYS;
}

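/*
 * Map a CQE error status onto the RDMAP/DDP/MPA layer and error code
 * fields carried in a TERMINATE message; a NULL err_cqe yields a
 * local catastrophic error.
 */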
static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
				    u8 *ecode)
{
	int status;
	int tagged;
	int opcode;
	int rqtype;
	int send_inv;

	if (!err_cqe) {
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		return;
	}

	status = CQE_STATUS(err_cqe);
	opcode = CQE_OPCODE(err_cqe);
	rqtype = RQ_TYPE(err_cqe);
	send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
		   (opcode == FW_RI_SEND_WITH_SE_INV);
	tagged = (opcode == FW_RI_RDMA_WRITE) ||
		 (rqtype && (opcode == FW_RI_READ_RESP));

	switch (status) {
	case T4_ERR_STAG:
		if (send_inv) {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
			*ecode = RDMAP_CANT_INV_STAG;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_INV_STAG;
		}
		break;
	case T4_ERR_PDID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		if ((opcode == FW_RI_SEND_WITH_INV) ||
		    (opcode == FW_RI_SEND_WITH_SE_INV))
			*ecode = RDMAP_CANT_INV_STAG;
		else
			*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_QPID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_ACCESS:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_ACC_VIOL;
		break;
	case T4_ERR_WRAP:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_TO_WRAP;
		break;
	case T4_ERR_BOUND:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_BASE_BOUNDS;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_BASE_BOUNDS;
		}
		break;
	case T4_ERR_INVALIDATE_SHARED_MR:
	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_CANT_INV_STAG;
		break;
	case T4_ERR_ECC:
	case T4_ERR_ECC_PSTAG:
	case T4_ERR_INTERNAL_ERR:
		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_OUT_OF_RQE:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_NOBUF;
		break;
	case T4_ERR_PBL_ADDR_BOUND:
		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
		*ecode = DDPT_BASE_BOUNDS;
		break;
	case T4_ERR_CRC:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_CRC_ERR;
		break;
	case T4_ERR_MARKER:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_MARKER_ERR;
		break;
	case T4_ERR_PDU_LEN_ERR:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_MSG_TOOBIG;
		break;
	case T4_ERR_DDP_VERSION:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_INV_VERS;
		} else {
			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
			*ecode = DDPU_INV_VERS;
		}
		break;
	case T4_ERR_RDMA_VERSION:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_VERS;
		break;
	case T4_ERR_OPCODE:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_OPCODE;
		break;
	case T4_ERR_DDP_QUEUE_NUM:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_QN;
		break;
	case T4_ERR_MSN:
	case T4_ERR_MSN_GAP:
	case T4_ERR_MSN_RANGE:
	case T4_ERR_IRD_OVERFLOW:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_RANGE;
		break;
	case T4_ERR_TBIT:
		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_MO:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MO;
		break;
	default:
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	}
}

static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
			   gfp_t gfp)
{
	struct fw_ri_wr *wqe;
	struct sk_buff *skb;
	struct terminate_message *term;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     qhp->ep->hwtid);

	skb = alloc_skb(sizeof *wqe, gfp);
	if (!skb)
		return;
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(qhp->ep->hwtid) |
		FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));

	wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
	wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
	term = (struct terminate_message *)wqe->u.terminate.termmsg;
	if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
		term->layer_etype = qhp->attr.layer_etype;
		term->ecode = qhp->attr.ecode;
	} else
		build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
	c4iw_ofld_send(&qhp->rhp->rdev, skb);
}

/*
 * Assumes qhp lock is held.
 */
static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
		       struct c4iw_cq *schp)
{
	int count;
	int rq_flushed, sq_flushed;
	unsigned long flag;

	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, flag);
	spin_lock(&qhp->lock);

	if (qhp->wq.flushed) {
		spin_unlock(&qhp->lock);
		spin_unlock_irqrestore(&rchp->lock, flag);
		return;
	}
	qhp->wq.flushed = 1;

	c4iw_flush_hw_cq(rchp);
	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
	rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&rchp->lock, flag);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&schp->lock, flag);
	spin_lock(&qhp->lock);
	if (schp != rchp)
		c4iw_flush_hw_cq(schp);
	sq_flushed = c4iw_flush_sq(qhp);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&schp->lock, flag);

	if (schp == rchp) {
		if (t4_clear_cq_armed(&rchp->cq) &&
		    (rq_flushed || sq_flushed)) {
			spin_lock_irqsave(&rchp->comp_handler_lock, flag);
			(*rchp->ibcq.comp_handler)(&rchp->ibcq,
						   rchp->ibcq.cq_context);
			spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
		}
	} else {
		if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) {
			spin_lock_irqsave(&rchp->comp_handler_lock, flag);
			(*rchp->ibcq.comp_handler)(&rchp->ibcq,
						   rchp->ibcq.cq_context);
			spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
		}
		if (t4_clear_cq_armed(&schp->cq) && sq_flushed) {
			spin_lock_irqsave(&schp->comp_handler_lock, flag);
			(*schp->ibcq.comp_handler)(&schp->ibcq,
						   schp->ibcq.cq_context);
			spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
		}
	}
}

static void flush_qp(struct c4iw_qp *qhp)
{
	struct c4iw_cq *rchp, *schp;
	unsigned long flag;

	rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
	schp = to_c4iw_cq(qhp->ibqp.send_cq);

	t4_set_wq_in_error(&qhp->wq);
	if (qhp->ibqp.uobject) {
		t4_set_cq_in_error(&rchp->cq);
		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
		if (schp != rchp) {
			t4_set_cq_in_error(&schp->cq);
			spin_lock_irqsave(&schp->comp_handler_lock, flag);
			(*schp->ibcq.comp_handler)(&schp->ibcq,
						   schp->ibcq.cq_context);
			spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
		}
		return;
	}
	__flush_qp(qhp, rchp, schp);
}

static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		     struct c4iw_ep *ep)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct sk_buff *skb;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     ep->hwtid);

	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP_V(FW_RI_INIT_WR) |
		FW_WR_COMPL_F);
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
	wqe->cookie = (uintptr_t)&ep->com.wr_wait;

	wqe->u.fini.type = FW_RI_TYPE_FINI;
	ret = c4iw_ofld_send(&rhp->rdev, skb);
	if (ret)
		goto out;

	ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid,
				  qhp->wq.sq.qid, __func__);
out:
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
}

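/*
 * Build the 0B read or write WR the firmware uses as the MPA
 * ready-to-receive exchange for this p2p_type; the stag and address
 * fields appear to be placeholder values (1) since no data moves.
 */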
static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
{
	PDBG("%s p2p_type = %d\n", __func__, p2p_type);
	memset(&init->u, 0, sizeof init->u);
	switch (p2p_type) {
	case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
		init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
		init->u.write.stag_sink = cpu_to_be32(1);
		init->u.write.to_sink = cpu_to_be64(1);
		init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
						   sizeof(struct fw_ri_immd),
						   16);
		break;
	case FW_RI_INIT_P2PTYPE_READ_REQ:
		init->u.write.opcode = FW_RI_RDMA_READ_WR;
		init->u.read.stag_src = cpu_to_be32(1);
		init->u.read.to_src_lo = cpu_to_be32(1);
		init->u.read.stag_sink = cpu_to_be32(1);
		init->u.read.to_sink_lo = cpu_to_be32(1);
		init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
		break;
	}
}

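/*
 * Send the FW_RI_TYPE_INIT WR that moves the connection into RDMA
 * mode: it reserves device IRD resources, then hands the firmware the
 * MPA attributes, QP capabilities, queue/CQ ids, ORD/IRD limits,
 * initial sequence numbers and RQT location, plus the RTR message if
 * this side is the MPA initiator.
 */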
1337static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
1338{
1339 struct fw_ri_wr *wqe;
1340 int ret;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001341 struct sk_buff *skb;
1342
Hariprasad Shenai4c2c5762014-07-14 21:34:52 +05301343 PDBG("%s qhp %p qid 0x%x tid %u ird %u ord %u\n", __func__, qhp,
1344 qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001345
David Rientjesd3c814e2010-07-21 02:44:56 +00001346 skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
Hariprasad Shenai4c2c5762014-07-14 21:34:52 +05301347 if (!skb) {
1348 ret = -ENOMEM;
1349 goto out;
1350 }
1351 ret = alloc_ird(rhp, qhp->attr.max_ird);
1352 if (ret) {
1353 qhp->attr.max_ird = 0;
1354 kfree_skb(skb);
1355 goto out;
1356 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07001357 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
1358
1359 wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
1360 memset(wqe, 0, sizeof *wqe);
1361 wqe->op_compl = cpu_to_be32(
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05301362 FW_WR_OP_V(FW_RI_INIT_WR) |
1363 FW_WR_COMPL_F);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001364 wqe->flowid_len16 = cpu_to_be32(
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05301365 FW_WR_FLOWID_V(qhp->ep->hwtid) |
1366 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
Steve Wisecfdda9d2010-04-21 15:30:06 -07001367
Hariprasad S6198dd82015-04-22 01:44:59 +05301368 wqe->cookie = (uintptr_t)&qhp->ep->com.wr_wait;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001369
1370 wqe->u.init.type = FW_RI_TYPE_INIT;
1371 wqe->u.init.mpareqbit_p2ptype =
Hariprasad Shenaicf7fe642015-01-16 09:24:48 +05301372 FW_RI_WR_MPAREQBIT_V(qhp->attr.mpa_attr.initiator) |
1373 FW_RI_WR_P2PTYPE_V(qhp->attr.mpa_attr.p2p_type);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001374 wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
1375 if (qhp->attr.mpa_attr.recv_marker_enabled)
1376 wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
1377 if (qhp->attr.mpa_attr.xmit_marker_enabled)
1378 wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
1379 if (qhp->attr.mpa_attr.crc_enabled)
1380 wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;
1381
1382 wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
1383 FW_RI_QP_RDMA_WRITE_ENABLE |
1384 FW_RI_QP_BIND_ENABLE;
1385 if (!qhp->ibqp.uobject)
1386 wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
1387 FW_RI_QP_STAG0_ENABLE;
1388 wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
1389 wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
1390 wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
1391 wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
1392 wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
1393 wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
1394 wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
1395 wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
1396 wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
1397 wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
1398 wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
1399 wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
1400 wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
1401 rhp->rdev.lldi.vr->rq.start);
1402 if (qhp->attr.mpa_attr.initiator)
1403 build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
1404
Steve Wisecfdda9d2010-04-21 15:30:06 -07001405 ret = c4iw_ofld_send(&rhp->rdev, skb);
1406 if (ret)
Hariprasad Shenai4c2c5762014-07-14 21:34:52 +05301407 goto err1;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001408
	ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait,
				  qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
	if (!ret)
		goto out;
err1:
	free_ird(rhp, qhp->attr.max_ird);
out:
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
}

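/*
 * Apply the attribute changes in 'attrs' selected by 'mask' and drive the
 * QP state machine.  'internal' distinguishes transitions initiated by
 * the driver itself (connection teardown, error handling) from those
 * requested through the ib_modify_qp() verb; several transitions below
 * are only legal for one of the two.  qhp->mutex serializes all state
 * changes.
 */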
int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		   enum c4iw_qp_attr_mask mask,
		   struct c4iw_qp_attributes *attrs,
		   int internal)
{
	int ret = 0;
	struct c4iw_qp_attributes newattr = qhp->attr;
	int disconnect = 0;
	int terminate = 0;
	int abort = 0;
	int free = 0;
	struct c4iw_ep *ep = NULL;

	PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__,
	     qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
	     (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

	mutex_lock(&qhp->mutex);

	/* Process attr changes if in IDLE */
	if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
		if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
			ret = -EIO;
			goto out;
		}
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
			newattr.enable_rdma_read = attrs->enable_rdma_read;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
			newattr.enable_rdma_write = attrs->enable_rdma_write;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
			newattr.enable_bind = attrs->enable_bind;
		if (mask & C4IW_QP_ATTR_MAX_ORD) {
			if (attrs->max_ord > c4iw_max_read_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ord = attrs->max_ord;
		}
		if (mask & C4IW_QP_ATTR_MAX_IRD) {
			if (attrs->max_ird > cur_max_read_depth(rhp)) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ird = attrs->max_ird;
		}
		qhp->attr = newattr;
	}

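	/*
	 * The DB attrs carry doorbell index increments rather than state
	 * changes; they are used to ring the kernel doorbells when
	 * recovering from a doorbell-full condition and return directly.
	 */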
	if (mask & C4IW_QP_ATTR_SQ_DB) {
		ret = ring_kernel_sq_db(qhp, attrs->sq_db_inc);
		goto out;
	}
	if (mask & C4IW_QP_ATTR_RQ_DB) {
		ret = ring_kernel_rq_db(qhp, attrs->rq_db_inc);
		goto out;
	}

	if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
		goto out;
	if (qhp->attr.state == attrs->next_state)
		goto out;

	switch (qhp->attr.state) {
	case C4IW_QP_STATE_IDLE:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_RTS:
			if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
				ret = -EINVAL;
				goto out;
			}
			if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
				ret = -EINVAL;
				goto out;
			}
			qhp->attr.mpa_attr = attrs->mpa_attr;
			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
			qhp->ep = qhp->attr.llp_stream_handle;
			set_state(qhp, C4IW_QP_STATE_RTS);

			/*
			 * Ref the endpoint here and deref when we
			 * disassociate the endpoint from the QP.  This
			 * happens in CLOSING->IDLE transition or *->ERROR
			 * transition.
			 */
			c4iw_get_ep(&qhp->ep->com);
			ret = rdma_init(rhp, qhp);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_ERROR:
			set_state(qhp, C4IW_QP_STATE_ERROR);
			flush_qp(qhp);
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_RTS:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_CLOSING:
			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
			t4_set_wq_in_error(&qhp->wq);
			set_state(qhp, C4IW_QP_STATE_CLOSING);
			ep = qhp->ep;
			if (!internal) {
				abort = 0;
				disconnect = 1;
				c4iw_get_ep(&qhp->ep->com);
			}
			ret = rdma_fini(rhp, qhp, ep);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_TERMINATE:
			t4_set_wq_in_error(&qhp->wq);
			set_state(qhp, C4IW_QP_STATE_TERMINATE);
			qhp->attr.layer_etype = attrs->layer_etype;
			qhp->attr.ecode = attrs->ecode;
			ep = qhp->ep;
			if (!internal) {
				c4iw_get_ep(&qhp->ep->com);
				terminate = 1;
				disconnect = 1;
			} else {
				terminate = qhp->attr.send_term;
				ret = rdma_fini(rhp, qhp, ep);
				if (ret)
					goto err;
			}
			break;
		case C4IW_QP_STATE_ERROR:
			t4_set_wq_in_error(&qhp->wq);
			set_state(qhp, C4IW_QP_STATE_ERROR);
			if (!internal) {
				abort = 1;
				disconnect = 1;
				ep = qhp->ep;
				c4iw_get_ep(&qhp->ep->com);
			}
			goto err;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_CLOSING:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		switch (attrs->next_state) {
		case C4IW_QP_STATE_IDLE:
			flush_qp(qhp);
			set_state(qhp, C4IW_QP_STATE_IDLE);
			qhp->attr.llp_stream_handle = NULL;
			c4iw_put_ep(&qhp->ep->com);
			qhp->ep = NULL;
			wake_up(&qhp->wait);
			break;
		case C4IW_QP_STATE_ERROR:
			goto err;
		default:
			ret = -EINVAL;
			goto err;
		}
		break;
	case C4IW_QP_STATE_ERROR:
		if (attrs->next_state != C4IW_QP_STATE_IDLE) {
			ret = -EINVAL;
			goto out;
		}
		if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
			ret = -EINVAL;
			goto out;
		}
		set_state(qhp, C4IW_QP_STATE_IDLE);
		break;
	case C4IW_QP_STATE_TERMINATE:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		goto err;
	default:
		printk(KERN_ERR "%s in a bad state %d\n",
		       __func__, qhp->attr.state);
		ret = -EINVAL;
		goto err;
	}
	goto out;
err:
	PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
	     qhp->wq.sq.qid);

	/* disassociate the LLP connection */
	qhp->attr.llp_stream_handle = NULL;
	if (!ep)
		ep = qhp->ep;
	qhp->ep = NULL;
	set_state(qhp, C4IW_QP_STATE_ERROR);
	free = 1;
	abort = 1;
	BUG_ON(!ep);
	flush_qp(qhp);
	wake_up(&qhp->wait);
out:
	mutex_unlock(&qhp->mutex);

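	/*
	 * The terminate/disconnect/deref work below runs after qhp->mutex
	 * is dropped, presumably to avoid lock-ordering issues with the
	 * connection-manager paths that call back into this function.
	 */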
	if (terminate)
		post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);

	/*
	 * If disconnect is 1, then we need to initiate a disconnect
	 * on the EP.  This can be a normal close (RTS->CLOSING) or
	 * an abnormal close (RTS/CLOSING->ERROR).
	 */
	if (disconnect) {
		c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
				   GFP_KERNEL);
		c4iw_put_ep(&ep->com);
	}

	/*
	 * If free is 1, then we've disassociated the EP from the QP
	 * and we need to dereference the EP.
	 */
	if (free)
		c4iw_put_ep(&ep->com);
	PDBG("%s exit state %d\n", __func__, qhp->attr.state);
	return ret;
}

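/*
 * Destroy verb: force the QP into ERROR, wait for the endpoint to be
 * disassociated and all references to drain, unlink the QP from the idr
 * and the db flow-control list, then release the IRD reservation and the
 * hardware queues.
 */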
int c4iw_destroy_qp(struct ib_qp *ib_qp)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_qp_attributes attrs;
	struct c4iw_ucontext *ucontext;

	qhp = to_c4iw_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = C4IW_QP_STATE_ERROR;
	if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	else
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	spin_lock_irq(&rhp->lock);
	if (!list_empty(&qhp->db_fc_entry))
		list_del_init(&qhp->db_fc_entry);
	spin_unlock_irq(&rhp->lock);
	free_ird(rhp, qhp->attr.max_ird);

	ucontext = ib_qp->uobject ?
		   to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
	kfree(qhp);
	return 0;
}

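/*
 * Create verb: only RC QPs are supported.  The requested queue depths are
 * checked against the adapter limits, padded by one slot (presumably so a
 * full ring can be distinguished from an empty one) and clamped to a
 * minimum of 8 entries before the hardware SQ/RQ are allocated.  For user
 * QPs the queue memory is exported through mmap keys in the create
 * response.
 */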
struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_pd *php;
	struct c4iw_cq *schp;
	struct c4iw_cq *rchp;
	struct c4iw_create_qp_resp uresp;
	unsigned int sqsize, rqsize;
	struct c4iw_ucontext *ucontext;
	int ret;
	struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4, *mm5 = NULL;

	PDBG("%s ib_pd %p\n", __func__, pd);

	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size)
		return ERR_PTR(-E2BIG);
	rqsize = attrs->cap.max_recv_wr + 1;
	if (rqsize < 8)
		rqsize = 8;

	if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size)
		return ERR_PTR(-E2BIG);
	sqsize = attrs->cap.max_send_wr + 1;
	if (sqsize < 8)
		sqsize = 8;

	ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;

	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.sq.size = sqsize;
	qhp->wq.sq.memsize =
		(sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
		sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64);
	qhp->wq.sq.flush_cidx = -1;
	qhp->wq.rq.size = rqsize;
	qhp->wq.rq.memsize =
		(rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
		sizeof(*qhp->wq.rq.queue);

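	/*
	 * Queues for user QPs are handed out via mmap(), so round their
	 * sizes up to whole pages.
	 */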
	if (ucontext) {
		qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
		qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
	}

	ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize - 1;
	attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = C4IW_QP_STATE_IDLE;
	qhp->attr.next_state = C4IW_QP_STATE_IDLE;
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 0;
	qhp->attr.max_ird = 0;
	qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
	spin_lock_init(&qhp->lock);
	mutex_init(&qhp->mutex);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);

	ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
	if (ret)
		goto err2;

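	/*
	 * Export the user QP's resources through per-region mmap keys:
	 * the SQ and RQ rings, the SQ and RQ doorbell/GTS pages and, for
	 * on-chip SQs, the MA sync page.  Userspace reads the keys from
	 * the create response and mmap()s each region.
	 */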
	if (udata) {
		mm1 = kmalloc(sizeof(*mm1), GFP_KERNEL);
		if (!mm1) {
			ret = -ENOMEM;
			goto err3;
		}
		mm2 = kmalloc(sizeof(*mm2), GFP_KERNEL);
		if (!mm2) {
			ret = -ENOMEM;
			goto err4;
		}
		mm3 = kmalloc(sizeof(*mm3), GFP_KERNEL);
		if (!mm3) {
			ret = -ENOMEM;
			goto err5;
		}
		mm4 = kmalloc(sizeof(*mm4), GFP_KERNEL);
		if (!mm4) {
			ret = -ENOMEM;
			goto err6;
		}
		if (t4_sq_onchip(&qhp->wq.sq)) {
			mm5 = kmalloc(sizeof(*mm5), GFP_KERNEL);
			if (!mm5) {
				ret = -ENOMEM;
				goto err7;
			}
			uresp.flags = C4IW_QPF_ONCHIP;
		} else
			uresp.flags = 0;
		uresp.qid_mask = rhp->rdev.qpmask;
		uresp.sqid = qhp->wq.sq.qid;
		uresp.sq_size = qhp->wq.sq.size;
		uresp.sq_memsize = qhp->wq.sq.memsize;
		uresp.rqid = qhp->wq.rq.qid;
		uresp.rq_size = qhp->wq.rq.size;
		uresp.rq_memsize = qhp->wq.rq.memsize;
		spin_lock(&ucontext->mmap_lock);
		if (mm5) {
			uresp.ma_sync_key = ucontext->key;
			ucontext->key += PAGE_SIZE;
		} else {
			uresp.ma_sync_key = 0;
		}
		uresp.sq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.sq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (ret)
			goto err8;
		mm1->key = uresp.sq_key;
		mm1->addr = qhp->wq.sq.phys_addr;
		mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.rq_key;
		mm2->addr = virt_to_phys(qhp->wq.rq.queue);
		mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
		insert_mmap(ucontext, mm2);
		mm3->key = uresp.sq_db_gts_key;
		mm3->addr = (__force unsigned long)qhp->wq.sq.bar2_pa;
		mm3->len = PAGE_SIZE;
		insert_mmap(ucontext, mm3);
		mm4->key = uresp.rq_db_gts_key;
		mm4->addr = (__force unsigned long)qhp->wq.rq.bar2_pa;
		mm4->len = PAGE_SIZE;
		insert_mmap(ucontext, mm4);
		if (mm5) {
			mm5->key = uresp.ma_sync_key;
			mm5->addr = (pci_resource_start(rhp->rdev.lldi.pdev, 0)
				    + PCIE_MA_SYNC_A) & PAGE_MASK;
			mm5->len = PAGE_SIZE;
			insert_mmap(ucontext, mm5);
		}
	}
	qhp->ibqp.qp_num = qhp->wq.sq.qid;
	init_timer(&(qhp->timer));
	INIT_LIST_HEAD(&qhp->db_fc_entry);
	PDBG("%s sq id %u size %u memsize %zu num_entries %u "
	     "rq id %u size %u memsize %zu num_entries %u\n", __func__,
	     qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
	     attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
	     qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
	return &qhp->ibqp;
err8:
	kfree(mm5);
err7:
	kfree(mm4);
err6:
	kfree(mm3);
err5:
	kfree(mm2);
err4:
	kfree(mm1);
err3:
	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
err2:
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(qhp);
	return ERR_PTR(ret);
}

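/*
 * ib_modify_qp() entry point: translate the generic verbs attributes and
 * mask into their c4iw equivalents and hand off to c4iw_modify_qp().
 */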
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	enum c4iw_qp_attr_mask mask = 0;
	struct c4iw_qp_attributes attrs;

	PDBG("%s ib_qp %p\n", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof(attrs));
	qhp = to_c4iw_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = c4iw_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
				  IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				   IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
		(C4IW_QP_ATTR_ENABLE_RDMA_READ |
		 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
		 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	/*
	 * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
	 * ringing the queue db when we're in DB_FULL mode.
	 * Only allow this on T4 devices.
	 */
	attrs.sq_db_inc = attr->sq_psn;
	attrs.rq_db_inc = attr->rq_psn;
	mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
	mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
	if (!is_t4(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
	    (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB)))
		return -EINVAL;

	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
}

struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
{
	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}

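/*
 * ib_query_qp() entry point: report the attributes cached at create and
 * modify time; nothing is read back from the hardware.
 */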
int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);

	memset(attr, 0, sizeof(*attr));
	memset(init_attr, 0, sizeof(*init_attr));
	attr->qp_state = to_ib_qp_state(qhp->attr.state);
	init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
	init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
	init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
	init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
	init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
	init_attr->sq_sig_type = qhp->sq_sig_all ?
				 IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
	return 0;
}