/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <rdma/ib_umem.h>
#include <linux/atomic.h>

#include "iw_cxgb4.h"

int use_dsgl = 0;
module_param(use_dsgl, int, 0644);
MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=0)");

#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96
#define T4_ULPTX_MAX_DMA 1024
#define C4IW_INLINE_THRESHOLD 128

static int inline_threshold = C4IW_INLINE_THRESHOLD;
module_param(inline_threshold, int, 0644);
MODULE_PARM_DESC(inline_threshold, "inline vs dsgl threshold (default=128)");

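/*
 * Write one 32B-aligned chunk of adapter memory with a single FW_ULPTX_WR
 * MEM_WRITE work request whose payload is a one-entry DSGL pointing at the
 * caller's already-DMA-mapped buffer.  @addr is a 32B-word index into
 * adapter memory and @len is expected to be a multiple of 32; when @wait
 * is set the WR requests a firmware completion and blocks until it arrives.
 */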
static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
				       u32 len, dma_addr_t data, int wait)
{
	struct sk_buff *skb;
	struct ulp_mem_io *req;
	struct ulptx_sgl *sgl;
	u8 wr_len;
	int ret = 0;
	struct c4iw_wr_wait wr_wait;

	addr &= 0x7FFFFFF;

	if (wait)
		c4iw_init_wr_wait(&wr_wait);
	wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16);

	skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
	memset(req, 0, wr_len);
	INIT_ULPTX_WR(req, wr_len, 0, 0);
	req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
				    (wait ? FW_WR_COMPL_F : 0));
	req->wr.wr_lo = wait ? (__force __be64)(unsigned long)&wr_wait : 0L;
	req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
	req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));
	req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1));
	req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(len >> 5));
	req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
	req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr));

	sgl = (struct ulptx_sgl *)(req + 1);
	sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
				    ULPTX_NSGE(1));
	sgl->len0 = cpu_to_be32(len);
	sgl->addr0 = cpu_to_be64(data);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		return ret;
	if (wait)
		ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	return ret;
}

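/*
 * Write (or, when @data is NULL, zero) @len bytes of adapter memory using
 * inline ULP_TX immediate data.  The buffer is carved into
 * C4IW_MAX_INLINE_SIZE (96B) pieces, each padded out to the 32B ULP_TX
 * minimum I/O size; only the last work request asks for a firmware
 * completion, which this function then blocks on.
 */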
static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
				  void *data)
{
	struct sk_buff *skb;
	struct ulp_mem_io *req;
	struct ulptx_idata *sc;
	u8 wr_len, *to_dp, *from_dp;
	int copy_len, num_wqe, i, ret = 0;
	struct c4iw_wr_wait wr_wait;
	__be32 cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));

	if (is_t4(rdev->lldi.adapter_type))
		cmd |= cpu_to_be32(ULP_MEMIO_ORDER_F);
	else
		cmd |= cpu_to_be32(T5_ULP_MEMIO_IMM_F);

	addr &= 0x7FFFFFF;
	PDBG("%s addr 0x%x len %u\n", __func__, addr, len);
	num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
	c4iw_init_wr_wait(&wr_wait);
	for (i = 0; i < num_wqe; i++) {
		copy_len = len > C4IW_MAX_INLINE_SIZE ? C4IW_MAX_INLINE_SIZE :
			   len;
		wr_len = roundup(sizeof *req + sizeof *sc +
				 roundup(copy_len, T4_ULPTX_MIN_IO), 16);

		skb = alloc_skb(wr_len, GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

		req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
		memset(req, 0, wr_len);
		INIT_ULPTX_WR(req, wr_len, 0, 0);

		if (i == (num_wqe-1)) {
			req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
						    FW_WR_COMPL_F);
			req->wr.wr_lo = (__force __be64)(unsigned long)&wr_wait;
		} else
			req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR));
		req->wr.wr_mid = cpu_to_be32(
				       FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));

		req->cmd = cmd;
		req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(
				DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
		req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr),
						      16));
		req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr + i * 3));

		sc = (struct ulptx_idata *)(req + 1);
		sc->cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM));
		sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));

		to_dp = (u8 *)(sc + 1);
		from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
		if (data)
			memcpy(to_dp, from_dp, copy_len);
		else
			memset(to_dp, 0, copy_len);
		if (copy_len % T4_ULPTX_MIN_IO)
			memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
			       (copy_len % T4_ULPTX_MIN_IO));
		ret = c4iw_ofld_send(rdev, skb);
		if (ret)
			return ret;
		len -= C4IW_MAX_INLINE_SIZE;
	}

	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	return ret;
}

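/*
 * DMA-map @data once, then stream it to adapter memory in aligned DSGL
 * chunks of up to T4_ULPTX_MAX_DMA (1KB) while more than inline_threshold
 * bytes remain, rounding each chunk down to the 32B minimum I/O size; any
 * smaller tail goes out as an inline write.  Only the final write request
 * waits on a firmware completion.
 */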
static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len,
			       void *data)
{
	u32 remain = len;
	u32 dmalen;
	int ret = 0;
	dma_addr_t daddr;
	dma_addr_t save;

	daddr = dma_map_single(&rdev->lldi.pdev->dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&rdev->lldi.pdev->dev, daddr))
		return -1;
	save = daddr;

	while (remain > inline_threshold) {
		if (remain < T4_ULPTX_MAX_DMA) {
			if (remain & ~T4_ULPTX_MIN_IO)
				dmalen = remain & ~(T4_ULPTX_MIN_IO - 1);
			else
				dmalen = remain;
		} else
			dmalen = T4_ULPTX_MAX_DMA;
		remain -= dmalen;
		ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr,
						  !remain);
		if (ret)
			goto out;
		addr += dmalen >> 5;
		data += dmalen;
		daddr += dmalen;
	}
	if (remain)
		ret = _c4iw_write_mem_inline(rdev, addr, remain, data);
out:
	dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE);
	return ret;
}

/*
 * Write len bytes of data into addr (a 32B-aligned address).  If data is
 * NULL, clear len bytes of memory to zero.  On T5 with use_dsgl enabled,
 * writes larger than inline_threshold go through DMA; a DMA mapping
 * failure falls back (non-fatally) to the inline path.
 */
static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
			     void *data)
{
	if (is_t5(rdev->lldi.adapter_type) && use_dsgl) {
		if (len > inline_threshold) {
			if (_c4iw_write_mem_dma(rdev, addr, len, data)) {
				printk_ratelimited(KERN_WARNING
						   "%s: dma map failure (non fatal)\n",
						   pci_name(rdev->lldi.pdev));
				return _c4iw_write_mem_inline(rdev, addr, len,
							      data);
			} else
				return 0;
		} else
			return _c4iw_write_mem_inline(rdev, addr, len, data);
	} else
		return _c4iw_write_mem_inline(rdev, addr, len, data);
}

/*
 * Build and write a TPT entry.
 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
 *     pbl_size and pbl_addr
 * OUT: stag index
 */
static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
			   u32 *stag, u8 stag_state, u32 pdid,
			   enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
			   int bind_enabled, u32 zbva, u64 to,
			   u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
{
	int err;
	struct fw_ri_tpte tpt;
	u32 stag_idx;
	static atomic_t key;

	if (c4iw_fatal_error(rdev))
		return -EIO;

	stag_state = stag_state > 0;
	stag_idx = (*stag) >> 8;

	if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
		stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
		if (!stag_idx) {
			mutex_lock(&rdev->stats.lock);
			rdev->stats.stag.fail++;
			mutex_unlock(&rdev->stats.lock);
			return -ENOMEM;
		}
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur += 32;
		if (rdev->stats.stag.cur > rdev->stats.stag.max)
			rdev->stats.stag.max = rdev->stats.stag.cur;
		mutex_unlock(&rdev->stats.lock);
		*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
	}
	PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
	     __func__, stag_state, type, pdid, stag_idx);

	/* write TPT entry */
	if (reset_tpt_entry)
		memset(&tpt, 0, sizeof(tpt));
	else {
		tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
			V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
			V_FW_RI_TPTE_STAGSTATE(stag_state) |
			V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
		tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
			(bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
			V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
						      FW_RI_VA_BASED_TO)) |
			V_FW_RI_TPTE_PS(page_size));
		tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
			V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr)>>3));
		tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
		tpt.va_hi = cpu_to_be32((u32)(to >> 32));
		tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
		tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
		tpt.len_hi = cpu_to_be32((u32)(len >> 32));
	}
	err = write_adapter_mem(rdev, stag_idx +
				(rdev->lldi.vr->stag.start >> 5),
				sizeof(tpt), &tpt);

	if (reset_tpt_entry) {
		c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur -= 32;
		mutex_unlock(&rdev->stats.lock);
	}
	return err;
}

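/*
 * Copy a page list (PBL) of DMA addresses into its reserved slot in the
 * adapter's PBL memory region.  pbl_addr is a byte address (hence the >>5
 * conversion to a 32B-word index) and pbl_size counts 8-byte entries.
 */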
static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
		     u32 pbl_addr, u32 pbl_size)
{
	int err;

	PDBG("%s *pbl_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
	     __func__, pbl_addr, rdev->lldi.vr->pbl.start,
	     pbl_size);

	err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
	return err;
}

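/*
 * Invalidate a STag by writing a zeroed TPT entry; the reset path in
 * write_tpt_entry also returns the STag index to the resource pool.
 */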
static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
		     u32 pbl_addr)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
			       pbl_size, pbl_addr);
}

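/*
 * Allocate and free TPT entries for memory windows.  A window starts out
 * invalid (stag_state 0) and only becomes usable once it is bound to a
 * memory region.
 */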
static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
			       0UL, 0, 0, 0, 0);
}

static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
			       0);
}

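/*
 * Allocate an invalid NSMR STag for a fast-register MR; its TPT entry
 * references the PBL that a later fast-register WR will populate.
 */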
static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
			 u32 pbl_size, u32 pbl_addr)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
			       0UL, 0, 0, pbl_size, pbl_addr);
}

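/*
 * Mark the MR valid, record its STag, and publish it in the mmid idr so
 * the driver can look the MR up by its memory ID (the upper 24 bits of
 * the STag).
 */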
static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
{
	u32 mmid;

	mhp->attr.state = 1;
	mhp->attr.stag = stag;
	mmid = stag >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
	return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
}

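/*
 * Write a valid NSMR TPT entry for a fully described MR and publish it.
 * A zero-length MR is written with no access rights and length -1.  If
 * publishing fails, the freshly written TPT entry is torn back down.
 */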
static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
			struct c4iw_mr *mhp, int shift)
{
	u32 stag = T4_STAG_UNSET;
	int ret;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
			      FW_RI_STAG_NSMR, mhp->attr.len ?
			      mhp->attr.perms : 0,
			      mhp->attr.mw_bind_enable, mhp->attr.zbva,
			      mhp->attr.va_fbo, mhp->attr.len ?
			      mhp->attr.len : -1, shift - 12,
			      mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
			  mhp->attr.pbl_addr);
	return ret;
}

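/*
 * Rewrite the TPT entry for an existing STag with updated attributes,
 * after checking that the new page list still fits in the MR's PBL.
 */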
static int reregister_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
			  struct c4iw_mr *mhp, int shift, int npages)
{
	u32 stag;
	int ret;

	if (npages > mhp->attr.pbl_size)
		return -ENOMEM;

	stag = mhp->attr.stag;
	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, mhp->attr.zbva,
			      mhp->attr.va_fbo, mhp->attr.len, shift - 12,
			      mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
			  mhp->attr.pbl_addr);

	return ret;
}

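/*
 * Reserve room for npages 8-byte PBL entries from the adapter's PBL pool
 * and record the allocation in the MR attributes.
 */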
static int alloc_pbl(struct c4iw_mr *mhp, int npages)
{
	mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
						npages << 3);

	if (!mhp->attr.pbl_addr)
		return -ENOMEM;

	mhp->attr.pbl_size = npages;

	return 0;
}

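/*
 * Validate a physical buffer list and flatten it into one page list:
 * interior buffers must be page aligned, the total size must fit in 32
 * bits, and the page shift chosen is the largest one (capped at 27) that
 * is still consistent with every buffer address and boundary.
 */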
static int build_phys_page_list(struct ib_phys_buf *buffer_list,
				int num_phys_buf, u64 *iova_start,
				u64 *total_size, int *npages,
				int *shift, __be64 **page_list)
{
	u64 mask;
	int i, j, n;

	mask = 0;
	*total_size = 0;
	for (i = 0; i < num_phys_buf; ++i) {
		if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
			return -EINVAL;
		if (i != 0 && i != num_phys_buf - 1 &&
		    (buffer_list[i].size & ~PAGE_MASK))
			return -EINVAL;
		*total_size += buffer_list[i].size;
		if (i > 0)
			mask |= buffer_list[i].addr;
		else
			mask |= buffer_list[i].addr & PAGE_MASK;
		if (i != num_phys_buf - 1)
			mask |= buffer_list[i].addr + buffer_list[i].size;
		else
			mask |= (buffer_list[i].addr + buffer_list[i].size +
				PAGE_SIZE - 1) & PAGE_MASK;
	}

	if (*total_size > 0xFFFFFFFFULL)
		return -ENOMEM;

	/* Find largest page shift we can use to cover buffers */
	for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
		if ((1ULL << *shift) & mask)
			break;

	buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
	buffer_list[0].addr &= ~0ull << *shift;

	*npages = 0;
	for (i = 0; i < num_phys_buf; ++i)
		*npages += (buffer_list[i].size +
			(1ULL << *shift) - 1) >> *shift;

	if (!*npages)
		return -EINVAL;

	*page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
	if (!*page_list)
		return -ENOMEM;

	n = 0;
	for (i = 0; i < num_phys_buf; ++i)
		for (j = 0;
		     j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
		     ++j)
			(*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
				((u64) j << *shift));

	PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n",
	     __func__, (unsigned long long)*iova_start,
	     (unsigned long long)mask, *shift, (unsigned long long)*total_size,
	     *npages);

	return 0;
}

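/*
 * ib_rereg_phys_mr verb: update the PD, access rights, and/or translation
 * of an existing MR.  The work is staged in a stack copy of the MR and
 * only copied back into the live MR once the adapter update succeeds.
 */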
int c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask,
			     struct ib_pd *pd, struct ib_phys_buf *buffer_list,
			     int num_phys_buf, int acc, u64 *iova_start)
{
	struct c4iw_mr mh, *mhp;
	struct c4iw_pd *php;
	struct c4iw_dev *rhp;
	__be64 *page_list = NULL;
	int shift = 0;
	u64 total_size;
	int npages;
	int ret;

	PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);

	/* There can be no memory windows bound to the MR */
	if (atomic_read(&mr->usecnt))
		return -EINVAL;

	mhp = to_c4iw_mr(mr);
	rhp = mhp->rhp;
	php = to_c4iw_pd(mr->pd);

	/* make sure we are on the same adapter */
	if (rhp != php->rhp)
		return -EINVAL;

	memcpy(&mh, mhp, sizeof *mhp);

	if (mr_rereg_mask & IB_MR_REREG_PD)
		php = to_c4iw_pd(pd);
	if (mr_rereg_mask & IB_MR_REREG_ACCESS) {
		mh.attr.perms = c4iw_ib_to_tpt_access(acc);
		mh.attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) ==
					 IB_ACCESS_MW_BIND;
	}
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		ret = build_phys_page_list(buffer_list, num_phys_buf,
					   iova_start,
					   &total_size, &npages,
					   &shift, &page_list);
		if (ret)
			return ret;
	}

	ret = reregister_mem(rhp, php, &mh, shift, npages);
	kfree(page_list);
	if (ret)
		return ret;
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mhp->attr.pdid = php->pdid;
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		mhp->attr.zbva = 0;
		mhp->attr.va_fbo = *iova_start;
		mhp->attr.page_size = shift - 12;
		mhp->attr.len = (u32) total_size;
		mhp->attr.pbl_size = npages;
	}

	return 0;
}

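/*
 * ib_reg_phys_mr verb: register a set of physical buffers.  The buffer
 * list is flattened into a page list, a PBL is carved out of adapter
 * memory for it, and a TPT entry is written to make the region valid.
 */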
struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
				     struct ib_phys_buf *buffer_list,
				     int num_phys_buf, int acc, u64 *iova_start)
{
	__be64 *page_list;
	int shift;
	u64 total_size;
	int npages;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;

	PDBG("%s ib_pd %p\n", __func__, pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
				   &total_size, &npages, &shift,
				   &page_list);
	if (ret)
		goto err;

	ret = alloc_pbl(mhp, npages);
	if (ret) {
		kfree(page_list);
		goto err;
	}

	ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr,
			npages);
	kfree(page_list);
	if (ret)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = *iova_start;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = register_mem(rhp, php, mhp, shift);
	if (ret)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);

err:
	kfree(mhp);
	return ERR_PTR(ret);
}

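/*
 * ib_get_dma_mr verb: register a PBL-less MR covering all of memory
 * (zero FBO, length ~0UL) for DMA access by the local adapter.
 */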
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;
	u32 stag = T4_STAG_UNSET;

	PDBG("%s ib_pd %p\n", __func__, pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
	mhp->attr.zbva = 0;
	mhp->attr.va_fbo = 0;
	mhp->attr.page_size = 0;
	mhp->attr.len = ~0UL;
	mhp->attr.pbl_size = 0;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0);
	if (ret)
		goto err1;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		goto err2;
	return &mhp->ibmr;
err2:
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr);
err1:
	kfree(mhp);
	return ERR_PTR(ret);
}

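/*
 * ib_reg_user_mr verb: pin a user buffer with ib_umem_get(), then walk
 * its scatterlist and write the resulting DMA page addresses into the
 * MR's PBL one PAGE_SIZE batch at a time before writing the TPT entry.
 */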
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			       u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, len;
	int i, k, entry;
	int err = 0;
	struct scatterlist *sg;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;

	PDBG("%s ib_pd %p\n", __func__, pd);

	if (length == ~0ULL)
		return ERR_PTR(-EINVAL);

	if ((length + start) < start)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree(mhp);
		return ERR_PTR(err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	n = mhp->umem->nmap;
	err = alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;

	for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
		len = sg_dma_len(sg) >> shift;
		for (k = 0; k < len; ++k) {
			pages[i++] = cpu_to_be64(sg_dma_address(sg) +
						 mhp->umem->page_size * k);
			if (i == PAGE_SIZE / sizeof *pages) {
				err = write_pbl(&mhp->rhp->rdev,
						pages,
						mhp->attr.pbl_addr + (n << 3), i);
				if (err)
					goto pbl_done;
				n += i;
				i = 0;
			}
		}
	}

	if (i)
		err = write_pbl(&mhp->rhp->rdev, pages,
				mhp->attr.pbl_addr + (n << 3), i);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = length;

	err = register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);

err:
	ib_umem_release(mhp->umem);
	kfree(mhp);
	return ERR_PTR(err);
}

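/*
 * ib_alloc_mw verb: allocate a type-1 memory window by writing an invalid
 * MW TPT entry, then publish it in the mmid idr.
 */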
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	if (type != IB_MW_TYPE_1)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		deallocate_window(&rhp->rdev, mhp->attr.stag);
		kfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmw);
}

int c4iw_dealloc_mw(struct ib_mw *mw)
{
	struct c4iw_dev *rhp;
	struct c4iw_mw *mhp;
	u32 mmid;

	mhp = to_c4iw_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	remove_handle(rhp, &rhp->mmidr, mmid);
	deallocate_window(&rhp->rdev, mhp->attr.stag);
	kfree(mhp);
	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
	return 0;
}

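/*
 * ib_alloc_fast_reg_mr verb: allocate an MR for fast registration -
 * reserve a PBL of pbl_depth entries, write an invalid NSMR TPT entry
 * pointing at it, and publish the MR in the mmid idr.
 */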
struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = 0;

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp) {
		ret = -ENOMEM;
		goto err;
	}

	mhp->rhp = rhp;
	ret = alloc_pbl(mhp, pbl_depth);
	if (ret)
		goto err1;
	mhp->attr.pbl_size = pbl_depth;
	ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
			    mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		goto err2;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_NSMR;
	mhp->attr.stag = stag;
	mhp->attr.state = 1;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		ret = -ENOMEM;
		goto err3;
	}

	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmr);
err3:
	dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr);
err2:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);
err1:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}

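/*
 * Allocate the DMA-coherent page list that a fast-register WR points at.
 * The length is rounded up to 32 bytes, presumably to keep the buffer
 * 32B-aligned for the DSGL path that pushes the PBL to adapter memory.
 */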
struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
						     int page_list_len)
{
	struct c4iw_fr_page_list *c4pl;
	struct c4iw_dev *dev = to_c4iw_dev(device);
	dma_addr_t dma_addr;
	int pll_len = roundup(page_list_len * sizeof(u64), 32);

	c4pl = kmalloc(sizeof(*c4pl), GFP_KERNEL);
	if (!c4pl)
		return ERR_PTR(-ENOMEM);

	c4pl->ibpl.page_list = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev,
						  pll_len, &dma_addr,
						  GFP_KERNEL);
	if (!c4pl->ibpl.page_list) {
		kfree(c4pl);
		return ERR_PTR(-ENOMEM);
	}
	dma_unmap_addr_set(c4pl, mapping, dma_addr);
	c4pl->dma_addr = dma_addr;
	c4pl->dev = dev;
	c4pl->pll_len = pll_len;

	PDBG("%s c4pl %p pll_len %u page_list %p dma_addr %pad\n",
	     __func__, c4pl, c4pl->pll_len, c4pl->ibpl.page_list,
	     &c4pl->dma_addr);

	return &c4pl->ibpl;
}

void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
{
	struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);

	PDBG("%s c4pl %p pll_len %u page_list %p dma_addr %pad\n",
	     __func__, c4pl, c4pl->pll_len, c4pl->ibpl.page_list,
	     &c4pl->dma_addr);

	dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev,
			  c4pl->pll_len,
			  c4pl->ibpl.page_list, dma_unmap_addr(c4pl, mapping));
	kfree(c4pl);
}

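/*
 * ib_dereg_mr verb: unpublish the MR, invalidate its TPT entry, return
 * its PBL to the pool, and release any pinned user memory.
 */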
int c4iw_dereg_mr(struct ib_mr *ib_mr)
{
	struct c4iw_dev *rhp;
	struct c4iw_mr *mhp;
	u32 mmid;

	PDBG("%s ib_mr %p\n", __func__, ib_mr);
	/* There can be no memory windows bound to the MR */
	if (atomic_read(&ib_mr->usecnt))
		return -EINVAL;

	mhp = to_c4iw_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	remove_handle(rhp, &rhp->mmidr, mmid);
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr);
	if (mhp->attr.pbl_size)
		c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
				  mhp->attr.pbl_size << 3);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
	kfree(mhp);
	return 0;
}