/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <rdma/ib_umem.h>
#include <linux/atomic.h>

#include "iw_cxgb4.h"

int use_dsgl = 1;
module_param(use_dsgl, int, 0644);
MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=1)");

#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96
#define T4_ULPTX_MAX_DMA 1024
#define C4IW_INLINE_THRESHOLD 128

static int inline_threshold = C4IW_INLINE_THRESHOLD;
module_param(inline_threshold, int, 0644);
MODULE_PARM_DESC(inline_threshold, "inline vs dsgl threshold (default=128)");

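/*
 * Write @len bytes at @addr (a 32B-unit adapter memory address) using a
 * single ULP_TX_MEM_WRITE work request whose payload is described by a
 * one-entry DSGL pointing at @data.  When @wait is set, block until the
 * firmware completion arrives.
 */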
static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
				       u32 len, void *data, int wait)
{
	struct sk_buff *skb;
	struct ulp_mem_io *req;
	struct ulptx_sgl *sgl;
	u8 wr_len;
	int ret = 0;
	struct c4iw_wr_wait wr_wait;

	addr &= 0x7FFFFFF;

	if (wait)
		c4iw_init_wr_wait(&wr_wait);
	wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16);

	skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
	memset(req, 0, wr_len);
	INIT_ULPTX_WR(req, wr_len, 0, 0);
	req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
			(wait ? FW_WR_COMPL(1) : 0));
	req->wr.wr_lo = wait ? (__force __be64)&wr_wait : 0;
	req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
	req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));
	req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1));
	req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(len >> 5));
	req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
	req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr));

	sgl = (struct ulptx_sgl *)(req + 1);
	sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_DSGL) |
				    ULPTX_NSGE(1));
	sgl->len0 = cpu_to_be32(len);
	sgl->addr0 = cpu_to_be64(virt_to_phys(data));

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		return ret;
	if (wait)
		ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	return ret;
}

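/*
 * Write (or zero, when @data is NULL) adapter memory using ULP_TX_MEM_WRITE
 * work requests that carry the payload as immediate data.  Each WR moves at
 * most C4IW_MAX_INLINE_SIZE (96) bytes; only the last one requests a
 * completion, which is then waited for.
 */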
static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
				  void *data)
{
	struct sk_buff *skb;
	struct ulp_mem_io *req;
	struct ulptx_idata *sc;
	u8 wr_len, *to_dp, *from_dp;
	int copy_len, num_wqe, i, ret = 0;
	struct c4iw_wr_wait wr_wait;
	__be32 cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));

	if (is_t4(rdev->lldi.adapter_type))
		cmd |= cpu_to_be32(ULP_MEMIO_ORDER(1));
	else
		cmd |= cpu_to_be32(V_T5_ULP_MEMIO_IMM(1));

	addr &= 0x7FFFFFF;
	PDBG("%s addr 0x%x len %u\n", __func__, addr, len);
	num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
	c4iw_init_wr_wait(&wr_wait);
	for (i = 0; i < num_wqe; i++) {

		copy_len = len > C4IW_MAX_INLINE_SIZE ? C4IW_MAX_INLINE_SIZE :
			   len;
		wr_len = roundup(sizeof *req + sizeof *sc +
				 roundup(copy_len, T4_ULPTX_MIN_IO), 16);

		skb = alloc_skb(wr_len, GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

		req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
		memset(req, 0, wr_len);
		INIT_ULPTX_WR(req, wr_len, 0, 0);

		if (i == (num_wqe-1)) {
			req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
						    FW_WR_COMPL(1));
			req->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait;
		} else
			req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR));
		req->wr.wr_mid = cpu_to_be32(
				       FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));

		req->cmd = cmd;
		req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(
				DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
		req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr),
						      16));
		req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr + i * 3));

		sc = (struct ulptx_idata *)(req + 1);
		sc->cmd_more = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_IMM));
		sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));

		to_dp = (u8 *)(sc + 1);
		from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
		if (data)
			memcpy(to_dp, from_dp, copy_len);
		else
			memset(to_dp, 0, copy_len);
		if (copy_len % T4_ULPTX_MIN_IO)
			memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
			       (copy_len % T4_ULPTX_MIN_IO));
		ret = c4iw_ofld_send(rdev, skb);
		if (ret)
			return ret;
		len -= C4IW_MAX_INLINE_SIZE;
	}

	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	return ret;
}

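/*
 * Copy @len bytes to adapter memory at @addr (in 32B units).  While more
 * than inline_threshold bytes remain, push chunks of up to T4_ULPTX_MAX_DMA
 * bytes via the DSGL path; any remaining tail is written with the inline
 * path.
 */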
int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
{
	u32 remain = len;
	u32 dmalen;
	int ret = 0;

	while (remain > inline_threshold) {
		if (remain < T4_ULPTX_MAX_DMA) {
			if (remain & ~T4_ULPTX_MIN_IO)
				dmalen = remain & ~(T4_ULPTX_MIN_IO-1);
			else
				dmalen = remain;
		} else
			dmalen = T4_ULPTX_MAX_DMA;
		remain -= dmalen;
		ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, data,
						  !remain);
		if (ret)
			goto out;
		addr += dmalen >> 5;
		data += dmalen;
	}
	if (remain)
		ret = _c4iw_write_mem_inline(rdev, addr, remain, data);
out:
	return ret;
}

/*
 * Write len bytes of data into addr (a 32B-aligned adapter memory address).
 * If data is NULL, clear len bytes of memory to zero.
 */
static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
			     void *data)
{
	if (is_t5(rdev->lldi.adapter_type) && use_dsgl) {
		if (len > inline_threshold)
			return _c4iw_write_mem_dma(rdev, addr, len, data);
		else
			return _c4iw_write_mem_inline(rdev, addr, len, data);
	} else
		return _c4iw_write_mem_inline(rdev, addr, len, data);
}

/*
 * Build and write a TPT entry.
 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
 *     pbl_size and pbl_addr
 * OUT: stag index
 */
static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
			   u32 *stag, u8 stag_state, u32 pdid,
			   enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
			   int bind_enabled, u32 zbva, u64 to,
			   u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
{
	int err;
	struct fw_ri_tpte tpt;
	u32 stag_idx;
	static atomic_t key;

	if (c4iw_fatal_error(rdev))
		return -EIO;

	stag_state = stag_state > 0;
	stag_idx = (*stag) >> 8;

	if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
		stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
		if (!stag_idx)
			return -ENOMEM;
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur += 32;
		if (rdev->stats.stag.cur > rdev->stats.stag.max)
			rdev->stats.stag.max = rdev->stats.stag.cur;
		mutex_unlock(&rdev->stats.lock);
		*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
	}
	PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
	     __func__, stag_state, type, pdid, stag_idx);

	/* write TPT entry */
	if (reset_tpt_entry)
		memset(&tpt, 0, sizeof(tpt));
	else {
		tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
			V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
			V_FW_RI_TPTE_STAGSTATE(stag_state) |
			V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
		tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
			(bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
			V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
						      FW_RI_VA_BASED_TO))|
			V_FW_RI_TPTE_PS(page_size));
		tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
			V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr)>>3));
		tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
		tpt.va_hi = cpu_to_be32((u32)(to >> 32));
		tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
		tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
		tpt.len_hi = cpu_to_be32((u32)(len >> 32));
	}
	err = write_adapter_mem(rdev, stag_idx +
				(rdev->lldi.vr->stag.start >> 5),
				sizeof(tpt), &tpt);

	if (reset_tpt_entry) {
		c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur -= 32;
		mutex_unlock(&rdev->stats.lock);
	}
	return err;
}

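/* Write a page list into the PBL region of adapter memory. */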
static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
		     u32 pbl_addr, u32 pbl_size)
{
	int err;

	PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
	     __func__, pbl_addr, rdev->lldi.vr->pbl.start,
	     pbl_size);

	err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
	return err;
}

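/*
 * Thin wrappers around write_tpt_entry(): invalidate a stag, allocate or
 * deallocate a memory-window stag, and allocate a non-shared MR stag backed
 * by a PBL.
 */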
static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
		     u32 pbl_addr)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
			       pbl_size, pbl_addr);
}

static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
			       0UL, 0, 0, 0, 0);
}

static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
			       0);
}

static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
			 u32 pbl_size, u32 pbl_addr)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
			       0UL, 0, 0, pbl_size, pbl_addr);
}

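/*
 * Mark the MR valid, record its stag, derive lkey/rkey from it, and insert
 * the MR into the device's mmid table.
 */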
static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
{
	u32 mmid;

	mhp->attr.state = 1;
	mhp->attr.stag = stag;
	mmid = stag >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
	return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
}

static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
			struct c4iw_mr *mhp, int shift)
{
	u32 stag = T4_STAG_UNSET;
	int ret;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, mhp->attr.zbva,
			      mhp->attr.va_fbo, mhp->attr.len, shift - 12,
			      mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
			  mhp->attr.pbl_addr);
	return ret;
}

static int reregister_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
			  struct c4iw_mr *mhp, int shift, int npages)
{
	u32 stag;
	int ret;

	if (npages > mhp->attr.pbl_size)
		return -ENOMEM;

	stag = mhp->attr.stag;
	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, mhp->attr.zbva,
			      mhp->attr.va_fbo, mhp->attr.len, shift - 12,
			      mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
			  mhp->attr.pbl_addr);

	return ret;
}

static int alloc_pbl(struct c4iw_mr *mhp, int npages)
{
	mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
						npages << 3);

	if (!mhp->attr.pbl_addr)
		return -ENOMEM;

	mhp->attr.pbl_size = npages;

	return 0;
}

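/*
 * Validate the physical buffer list, pick the largest page shift that covers
 * every buffer, and build a big-endian page list suitable for the PBL.
 */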
static int build_phys_page_list(struct ib_phys_buf *buffer_list,
				int num_phys_buf, u64 *iova_start,
				u64 *total_size, int *npages,
				int *shift, __be64 **page_list)
{
	u64 mask;
	int i, j, n;

	mask = 0;
	*total_size = 0;
	for (i = 0; i < num_phys_buf; ++i) {
		if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
			return -EINVAL;
		if (i != 0 && i != num_phys_buf - 1 &&
		    (buffer_list[i].size & ~PAGE_MASK))
			return -EINVAL;
		*total_size += buffer_list[i].size;
		if (i > 0)
			mask |= buffer_list[i].addr;
		else
			mask |= buffer_list[i].addr & PAGE_MASK;
		if (i != num_phys_buf - 1)
			mask |= buffer_list[i].addr + buffer_list[i].size;
		else
			mask |= (buffer_list[i].addr + buffer_list[i].size +
				PAGE_SIZE - 1) & PAGE_MASK;
	}

	if (*total_size > 0xFFFFFFFFULL)
		return -ENOMEM;

	/* Find largest page shift we can use to cover buffers */
	for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
		if ((1ULL << *shift) & mask)
			break;

	buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
	buffer_list[0].addr &= ~0ull << *shift;

	*npages = 0;
	for (i = 0; i < num_phys_buf; ++i)
		*npages += (buffer_list[i].size +
			(1ULL << *shift) - 1) >> *shift;

	if (!*npages)
		return -EINVAL;

	*page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
	if (!*page_list)
		return -ENOMEM;

	n = 0;
	for (i = 0; i < num_phys_buf; ++i)
		for (j = 0;
		     j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
		     ++j)
			(*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
			    ((u64) j << *shift));

	PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n",
	     __func__, (unsigned long long)*iova_start,
	     (unsigned long long)mask, *shift, (unsigned long long)*total_size,
	     *npages);

	return 0;
}

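/*
 * Re-register a physical MR, updating the PD, access rights, and/or
 * translation as requested in mr_rereg_mask.
 */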
int c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask,
			     struct ib_pd *pd, struct ib_phys_buf *buffer_list,
			     int num_phys_buf, int acc, u64 *iova_start)
{

	struct c4iw_mr mh, *mhp;
	struct c4iw_pd *php;
	struct c4iw_dev *rhp;
	__be64 *page_list = NULL;
	int shift = 0;
	u64 total_size;
	int npages;
	int ret;

	PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);

	/* There can be no memory windows */
	if (atomic_read(&mr->usecnt))
		return -EINVAL;

	mhp = to_c4iw_mr(mr);
	rhp = mhp->rhp;
	php = to_c4iw_pd(mr->pd);

	/* make sure we are on the same adapter */
	if (rhp != php->rhp)
		return -EINVAL;

	memcpy(&mh, mhp, sizeof *mhp);

	if (mr_rereg_mask & IB_MR_REREG_PD)
		php = to_c4iw_pd(pd);
	if (mr_rereg_mask & IB_MR_REREG_ACCESS) {
		mh.attr.perms = c4iw_ib_to_tpt_access(acc);
		mh.attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) ==
					 IB_ACCESS_MW_BIND;
	}
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		ret = build_phys_page_list(buffer_list, num_phys_buf,
					   iova_start,
					   &total_size, &npages,
					   &shift, &page_list);
		if (ret)
			return ret;
	}

	ret = reregister_mem(rhp, php, &mh, shift, npages);
	kfree(page_list);
	if (ret)
		return ret;
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mhp->attr.pdid = php->pdid;
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		mhp->attr.zbva = 0;
		mhp->attr.va_fbo = *iova_start;
		mhp->attr.page_size = shift - 12;
		mhp->attr.len = (u32) total_size;
		mhp->attr.pbl_size = npages;
	}

	return 0;
}

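/*
 * Register a physical MR: build the page list, allocate and write the PBL,
 * then write the TPT entry.
 */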
struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
				     struct ib_phys_buf *buffer_list,
				     int num_phys_buf, int acc, u64 *iova_start)
{
	__be64 *page_list;
	int shift;
	u64 total_size;
	int npages;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;

	PDBG("%s ib_pd %p\n", __func__, pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
				   &total_size, &npages, &shift,
				   &page_list);
	if (ret)
		goto err;

	ret = alloc_pbl(mhp, npages);
	if (ret) {
		kfree(page_list);
		goto err;
	}

	ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr,
			npages);
	kfree(page_list);
	if (ret)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = *iova_start;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = register_mem(rhp, php, mhp, shift);
	if (ret)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);

err:
	kfree(mhp);
	return ERR_PTR(ret);

}

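/* Allocate a DMA MR spanning all of memory (no PBL, length ~0). */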
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;
	u32 stag = T4_STAG_UNSET;

	PDBG("%s ib_pd %p\n", __func__, pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
	mhp->attr.zbva = 0;
	mhp->attr.va_fbo = 0;
	mhp->attr.page_size = 0;
	mhp->attr.len = ~0UL;
	mhp->attr.pbl_size = 0;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0);
	if (ret)
		goto err1;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		goto err2;
	return &mhp->ibmr;
err2:
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr);
err1:
	kfree(mhp);
	return ERR_PTR(ret);
}

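/*
 * Register a user MR: pin the pages with ib_umem_get(), write their DMA
 * addresses into the PBL one page worth of entries at a time, then write
 * the TPT entry.
 */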
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			       u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, len;
	int i, j, k;
	int err = 0;
	struct ib_umem_chunk *chunk;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;

	PDBG("%s ib_pd %p\n", __func__, pd);

	if (length == ~0ULL)
		return ERR_PTR(-EINVAL);

	if ((length + start) < start)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree(mhp);
		return ERR_PTR(err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	n = 0;
	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		n += chunk->nents;

	err = alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;

	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = cpu_to_be64(sg_dma_address(
					&chunk->page_list[j]) +
					mhp->umem->page_size * k);
				if (i == PAGE_SIZE / sizeof *pages) {
					err = write_pbl(&mhp->rhp->rdev,
					      pages,
					      mhp->attr.pbl_addr + (n << 3), i);
					if (err)
						goto pbl_done;
					n += i;
					i = 0;
				}
			}
		}

	if (i)
		err = write_pbl(&mhp->rhp->rdev, pages,
				mhp->attr.pbl_addr + (n << 3), i);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = length;

	err = register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);

err:
	ib_umem_release(mhp->umem);
	kfree(mhp);
	return ERR_PTR(err);
}

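/* Allocate a type 1 memory window stag and insert it into the mmid table. */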
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	if (type != IB_MW_TYPE_1)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		deallocate_window(&rhp->rdev, mhp->attr.stag);
		kfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmw);
}

int c4iw_dealloc_mw(struct ib_mw *mw)
{
	struct c4iw_dev *rhp;
	struct c4iw_mw *mhp;
	u32 mmid;

	mhp = to_c4iw_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	remove_handle(rhp, &rhp->mmidr, mmid);
	deallocate_window(&rhp->rdev, mhp->attr.stag);
	kfree(mhp);
	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
	return 0;
}

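/*
 * Allocate a fast-register MR: reserve a PBL and an NSMR stag; the page list
 * is filled in later by a fast-register work request.
 */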
struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = 0;

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp) {
		ret = -ENOMEM;
		goto err;
	}

	mhp->rhp = rhp;
	ret = alloc_pbl(mhp, pbl_depth);
	if (ret)
		goto err1;
	mhp->attr.pbl_size = pbl_depth;
	ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
			    mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		goto err2;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_NSMR;
	mhp->attr.stag = stag;
	mhp->attr.state = 1;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		ret = -ENOMEM;
		goto err3;
	}

	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmr);
err3:
	dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr);
err2:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);
err1:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}

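/*
 * Allocate a DMA-coherent buffer for a fast-register page list, sized up to
 * a 32-byte (T4_ULPTX_MIN_IO) multiple.
 */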
struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
						     int page_list_len)
{
	struct c4iw_fr_page_list *c4pl;
	struct c4iw_dev *dev = to_c4iw_dev(device);
	dma_addr_t dma_addr;
	int pll_len = roundup(page_list_len * sizeof(u64), 32);

	c4pl = kmalloc(sizeof(*c4pl), GFP_KERNEL);
	if (!c4pl)
		return ERR_PTR(-ENOMEM);

	c4pl->ibpl.page_list = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev,
						  pll_len, &dma_addr,
						  GFP_KERNEL);
	if (!c4pl->ibpl.page_list) {
		kfree(c4pl);
		return ERR_PTR(-ENOMEM);
	}
	dma_unmap_addr_set(c4pl, mapping, dma_addr);
	c4pl->dma_addr = dma_addr;
	c4pl->dev = dev;
	c4pl->ibpl.max_page_list_len = pll_len;

	return &c4pl->ibpl;
}

void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
{
	struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);

	dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev,
			  c4pl->ibpl.max_page_list_len,
			  c4pl->ibpl.page_list, dma_unmap_addr(c4pl, mapping));
	kfree(c4pl);
}

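/*
 * Deregister an MR: clear its TPT entry, free its PBL (if any), and release
 * the umem for user registrations.
 */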
int c4iw_dereg_mr(struct ib_mr *ib_mr)
{
	struct c4iw_dev *rhp;
	struct c4iw_mr *mhp;
	u32 mmid;

	PDBG("%s ib_mr %p\n", __func__, ib_mr);
	/* There can be no memory windows */
	if (atomic_read(&ib_mr->usecnt))
		return -EINVAL;

	mhp = to_c4iw_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	remove_handle(rhp, &rhp->mmidr, mmid);
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr);
	if (mhp->attr.pbl_size)
		c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
				  mhp->attr.pbl_size << 3);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
	kfree(mhp);
	return 0;
}