/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>

#include "mlx4_ib.h"

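/*
 * Translate IB memory access flags into the permission bits the mlx4
 * hardware expects.  Local read access is always enabled.
 */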
static u32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX4_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX4_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX4_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX4_PERM_LOCAL_WRITE  : 0) |
	       MLX4_PERM_LOCAL_READ;
}

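/*
 * Get a DMA MR that covers all of memory: the region starts at address
 * 0 with length ~0ull, so kernel consumers can use one lkey for all
 * local DMA.  As throughout this file, creation takes two steps:
 * software allocation (mlx4_mr_alloc()) followed by handing the MPT
 * entry to hardware ownership (mlx4_mr_enable()).
 */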
struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx4_ib_mr *mr;
	int err;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mlx4_mr_alloc(to_mdev(pd->device)->dev, to_mpd(pd)->pdn, 0,
			    ~0ull, convert_access(acc), 0, 0, &mr->mmr);
	if (err)
		goto err_free;

	err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr);
	if (err)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

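/*
 * Write the DMA address of every page of a pinned userspace region
 * into the MR's MTT entries.  Addresses are staged in a scratch page
 * and flushed to mlx4_write_mtt() in batches of PAGE_SIZE/sizeof(u64)
 * entries, so each call carries a full chunk.
 */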
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
			   struct ib_umem *umem)
{
	u64 *pages;
	struct ib_umem_chunk *chunk;
	int i, j, k;
	int n;
	int len;
	int err = 0;

	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	i = n = 0;

	list_for_each_entry(chunk, &umem->chunk_list, list)
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> mtt->page_shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = sg_dma_address(&chunk->page_list[j]) +
					umem->page_size * k;
				/*
				 * Be friendly to mlx4_write_mtt() and
				 * pass it chunks of appropriate size.
				 */
				if (i == PAGE_SIZE / sizeof (u64)) {
					err = mlx4_write_mtt(dev->dev, mtt, n,
							     i, pages);
					if (err)
						goto out;
					n += i;
					i = 0;
				}
			}
		}

	if (i)
		err = mlx4_write_mtt(dev->dev, mtt, n, i, pages);

out:
	free_page((unsigned long) pages);
	return err;
}

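/*
 * Register a userspace memory region: pin the pages with
 * ib_umem_get(), allocate an MR large enough for the page list, write
 * the page addresses into the MTTs, and enable the MR.  The error
 * labels unwind these steps in reverse order.
 */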
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mr *mr;
	int shift;
	int err;
	int n;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = ib_umem_get(pd->uobject->context, start, length,
			       access_flags, 0);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		goto err_free;
	}

	n = ib_umem_page_count(mr->umem);
	shift = ilog2(mr->umem->page_size);

	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
			    convert_access(access_flags), n, shift, &mr->mmr);
	if (err)
		goto err_umem;

	err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);
	if (err)
		goto err_mr;

	err = mlx4_mr_enable(dev->dev, &mr->mmr);
	if (err)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;

	return &mr->ibmr;

err_mr:
	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

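/*
 * Deregister an MR.  mlx4_mr_free() can fail (e.g. if a memory window
 * is still bound to the MR), in which case the error is returned and
 * the MR is left intact.
 */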
int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct mlx4_ib_mr *mr = to_mmr(ibmr);
	int ret;

	ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
	if (ret)
		return ret;
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);

	return 0;
}

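/*
 * Allocate an MR for use with fast register work requests.  The
 * region has no address, length or access rights yet (those are
 * supplied by the work request itself); only enough MTT entries are
 * reserved to map max_page_list_len pages.
 */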
struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
					int max_page_list_len)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mr *mr;
	int err;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, 0, 0, 0,
			    max_page_list_len, 0, &mr->mmr);
	if (err)
		goto err_free;

	err = mlx4_mr_enable(dev->dev, &mr->mmr);
	if (err)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	(void) mlx4_mr_free(dev->dev, &mr->mmr);

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

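/*
 * Allocate a page list for fast register work requests.  Two copies
 * are kept: ibfrpl.page_list, which the consumer fills in, and a
 * DMA-coherent shadow (mapped_page_list) that the hardware reads.
 * The WARN_ON checks that the DMA address is 64-byte aligned, as the
 * hardware expects.
 */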
struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
							       int page_list_len)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_fast_reg_page_list *mfrpl;
	int size = page_list_len * sizeof (u64);

	if (page_list_len > MLX4_MAX_FAST_REG_PAGES)
		return ERR_PTR(-EINVAL);

	mfrpl = kmalloc(sizeof *mfrpl, GFP_KERNEL);
	if (!mfrpl)
		return ERR_PTR(-ENOMEM);

	mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL);
	if (!mfrpl->ibfrpl.page_list)
		goto err_free;

	mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->pdev->dev,
						     size, &mfrpl->map,
						     GFP_KERNEL);
	if (!mfrpl->mapped_page_list)
		goto err_free;

	WARN_ON(mfrpl->map & 0x3f);

	return &mfrpl->ibfrpl;

err_free:
	kfree(mfrpl->ibfrpl.page_list);
	kfree(mfrpl);
	return ERR_PTR(-ENOMEM);
}

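/*
 * Free both copies of a fast register page list allocated above.
 */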
void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
	struct mlx4_ib_dev *dev = to_mdev(page_list->device);
	struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
	int size = page_list->max_page_list_len * sizeof (u64);

	dma_free_coherent(&dev->dev->pdev->dev, size, mfrpl->mapped_page_list,
			  mfrpl->map);
	kfree(mfrpl->ibfrpl.page_list);
	kfree(mfrpl);
}

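/*
 * Allocate an FMR: a memory region that can be cheaply remapped to a
 * new set of pages, with the MPT entry updated directly rather than
 * through a firmware command on every map.
 */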
struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
				 struct ib_fmr_attr *fmr_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_fmr *fmr;
	int err = -ENOMEM;

	fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
	if (!fmr)
		return ERR_PTR(-ENOMEM);

	err = mlx4_fmr_alloc(dev->dev, to_mpd(pd)->pdn, convert_access(acc),
			     fmr_attr->max_pages, fmr_attr->max_maps,
			     fmr_attr->page_shift, &fmr->mfmr);
	if (err)
		goto err_free;

	err = mlx4_fmr_enable(to_mdev(pd->device)->dev, &fmr->mfmr);
	if (err)
		goto err_mr;

	fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mfmr.mr.key;

	return &fmr->ibfmr;

err_mr:
	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);

err_free:
	kfree(fmr);

	return ERR_PTR(err);
}

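/*
 * Remap an FMR to the given page list and return the updated
 * lkey/rkey that the consumer should use from now on.
 */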
int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
			 int npages, u64 iova)
{
	struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
	struct mlx4_ib_dev *dev = to_mdev(ifmr->ibfmr.device);

	return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova,
				 &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
}

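/*
 * Unmap a list of FMRs.  All FMRs on the list must belong to the same
 * device, because a single SYNC_TPT command is issued to flush the
 * MPT updates for the whole batch.
 */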
int mlx4_ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *ibfmr;
	int err;
	struct mlx4_dev *mdev = NULL;

	list_for_each_entry(ibfmr, fmr_list, list) {
		if (mdev && to_mdev(ibfmr->device)->dev != mdev)
			return -EINVAL;
		mdev = to_mdev(ibfmr->device)->dev;
	}

	if (!mdev)
		return 0;

	list_for_each_entry(ibfmr, fmr_list, list) {
		struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);

		mlx4_fmr_unmap(mdev, &ifmr->mfmr, &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
	}

	/*
	 * Make sure all MPT status updates are visible before issuing
	 * SYNC_TPT firmware command.
	 */
	wmb();

	err = mlx4_SYNC_TPT(mdev);
	if (err)
		pr_warn("SYNC_TPT error %d when unmapping FMRs\n", err);

	return 0;
}

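/*
 * Free an FMR.  The FMR is assumed to have been unmapped already (via
 * mlx4_ib_unmap_fmr()); the mlx4_ib_fmr is only freed if the
 * low-level free succeeds.
 */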
int mlx4_ib_fmr_dealloc(struct ib_fmr *ibfmr)
{
	struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
	struct mlx4_ib_dev *dev = to_mdev(ibfmr->device);
	int err;

	err = mlx4_fmr_free(dev->dev, &ifmr->mfmr);

	if (!err)
		kfree(ifmr);

	return err;
}