/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>

#include "mlx4_ib.h"

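/*
 * Translate IB access flags into mlx4 MPT permission bits.  Local read
 * access is always granted; the other permissions are set only when the
 * corresponding IB_ACCESS_* flag is requested.
 */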
static u32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX4_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX4_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX4_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX4_PERM_LOCAL_WRITE  : 0) |
	       (acc & IB_ACCESS_MW_BIND       ? MLX4_PERM_BIND_MW      : 0) |
	       MLX4_PERM_LOCAL_READ;
}

static enum mlx4_mw_type to_mlx4_type(enum ib_mw_type type)
{
	switch (type) {
	case IB_MW_TYPE_1:	return MLX4_MW_TYPE_1;
	case IB_MW_TYPE_2:	return MLX4_MW_TYPE_2;
	default:		return -1;
	}
}

struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx4_ib_mr *mr;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mlx4_mr_alloc(to_mdev(pd->device)->dev, to_mpd(pd)->pdn, 0,
			    ~0ull, convert_access(acc), 0, 0, &mr->mmr);
	if (err)
		goto err_free;

	err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr);
	if (err)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

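/*
 * Write the DMA addresses of a pinned userspace region into the MTT
 * backing an MR, walking the umem scatterlist and flushing to the
 * device one page worth of MTT entries at a time.
 */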
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
			   struct ib_umem *umem)
{
	u64 *pages;
	int i, k, entry;
	int n;
	int len;
	int err = 0;
	struct scatterlist *sg;

	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	i = n = 0;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		len = sg_dma_len(sg) >> mtt->page_shift;
		for (k = 0; k < len; ++k) {
			pages[i++] = sg_dma_address(sg) +
				umem->page_size * k;
			/*
			 * Be friendly to mlx4_write_mtt() and
			 * pass it chunks of appropriate size.
			 */
			if (i == PAGE_SIZE / sizeof (u64)) {
				err = mlx4_write_mtt(dev->dev, mtt, n,
						     i, pages);
				if (err)
					goto out;
				n += i;
				i = 0;
			}
		}
	}

	if (i)
		err = mlx4_write_mtt(dev->dev, mtt, n, i, pages);

out:
	free_page((unsigned long) pages);
	return err;
}

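/*
 * Register a userspace memory region: pin the pages with ib_umem_get(),
 * allocate an MPT entry, populate its MTT with the page addresses and
 * enable the resulting MR.
 */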
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mr *mr;
	int shift;
	int err;
	int n;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	/*
	 * Force registering the memory as writable.  This is needed for
	 * memory re-registration; the HCA still enforces the requested
	 * access rights.
	 */
	mr->umem = ib_umem_get(pd->uobject->context, start, length,
			       access_flags | IB_ACCESS_LOCAL_WRITE, 0);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		goto err_free;
	}

	n = ib_umem_page_count(mr->umem);
	shift = ilog2(mr->umem->page_size);

	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
			    convert_access(access_flags), n, shift, &mr->mmr);
	if (err)
		goto err_umem;

	err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);
	if (err)
		goto err_mr;

	err = mlx4_mr_enable(dev->dev, &mr->mmr);
	if (err)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;

	return &mr->ibmr;

err_mr:
	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

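/*
 * Re-register an existing userspace MR.  Depending on the flags this may
 * move the MR to a new PD, change its access rights, or replace its
 * translation by pinning a new umem and rewriting the MTT.
 */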
int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
			  u64 start, u64 length, u64 virt_addr,
			  int mr_access_flags, struct ib_pd *pd,
			  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(mr->device);
	struct mlx4_ib_mr *mmr = to_mmr(mr);
	struct mlx4_mpt_entry *mpt_entry;
	struct mlx4_mpt_entry **pmpt_entry = &mpt_entry;
	int err;

	/* Since we synchronize this call and mlx4_ib_dereg_mr via uverbs,
	 * we assume that the calls can't run concurrently. Otherwise, a
	 * race exists.
	 */
	err = mlx4_mr_hw_get_mpt(dev->dev, &mmr->mmr, &pmpt_entry);

	if (err)
		return err;

	if (flags & IB_MR_REREG_PD) {
		err = mlx4_mr_hw_change_pd(dev->dev, *pmpt_entry,
					   to_mpd(pd)->pdn);

		if (err)
			goto release_mpt_entry;
	}

	if (flags & IB_MR_REREG_ACCESS) {
		err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
					       convert_access(mr_access_flags));

		if (err)
			goto release_mpt_entry;
	}

	if (flags & IB_MR_REREG_TRANS) {
		int shift;
		int n;

		mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
		ib_umem_release(mmr->umem);
		mmr->umem = ib_umem_get(mr->uobject->context, start, length,
					mr_access_flags |
					IB_ACCESS_LOCAL_WRITE,
					0);
		if (IS_ERR(mmr->umem)) {
			err = PTR_ERR(mmr->umem);
			/* Prevent mlx4_ib_dereg_mr from freeing an invalid pointer */
			mmr->umem = NULL;
			goto release_mpt_entry;
		}
		n = ib_umem_page_count(mmr->umem);
		shift = ilog2(mmr->umem->page_size);

		err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
					      virt_addr, length, n, shift,
					      *pmpt_entry);
		if (err) {
			ib_umem_release(mmr->umem);
			goto release_mpt_entry;
		}
		mmr->mmr.iova = virt_addr;
		mmr->mmr.size = length;

		err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem);
		if (err) {
			mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
			ib_umem_release(mmr->umem);
			goto release_mpt_entry;
		}
	}

	/* If we couldn't transfer the MR to the HCA, just remember to
	 * return a failure. But dereg_mr will free the resources.
	 */
	err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry);
	if (!err && flags & IB_MR_REREG_ACCESS)
		mmr->mmr.access = mr_access_flags;

release_mpt_entry:
	mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry);

	return err;
}

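/*
 * Allocate the driver-private page list used by mlx4_ib_map_mr_sg().
 * The buffer is over-allocated so it can be aligned to MLX4_MR_PAGES_ALIGN
 * as required by the hardware, then DMA-mapped towards the device.
 */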
static int
mlx4_alloc_priv_pages(struct ib_device *device,
		      struct mlx4_ib_mr *mr,
		      int max_pages)
{
	int size = max_pages * sizeof(u64);
	int add_size;
	int ret;

	add_size = max_t(int, MLX4_MR_PAGES_ALIGN - ARCH_KMALLOC_MINALIGN, 0);

	mr->pages_alloc = kzalloc(size + add_size, GFP_KERNEL);
	if (!mr->pages_alloc)
		return -ENOMEM;

	mr->pages = PTR_ALIGN(mr->pages_alloc, MLX4_MR_PAGES_ALIGN);

	mr->page_map = dma_map_single(device->dma_device, mr->pages,
				      size, DMA_TO_DEVICE);

	if (dma_mapping_error(device->dma_device, mr->page_map)) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;
err:
	kfree(mr->pages_alloc);

	return ret;
}

static void
mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
{
	if (mr->pages) {
		struct ib_device *device = mr->ibmr.device;
		int size = mr->max_pages * sizeof(u64);

		dma_unmap_single(device->dma_device, mr->page_map,
				 size, DMA_TO_DEVICE);
		kfree(mr->pages_alloc);
		mr->pages = NULL;
	}
}

int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct mlx4_ib_mr *mr = to_mmr(ibmr);
	int ret;

	mlx4_free_priv_pages(mr);

	ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
	if (ret)
		return ret;
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);

	return 0;
}

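/* Allocate a type 1 or type 2 memory window on the given PD. */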
struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mw *mw;
	int err;

	mw = kmalloc(sizeof(*mw), GFP_KERNEL);
	if (!mw)
		return ERR_PTR(-ENOMEM);

	err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn,
			    to_mlx4_type(type), &mw->mmw);
	if (err)
		goto err_free;

	err = mlx4_mw_enable(dev->dev, &mw->mmw);
	if (err)
		goto err_mw;

	mw->ibmw.rkey = mw->mmw.key;

	return &mw->ibmw;

err_mw:
	mlx4_mw_free(dev->dev, &mw->mmw);

err_free:
	kfree(mw);

	return ERR_PTR(err);
}

int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
		    struct ib_mw_bind *mw_bind)
{
	struct ib_bind_mw_wr wr;
	struct ib_send_wr *bad_wr;
	int ret;

	memset(&wr, 0, sizeof(wr));
	wr.wr.opcode     = IB_WR_BIND_MW;
	wr.wr.wr_id      = mw_bind->wr_id;
	wr.wr.send_flags = mw_bind->send_flags;
	wr.mw            = mw;
	wr.bind_info     = mw_bind->bind_info;
	wr.rkey          = ib_inc_rkey(mw->rkey);

	ret = mlx4_ib_post_send(qp, &wr.wr, &bad_wr);
	if (!ret)
		mw->rkey = wr.rkey;

	return ret;
}

int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
{
	struct mlx4_ib_mw *mw = to_mmw(ibmw);

	mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw);
	kfree(mw);

	return 0;
}

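/*
 * Allocate an MR for fast registration (IB_MR_TYPE_MEM_REG only), backed
 * by a private page list sized for up to max_num_sg pages.
 */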
struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mr *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > MLX4_MAX_FAST_REG_PAGES)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, 0, 0, 0,
			    max_num_sg, 0, &mr->mmr);
	if (err)
		goto err_free;

	err = mlx4_alloc_priv_pages(pd->device, mr, max_num_sg);
	if (err)
		goto err_free_mr;

	mr->max_pages = max_num_sg;

	err = mlx4_mr_enable(dev->dev, &mr->mmr);
	if (err)
		goto err_free_pl;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_free_pl:
	mlx4_free_priv_pages(mr);
err_free_mr:
	(void) mlx4_mr_free(dev->dev, &mr->mmr);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}

struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
							       int page_list_len)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_fast_reg_page_list *mfrpl;
	int size = page_list_len * sizeof (u64);

	if (page_list_len > MLX4_MAX_FAST_REG_PAGES)
		return ERR_PTR(-EINVAL);

	mfrpl = kmalloc(sizeof *mfrpl, GFP_KERNEL);
	if (!mfrpl)
		return ERR_PTR(-ENOMEM);

	mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL);
	if (!mfrpl->ibfrpl.page_list)
		goto err_free;

	mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->persist->
						     pdev->dev,
						     size, &mfrpl->map,
						     GFP_KERNEL);
	if (!mfrpl->mapped_page_list)
		goto err_free;

	WARN_ON(mfrpl->map & 0x3f);

	return &mfrpl->ibfrpl;

err_free:
	kfree(mfrpl->ibfrpl.page_list);
	kfree(mfrpl);
	return ERR_PTR(-ENOMEM);
}

void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
	struct mlx4_ib_dev *dev = to_mdev(page_list->device);
	struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
	int size = page_list->max_page_list_len * sizeof (u64);

	dma_free_coherent(&dev->dev->persist->pdev->dev, size,
			  mfrpl->mapped_page_list,
			  mfrpl->map);
	kfree(mfrpl->ibfrpl.page_list);
	kfree(mfrpl);
}

struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
				 struct ib_fmr_attr *fmr_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_fmr *fmr;
	int err = -ENOMEM;

	fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
	if (!fmr)
		return ERR_PTR(-ENOMEM);

	err = mlx4_fmr_alloc(dev->dev, to_mpd(pd)->pdn, convert_access(acc),
			     fmr_attr->max_pages, fmr_attr->max_maps,
			     fmr_attr->page_shift, &fmr->mfmr);
	if (err)
		goto err_free;

	err = mlx4_fmr_enable(to_mdev(pd->device)->dev, &fmr->mfmr);
	if (err)
		goto err_mr;

	fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mfmr.mr.key;

	return &fmr->ibfmr;

err_mr:
	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);

err_free:
	kfree(fmr);

	return ERR_PTR(err);
}

int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
			 int npages, u64 iova)
{
	struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
	struct mlx4_ib_dev *dev = to_mdev(ifmr->ibfmr.device);

	return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova,
				 &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
}

int mlx4_ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *ibfmr;
	int err;
	struct mlx4_dev *mdev = NULL;

	list_for_each_entry(ibfmr, fmr_list, list) {
		if (mdev && to_mdev(ibfmr->device)->dev != mdev)
			return -EINVAL;
		mdev = to_mdev(ibfmr->device)->dev;
	}

	if (!mdev)
		return 0;

	list_for_each_entry(ibfmr, fmr_list, list) {
		struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);

		mlx4_fmr_unmap(mdev, &ifmr->mfmr, &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
	}

	/*
	 * Make sure all MPT status updates are visible before issuing
	 * SYNC_TPT firmware command.
	 */
	wmb();

	err = mlx4_SYNC_TPT(mdev);
	if (err)
		pr_warn("SYNC_TPT error %d when unmapping FMRs\n", err);

	return 0;
}

int mlx4_ib_fmr_dealloc(struct ib_fmr *ibfmr)
{
	struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
	struct mlx4_ib_dev *dev = to_mdev(ibfmr->device);
	int err;

	err = mlx4_fmr_free(dev->dev, &ifmr->mfmr);

	if (!err)
		kfree(ifmr);

	return err;
}

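/*
 * mlx4_ib_map_mr_sg() converts a scatterlist into the MR's private page
 * list via ib_sg_to_pages(); mlx4_set_page() records each page address
 * (tagged with MLX4_MTT_FLAG_PRESENT), and DMA syncs bracket the update.
 */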
static int mlx4_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct mlx4_ib_mr *mr = to_mmr(ibmr);

	if (unlikely(mr->npages == mr->max_pages))
		return -ENOMEM;

	mr->pages[mr->npages++] = cpu_to_be64(addr | MLX4_MTT_FLAG_PRESENT);

	return 0;
}

int mlx4_ib_map_mr_sg(struct ib_mr *ibmr,
		      struct scatterlist *sg,
		      int sg_nents)
{
	struct mlx4_ib_mr *mr = to_mmr(ibmr);
	int rc;

	mr->npages = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
				   sizeof(u64) * mr->max_pages,
				   DMA_TO_DEVICE);

	rc = ib_sg_to_pages(ibmr, sg, sg_nents, mlx4_set_page);

	ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
				      sizeof(u64) * mr->max_pages,
				      DMA_TO_DEVICE);

	return rc;
}
619}