/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <rdma/ib_user_verbs.h>

#include "mlx4_ib.h"

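/*
 * Translate IB access flags into the MLX4_PERM_* bits understood by
 * the HCA's memory protection table (MPT).  Local read access is
 * always granted.
 */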
static u32 convert_access(int acc)
{
        return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX4_PERM_ATOMIC       : 0) |
               (acc & IB_ACCESS_REMOTE_WRITE  ? MLX4_PERM_REMOTE_WRITE : 0) |
               (acc & IB_ACCESS_REMOTE_READ   ? MLX4_PERM_REMOTE_READ  : 0) |
               (acc & IB_ACCESS_LOCAL_WRITE   ? MLX4_PERM_LOCAL_WRITE  : 0) |
               (acc & IB_ACCESS_MW_BIND       ? MLX4_PERM_BIND_MW      : 0) |
               MLX4_PERM_LOCAL_READ;
}

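/* Map an IB memory window type onto the corresponding mlx4 MW type. */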
static enum mlx4_mw_type to_mlx4_type(enum ib_mw_type type)
{
        switch (type) {
        case IB_MW_TYPE_1:      return MLX4_MW_TYPE_1;
        case IB_MW_TYPE_2:      return MLX4_MW_TYPE_2;
        default:                return -1;
        }
}

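/*
 * Register a DMA MR that covers the whole address space (start 0,
 * length ~0ull), so no MTT entries are needed.
 */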
struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct mlx4_ib_mr *mr;
        int err;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        err = mlx4_mr_alloc(to_mdev(pd->device)->dev, to_mpd(pd)->pdn, 0,
                            ~0ull, convert_access(acc), 0, 0, &mr->mmr);
        if (err)
                goto err_free;

        err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr);
        if (err)
                goto err_mr;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
        mr->umem = NULL;

        return &mr->ibmr;

err_mr:
        (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_free:
        kfree(mr);

        return ERR_PTR(err);
}

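/*
 * Walk the umem's DMA-mapped scatterlist and write one MTT entry per
 * page, batching the entries through a scratch page so that each
 * mlx4_write_mtt() call gets a chunk of at most PAGE_SIZE bytes.
 */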
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
                           struct ib_umem *umem)
{
        u64 *pages;
        int i, k, entry;
        int n;
        int len;
        int err = 0;
        struct scatterlist *sg;

        pages = (u64 *) __get_free_page(GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        i = n = 0;

        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                len = sg_dma_len(sg) >> mtt->page_shift;
                for (k = 0; k < len; ++k) {
                        pages[i++] = sg_dma_address(sg) +
                                umem->page_size * k;
                        /*
                         * Be friendly to mlx4_write_mtt() and
                         * pass it chunks of appropriate size.
                         */
                        if (i == PAGE_SIZE / sizeof (u64)) {
                                err = mlx4_write_mtt(dev->dev, mtt, n,
                                                     i, pages);
                                if (err)
                                        goto out;
                                n += i;
                                i = 0;
                        }
                }
        }

        if (i)
                err = mlx4_write_mtt(dev->dev, mtt, n, i, pages);

out:
        free_page((unsigned long) pages);
        return err;
}

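/*
 * Register a userspace memory region: pin the pages with
 * ib_umem_get(), allocate an MR sized for the resulting page list,
 * write the MTT entries and enable the MR.
 */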
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                  u64 virt_addr, int access_flags,
                                  struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_mr *mr;
        int shift;
        int err;
        int n;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        /*
         * Force registering the memory as writable; this is used for
         * memory re-registration, and the HCA protects the access.
         */
        mr->umem = ib_umem_get(pd->uobject->context, start, length,
                               access_flags | IB_ACCESS_LOCAL_WRITE, 0);
        if (IS_ERR(mr->umem)) {
                err = PTR_ERR(mr->umem);
                goto err_free;
        }

        n = ib_umem_page_count(mr->umem);
        shift = ilog2(mr->umem->page_size);

        err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
                            convert_access(access_flags), n, shift, &mr->mmr);
        if (err)
                goto err_umem;

        err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);
        if (err)
                goto err_mr;

        err = mlx4_mr_enable(dev->dev, &mr->mmr);
        if (err)
                goto err_mr;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;

        return &mr->ibmr;

err_mr:
        (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_umem:
        ib_umem_release(mr->umem);

err_free:
        kfree(mr);

        return ERR_PTR(err);
}

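/*
 * Re-register an existing user MR.  Depending on the flags, this may
 * move the MR to a new PD, change its access rights, or replace its
 * translation (pinning a new umem and rewriting the MTT entries),
 * all by modifying the MPT entry in place.
 */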
int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
                          u64 start, u64 length, u64 virt_addr,
                          int mr_access_flags, struct ib_pd *pd,
                          struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(mr->device);
        struct mlx4_ib_mr *mmr = to_mmr(mr);
        struct mlx4_mpt_entry *mpt_entry;
        struct mlx4_mpt_entry **pmpt_entry = &mpt_entry;
        int err;

        /* Since we synchronize this call and mlx4_ib_dereg_mr via uverbs,
         * we assume that the calls can't run concurrently. Otherwise, a
         * race exists.
         */
        err = mlx4_mr_hw_get_mpt(dev->dev, &mmr->mmr, &pmpt_entry);

        if (err)
                return err;

        if (flags & IB_MR_REREG_PD) {
                err = mlx4_mr_hw_change_pd(dev->dev, *pmpt_entry,
                                           to_mpd(pd)->pdn);

                if (err)
                        goto release_mpt_entry;
        }

        if (flags & IB_MR_REREG_ACCESS) {
                err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
                                               convert_access(mr_access_flags));

                if (err)
                        goto release_mpt_entry;
        }

        if (flags & IB_MR_REREG_TRANS) {
                int shift;
                int n;

                mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
                ib_umem_release(mmr->umem);
                mmr->umem = ib_umem_get(mr->uobject->context, start, length,
                                        mr_access_flags |
                                        IB_ACCESS_LOCAL_WRITE,
                                        0);
                if (IS_ERR(mmr->umem)) {
                        err = PTR_ERR(mmr->umem);
                        /* Prevent mlx4_ib_dereg_mr from freeing an invalid pointer */
                        mmr->umem = NULL;
                        goto release_mpt_entry;
                }
                n = ib_umem_page_count(mmr->umem);
                shift = ilog2(mmr->umem->page_size);

                err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
                                              virt_addr, length, n, shift,
                                              *pmpt_entry);
                if (err) {
                        ib_umem_release(mmr->umem);
                        goto release_mpt_entry;
                }
                mmr->mmr.iova = virt_addr;
                mmr->mmr.size = length;

                err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem);
                if (err) {
                        mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
                        ib_umem_release(mmr->umem);
                        goto release_mpt_entry;
                }
        }

        /* If we couldn't transfer the MR to the HCA, just remember to
         * return a failure; dereg_mr will free the resources.
         */
        err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry);
        if (!err && flags & IB_MR_REREG_ACCESS)
                mmr->mmr.access = mr_access_flags;

release_mpt_entry:
        mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry);

        return err;
}

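/*
 * Allocate the driver-private page list used by fast-register work
 * requests, and DMA-map it towards the device.
 */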
static int
mlx4_alloc_priv_pages(struct ib_device *device,
                      struct mlx4_ib_mr *mr,
                      int max_pages)
{
        int ret;

        /* Ensure that size is aligned to DMA cacheline
         * requirements.
         * max_pages is limited to MLX4_MAX_FAST_REG_PAGES
         * so page_map_size will never cross PAGE_SIZE.
         */
        mr->page_map_size = roundup(max_pages * sizeof(u64),
                                    MLX4_MR_PAGES_ALIGN);

        /* Prevent cross page boundary allocation. */
        mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
        if (!mr->pages)
                return -ENOMEM;

        mr->page_map = dma_map_single(device->dev.parent, mr->pages,
                                      mr->page_map_size, DMA_TO_DEVICE);

        if (dma_mapping_error(device->dev.parent, mr->page_map)) {
                ret = -ENOMEM;
                goto err;
        }

        return 0;

err:
        free_page((unsigned long)mr->pages);
        return ret;
}

static void
mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
{
        if (mr->pages) {
                struct ib_device *device = mr->ibmr.device;

                dma_unmap_single(device->dev.parent, mr->page_map,
                                 mr->page_map_size, DMA_TO_DEVICE);
                free_page((unsigned long)mr->pages);
                mr->pages = NULL;
        }
}

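/* Destroy an MR: release the private page list, the HCA MR and any umem. */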
int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
{
        struct mlx4_ib_mr *mr = to_mmr(ibmr);
        int ret;

        mlx4_free_priv_pages(mr);

        ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
        if (ret)
                return ret;
        if (mr->umem)
                ib_umem_release(mr->umem);
        kfree(mr);

        return 0;
}

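/* Allocate and enable a type 1 or type 2 memory window. */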
struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
                               struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_mw *mw;
        int err;

        mw = kmalloc(sizeof(*mw), GFP_KERNEL);
        if (!mw)
                return ERR_PTR(-ENOMEM);

        err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn,
                            to_mlx4_type(type), &mw->mmw);
        if (err)
                goto err_free;

        err = mlx4_mw_enable(dev->dev, &mw->mmw);
        if (err)
                goto err_mw;

        mw->ibmw.rkey = mw->mmw.key;

        return &mw->ibmw;

err_mw:
        mlx4_mw_free(dev->dev, &mw->mmw);

err_free:
        kfree(mw);

        return ERR_PTR(err);
}

int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
{
        struct mlx4_ib_mw *mw = to_mmw(ibmw);

        mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw);
        kfree(mw);

        return 0;
}

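/*
 * Allocate an MR for fast registration (IB_MR_TYPE_MEM_REG only),
 * with room for up to max_num_sg pages; the page list itself is
 * filled in later via mlx4_ib_map_mr_sg().
 */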
struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
                               enum ib_mr_type mr_type,
                               u32 max_num_sg)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_mr *mr;
        int err;

        if (mr_type != IB_MR_TYPE_MEM_REG ||
            max_num_sg > MLX4_MAX_FAST_REG_PAGES)
                return ERR_PTR(-EINVAL);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, 0, 0, 0,
                            max_num_sg, 0, &mr->mmr);
        if (err)
                goto err_free;

        err = mlx4_alloc_priv_pages(pd->device, mr, max_num_sg);
        if (err)
                goto err_free_mr;

        mr->max_pages = max_num_sg;

        err = mlx4_mr_enable(dev->dev, &mr->mmr);
        if (err)
                goto err_free_pl;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
        mr->umem = NULL;

        return &mr->ibmr;

err_free_pl:
        mlx4_free_priv_pages(mr);
err_free_mr:
        (void) mlx4_mr_free(dev->dev, &mr->mmr);
err_free:
        kfree(mr);
        return ERR_PTR(err);
}

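/*
 * Legacy FMR support: allocate, map, unmap and free fast memory
 * regions.  Unmapping updates the MPT status of every FMR on the
 * list and then issues a single SYNC_TPT firmware command.
 */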
struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
                                 struct ib_fmr_attr *fmr_attr)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_fmr *fmr;
        int err = -ENOMEM;

        fmr = kmalloc(sizeof(*fmr), GFP_KERNEL);
        if (!fmr)
                return ERR_PTR(-ENOMEM);

        err = mlx4_fmr_alloc(dev->dev, to_mpd(pd)->pdn, convert_access(acc),
                             fmr_attr->max_pages, fmr_attr->max_maps,
                             fmr_attr->page_shift, &fmr->mfmr);
        if (err)
                goto err_free;

        err = mlx4_fmr_enable(to_mdev(pd->device)->dev, &fmr->mfmr);
        if (err)
                goto err_mr;

        fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mfmr.mr.key;

        return &fmr->ibfmr;

err_mr:
        (void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);

err_free:
        kfree(fmr);

        return ERR_PTR(err);
}

int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
                         int npages, u64 iova)
{
        struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
        struct mlx4_ib_dev *dev = to_mdev(ifmr->ibfmr.device);

        return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova,
                                 &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
}

int mlx4_ib_unmap_fmr(struct list_head *fmr_list)
{
        struct ib_fmr *ibfmr;
        int err;
        struct mlx4_dev *mdev = NULL;

        list_for_each_entry(ibfmr, fmr_list, list) {
                if (mdev && to_mdev(ibfmr->device)->dev != mdev)
                        return -EINVAL;
                mdev = to_mdev(ibfmr->device)->dev;
        }

        if (!mdev)
                return 0;

        list_for_each_entry(ibfmr, fmr_list, list) {
                struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);

                mlx4_fmr_unmap(mdev, &ifmr->mfmr, &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
        }

        /*
         * Make sure all MPT status updates are visible before issuing
         * SYNC_TPT firmware command.
         */
        wmb();

        err = mlx4_SYNC_TPT(mdev);
        if (err)
                pr_warn("SYNC_TPT error %d when unmapping FMRs\n", err);

        return 0;
}

int mlx4_ib_fmr_dealloc(struct ib_fmr *ibfmr)
{
        struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
        struct mlx4_ib_dev *dev = to_mdev(ibfmr->device);
        int err;

        err = mlx4_fmr_free(dev->dev, &ifmr->mfmr);

        if (!err)
                kfree(ifmr);

        return err;
}

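/*
 * Callback for ib_sg_to_pages(): store one page address, tagged as
 * present, in the MR's private page list.
 */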
static int mlx4_set_page(struct ib_mr *ibmr, u64 addr)
{
        struct mlx4_ib_mr *mr = to_mmr(ibmr);

        if (unlikely(mr->npages == mr->max_pages))
                return -ENOMEM;

        mr->pages[mr->npages++] = cpu_to_be64(addr | MLX4_MTT_FLAG_PRESENT);

        return 0;
}

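/*
 * Build the page list for a fast-register MR from a scatterlist,
 * syncing the DMA-mapped list around the update so the device sees
 * a consistent copy.
 */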
int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
                      unsigned int *sg_offset)
{
        struct mlx4_ib_mr *mr = to_mmr(ibmr);
        int rc;

        mr->npages = 0;

        ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
                                   mr->page_map_size, DMA_TO_DEVICE);

        rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);

        ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
                                      mr->page_map_size, DMA_TO_DEVICE);

        return rc;
}