/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */


#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN	2048

static int clean_mr(struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/* Wait until all page fault handlers using the mr complete. */
	synchronize_srcu(&dev->mr_srcu);
#endif

	return err;
}

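/*
 * Map an MR order (log2 of the number of pages) to an index in the
 * MR cache ent[] array.  The first cache entry holds the smallest
 * supported order, so smaller orders share index 0.
 */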
static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	else
		return order - cache->ent[0].order;
}

static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
{
	return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
		length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void update_odp_mr(struct mlx5_ib_mr *mr)
{
	if (mr->umem->odp_data) {
		/*
		 * This barrier prevents the compiler from moving the
		 * setting of umem->odp_data->private to point to our
		 * MR, before reg_umr finished, to ensure that the MR
		 * initialization has finished before starting to
		 * handle invalidations.
		 */
		smp_wmb();
		mr->umem->odp_data->private = mr;
		/*
		 * Make sure we will see the new
		 * umem->odp_data->private value in the invalidation
		 * routines, before we can get page faults on the
		 * MR. Page faults can happen once we put the MR in
		 * the tree, below this line. Without the barrier,
		 * there can be a fault handling and an invalidation
		 * before umem->odp_data->private == mr is visible to
		 * the invalidation handler.
		 */
		smp_wmb();
	}
}
#endif

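/*
 * Completion handler for asynchronous (cached) mkey creation.  On
 * failure the MR is freed and cache refill is throttled for a while
 * via fill_delay; on success the new mkey is assembled, added to the
 * cache entry's free list and inserted into the device mkey table.
 */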
static void reg_mr_callback(int status, void *context)
{
	struct mlx5_ib_mr *mr = context;
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	u8 key;
	unsigned long flags;
	struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
	int err;

	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	mr->mmkey.type = MLX5_MKEY_MR;
	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
	mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	ent->size++;
	spin_unlock_irqrestore(&ent->lock, flags);

	write_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key),
				&mr->mmkey);
	if (err)
		pr_err("Error inserting to mkey tree. 0x%x\n", -err);
	write_unlock_irqrestore(&table->lock, flags);

	if (!completion_done(&ent->compl))
		complete(&ent->compl);
}

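/*
 * Asynchronously create up to @num free mkeys for cache entry @c.
 * At most MAX_PENDING_REG_MR creations may be in flight per entry;
 * -EAGAIN is returned once that limit is hit.
 */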
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err = 0;
	int i;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	for (i = 0; i < num; i++) {
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			break;
		}

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		mr->order = ent->order;
		mr->allocated_from_cache = 1;
		mr->dev = dev;

		MLX5_SET(mkc, mkc, free, 1);
		MLX5_SET(mkc, mkc, umr_en, 1);
		MLX5_SET(mkc, mkc, access_mode, ent->access_mode);

		MLX5_SET(mkc, mkc, qpn, 0xffffff);
		MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
		MLX5_SET(mkc, mkc, log_page_size, ent->page);

		spin_lock_irq(&ent->lock);
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
					       in, inlen,
					       mr->out, sizeof(mr->out),
					       reg_mr_callback, mr);
		if (err) {
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}

static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;
	int i;

	for (i = 0; i < num; i++) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = destroy_mkey(dev, mr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}

static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	u32 var;
	int err;
	int c;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var < ent->limit)
		return -EINVAL;

	if (var > ent->size) {
		do {
			err = add_keys(dev, c, var - ent->size);
			if (err && err != -EAGAIN)
				return err;

			usleep_range(3000, 5000);
		} while (err);
	} else if (var < ent->size) {
		remove_keys(dev, c, ent->size - var);
	}

	return count;
}

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};

static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	u32 var;
	int err;
	int c;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var > ent->size)
		return -EINVAL;

	ent->limit = var;

	if (ent->cur < ent->limit) {
		err = add_keys(dev, c, 2 * ent->limit - ent->cur);
		if (err)
			return err;
	}

	return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};

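/*
 * Return 1 if any cache entry is still below its refill limit, i.e.
 * background key creation may still be in progress somewhere.
 */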
static int someone_adding(struct mlx5_mr_cache *cache)
{
	int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		if (cache->ent[i].cur < cache->ent[i].limit)
			return 1;
	}

	return 0;
}

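/*
 * Refill or shrink one cache entry: top it up towards 2 * limit one
 * key at a time, and garbage-collect back down when it has been
 * over-full for five minutes and the CPU is otherwise idle.
 */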
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);
	int err;

	if (cache->stopped)
		return;

	ent = &dev->cache.ent[i];
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err == -EAGAIN) {
				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
					    i + 2);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(3));
			} else if (err) {
				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
					     i + 2, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			} else {
				queue_work(cache->wq, &ent->work);
			}
		}
	} else if (ent->cur > 2 * ent->limit) {
		/*
		 * The remove_keys() logic is performed as a garbage
		 * collection task.  Such a task is intended to run when no
		 * other active processes are running.
		 *
		 * The need_resched() will return TRUE if there are user
		 * tasks to be activated in the near future.
		 *
		 * In such a case, we don't execute remove_keys() and
		 * postpone the garbage collection work to the next cycle,
		 * in order to free CPU resources to other tasks.
		 */
		if (!need_resched() && !someone_adding(cache) &&
		    time_after(jiffies, cache->last_add + 300 * HZ)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
		} else {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		}
	}
}

static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}

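/*
 * Take one MR from cache entry @entry, triggering a synchronous
 * refill (and waiting for its completion) whenever the free list is
 * empty.
 */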
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	struct mlx5_ib_mr *mr;
	int err;

	if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_err(dev, "cache entry %d is out of range\n", entry);
		return NULL;
	}

	ent = &cache->ent[entry];
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);

			err = add_keys(dev, entry, 1);
			if (err && err != -EAGAIN)
				return ERR_PTR(err);

			wait_for_completion(&ent->compl);
		} else {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			return mr;
		}
	}
}

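/*
 * Try to take an MR of at least @order from the cache without
 * blocking, falling through to larger entries when the exact order
 * is exhausted.  Returns NULL (and bumps the miss counter) if no
 * suitable MR is immediately available.
 */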
static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;
	int last_umr_cache_entry;
	int c;
	int i;

	c = order2idx(dev, order);
	last_umr_cache_entry = order2idx(dev, mr_cache_max_order(dev));
	if (c < 0 || c > last_umr_cache_entry) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
		return NULL;
	}

	for (i = c; i <= last_umr_cache_entry; i++) {
		ent = &cache->ent[i];

		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			break;
		}
		spin_unlock_irq(&ent->lock);

		queue_work(cache->wq, &ent->work);
	}

	if (!mr)
		cache->ent[c].miss++;

	return mr;
}

void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int shrink = 0;
	int c;

	c = order2idx(dev, mr->order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
		return;
	}

	if (unreg_umr(dev, mr))
		return;

	ent = &cache->ent[c];
	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	if (ent->cur > 2 * ent->limit)
		shrink = 1;
	spin_unlock_irq(&ent->lock);

	if (shrink)
		queue_work(cache->wq, &ent->work);
}

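/*
 * Destroy every free MR held by cache entry @c.  Called on teardown,
 * after cache work has been stopped and flushed.
 */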
static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;

	cancel_delayed_work(&ent->dwork);
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = destroy_mkey(dev, mr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}

static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root)
		return;

	debugfs_remove_recursive(dev->cache.root);
	dev->cache.root = NULL;
}

static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int i;

	if (!mlx5_debugfs_root)
		return 0;

	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
	if (!cache->root)
		return -ENOMEM;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		sprintf(ent->name, "%d", ent->order);
		ent->dir = debugfs_create_dir(ent->name, cache->root);
		if (!ent->dir)
			goto err;

		ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
						 &size_fops);
		if (!ent->fsize)
			goto err;

		ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
						  &limit_fops);
		if (!ent->flimit)
			goto err;

		ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
					       &ent->cur);
		if (!ent->fcur)
			goto err;

		ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
						&ent->miss);
		if (!ent->fmiss)
			goto err;
	}

	return 0;
err:
	mlx5_mr_cache_debugfs_cleanup(dev);

	return -ENOMEM;
}

static void delay_time_func(unsigned long ctx)
{
	struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;

	dev->fill_delay = 0;
}

int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int err;
	int i;

	mutex_init(&dev->slow_path_mutex);
	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);
		ent->order = i + 2;
		ent->dev = dev;
		ent->limit = 0;

		init_completion(&ent->compl);
		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
		queue_work(cache->wq, &ent->work);

		if (i > MR_CACHE_LAST_STD_ENTRY) {
			mlx5_odp_init_mr_cache_entry(ent);
			continue;
		}

		if (ent->order > mr_cache_max_order(dev))
			continue;

		ent->page = PAGE_SHIFT;
		ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
			   MLX5_IB_UMR_OCTOWORD;
		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
		    mlx5_core_is_pf(dev->mdev))
			ent->limit = dev->mdev->profile->mr_cache[i].limit;
		else
			ent->limit = 0;
	}

	err = mlx5_mr_cache_debugfs_init(dev);
	if (err)
		mlx5_ib_warn(dev, "cache debugfs failure\n");

	/*
	 * We don't want to fail driver if debugfs failed to initialize,
	 * so we are not forwarding error to the user.
	 */

	return 0;
}

static void wait_for_async_commands(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int total = 0;
	int i;
	int j;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		for (j = 0; j < 1000; j++) {
			if (!ent->pending)
				break;
			msleep(50);
		}
	}
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		total += ent->pending;
	}

	if (total)
		mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total);
	else
		mlx5_ib_warn(dev, "done with all pending requests\n");
}

int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	int i;

	dev->cache.stopped = 1;
	flush_workqueue(dev->cache.wq);

	mlx5_mr_cache_debugfs_cleanup(dev);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
		clean_keys(dev, i);

	destroy_workqueue(dev->cache.wq);
	wait_for_async_commands(dev);
	del_timer_sync(&dev->delay_timer);

	return 0;
}

struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET(mkc, mkc, length64, 1);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, 0);

	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

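/*
 * Number of 16-byte translation octowords needed to map @len bytes
 * starting at @addr with the given page size (two 8-byte page
 * descriptors per octoword, rounded up).
 */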
static int get_octo_len(u64 addr, u64 len, int page_size)
{
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
	return (npages + 1) / 2;
}

static int mr_cache_max_order(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		return MR_CACHE_LAST_STD_ENTRY + 2;
	return MLX5_MAX_UMR_SHIFT;
}

static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
		       int access_flags, struct ib_umem **umem,
		       int *npages, int *page_shift, int *ncont,
		       int *order)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int err;

	*umem = ib_umem_get(pd->uobject->context, start, length,
			    access_flags, 0);
	err = PTR_ERR_OR_ZERO(*umem);
	if (err < 0) {
		mlx5_ib_err(dev, "umem get failed (%d)\n", err);
		return err;
	}

	mlx5_ib_cont_pages(*umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
			   page_shift, ncont, order);
	if (!*npages) {
		mlx5_ib_warn(dev, "avoid zero region\n");
		ib_umem_release(*umem);
		return -EINVAL;
	}

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    *npages, *ncont, *order, *page_shift);

	return 0;
}

static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_umr_context *context =
		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);

	context->status = wc->status;
	complete(&context->done);
}

static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	context->cqe.done = mlx5_ib_umr_done;
	context->status = -1;
	init_completion(&context->done);
}

static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
				  struct mlx5_umr_wr *umrwr)
{
	struct umr_common *umrc = &dev->umrc;
	struct ib_send_wr *bad;
	int err;
	struct mlx5_ib_umr_context umr_context;

	mlx5_ib_init_umr_context(&umr_context);
	umrwr->wr.wr_cqe = &umr_context.cqe;

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr->wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed (%u)\n",
				     umr_context.status);
			err = -EFAULT;
		}
	}
	up(&umrc->sem);
	return err;
}

static struct mlx5_ib_mr *alloc_mr_from_cache(
				  struct ib_pd *pd, struct ib_umem *umem,
				  u64 virt_addr, u64 len, int npages,
				  int page_shift, int order, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	int err = 0;
	int i;

	for (i = 0; i < 1; i++) {
		mr = alloc_cached_mr(dev, order);
		if (mr)
			break;

		err = add_keys(dev, order2idx(dev, order), 1);
		if (err && err != -EAGAIN) {
			mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
			break;
		}
	}

	if (!mr)
		return ERR_PTR(-EAGAIN);

	mr->ibmr.pd = pd;
	mr->umem = umem;
	mr->access_flags = access_flags;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->mmkey.iova = virt_addr;
	mr->mmkey.size = len;
	mr->mmkey.pd = to_mpd(pd)->pdn;

	return mr;
}

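/*
 * Fill one chunk of a UMR translation table: either KLM entries for
 * an indirect (ODP) mkey, or MTT page descriptors taken from the
 * umem, with the tail of the buffer zeroed out.
 */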
static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
			       void *xlt, int page_shift, size_t size,
			       int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct ib_umem *umem = mr->umem;

	if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
		mlx5_odp_populate_klm(xlt, idx, npages, mr, flags);
		return npages;
	}

	npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx);

	if (!(flags & MLX5_IB_UPD_XLT_ZAP)) {
		__mlx5_ib_populate_pas(dev, umem, page_shift,
				       idx, npages, xlt,
				       MLX5_IB_MTT_PRESENT);
		/* Clear padding after the pages
		 * brought from the umem.
		 */
		memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0,
		       size - npages * sizeof(struct mlx5_mtt));
	}

	return npages;
}

#define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
			    MLX5_UMR_MTT_ALIGNMENT)
#define MLX5_SPARE_UMR_CHUNK 0x10000

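/*
 * Rewrite a window of an mkey's translation table with UMR WQEs.
 * The table chunk is staged in a DMA-mapped buffer (falling back to
 * the per-context emergency page under memory pressure) and posted
 * in pieces of at most MLX5_MAX_UMR_CHUNK bytes.
 */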
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct device *ddev = dev->ib_dev.dev.parent;
	struct mlx5_ib_ucontext *uctx = NULL;
	int size;
	void *xlt;
	dma_addr_t dma;
	struct mlx5_umr_wr wr;
	struct ib_sge sg;
	int err = 0;
	int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
			? sizeof(struct mlx5_klm)
			: sizeof(struct mlx5_mtt);
	const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
	const int page_mask = page_align - 1;
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t pages_iter = 0;
	gfp_t gfp;

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly
	 */
	if (idx & page_mask) {
		npages += idx & page_mask;
		idx &= ~page_mask;
	}

	gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL;
	gfp |= __GFP_ZERO | __GFP_NOWARN;

	pages_to_map = ALIGN(npages, page_align);
	size = desc_size * pages_to_map;
	size = min_t(int, size, MLX5_MAX_UMR_CHUNK);

	xlt = (void *)__get_free_pages(gfp, get_order(size));
	if (!xlt && size > MLX5_SPARE_UMR_CHUNK) {
		mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d. fallback to spare UMR allocation of %d bytes\n",
			    size, get_order(size), MLX5_SPARE_UMR_CHUNK);

		size = MLX5_SPARE_UMR_CHUNK;
		xlt = (void *)__get_free_pages(gfp, get_order(size));
	}

	if (!xlt) {
		uctx = to_mucontext(mr->ibmr.pd->uobject->context);
		mlx5_ib_warn(dev, "Using XLT emergency buffer\n");
		size = PAGE_SIZE;
		xlt = (void *)uctx->upd_xlt_page;
		mutex_lock(&uctx->upd_xlt_page_mutex);
		memset(xlt, 0, size);
	}
	pages_iter = size / desc_size;
	dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
		err = -ENOMEM;
		goto free_xlt;
	}

	sg.addr = dma;
	sg.lkey = dev->umrc.pd->local_dma_lkey;

	memset(&wr, 0, sizeof(wr));
	wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
	if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
		wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr.wr.sg_list = &sg;
	wr.wr.num_sge = 1;
	wr.wr.opcode = MLX5_IB_WR_UMR;

	wr.pd = mr->ibmr.pd;
	wr.mkey = mr->mmkey.key;
	wr.length = mr->mmkey.size;
	wr.virt_addr = mr->mmkey.iova;
	wr.access_flags = mr->access_flags;
	wr.page_shift = page_shift;

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, idx += pages_iter) {
		npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
		npages = populate_xlt(mr, idx, npages, xlt,
				      page_shift, size, flags);

		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

		sg.length = ALIGN(npages * desc_size,
				  MLX5_UMR_MTT_ALIGNMENT);

		if (pages_mapped + pages_iter >= pages_to_map) {
			if (flags & MLX5_IB_UPD_XLT_ENABLE)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_ENABLE_MR |
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
			if (flags & MLX5_IB_UPD_XLT_PD ||
			    flags & MLX5_IB_UPD_XLT_ACCESS)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
			if (flags & MLX5_IB_UPD_XLT_ADDR)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
		}

		wr.offset = idx * desc_size;
		wr.xlt_size = sg.length;

		err = mlx5_ib_post_send_wait(dev, &wr);
	}
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_xlt:
	if (uctx)
		mutex_unlock(&uctx->upd_xlt_page_mutex);
	else
		free_pages((unsigned long)xlt, get_order(size));

	return err;
}

/*
 * If ibmr is NULL it will be allocated by reg_create.
 * Else, the given ibmr will be used.
 */
static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
				     u64 virt_addr, u64 length,
				     struct ib_umem *umem, int npages,
				     int page_shift, int access_flags,
				     bool populate)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	__be64 *pas;
	void *mkc;
	int inlen;
	u32 *in;
	int err;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->ibmr.pd = pd;
	mr->access_flags = access_flags;

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	if (populate)
		inlen += sizeof(*pas) * roundup(npages, 2);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	if (populate && !(access_flags & IB_ACCESS_ON_DEMAND))
		mlx5_ib_populate_pas(dev, umem, page_shift, pas,
				     pg_cap ? MLX5_IB_MTT_PRESENT : 0);

	/* The pg_access bit allows setting the access flags
	 * in the page list submitted with the command. */
	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, free, !populate);
	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);

	MLX5_SET64(mkc, mkc, start_addr, virt_addr);
	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(virt_addr, length, 1 << page_shift));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	if (populate) {
		MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
			 get_octo_len(virt_addr, length, 1 << page_shift));
	}

	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->dev = dev;
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);

	return mr;

err_2:
	kvfree(in);

err_1:
	if (!ibmr)
		kfree(mr);

	return ERR_PTR(err);
}

static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			  int npages, u64 length, int access_flags)
{
	mr->npages = npages;
	atomic_add(npages, &dev->mdev->priv.reg_pages);
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.length = length;
	mr->access_flags = access_flags;
}

struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	struct ib_umem *umem;
	int page_shift;
	int npages;
	int ncont;
	int order;
	int err;
	bool use_umr = true;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (!start && length == U64_MAX) {
		if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
		    !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
			return ERR_PTR(-EINVAL);

		mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
		return &mr->ibmr;
	}
#endif

	err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
			  &page_shift, &ncont, &order);

	if (err < 0)
		return ERR_PTR(err);

	if (order <= mr_cache_max_order(dev)) {
		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
					 page_shift, order, access_flags);
		if (PTR_ERR(mr) == -EAGAIN) {
			mlx5_ib_dbg(dev, "cache empty for order %d", order);
			mr = NULL;
		}
	} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
		if (access_flags & IB_ACCESS_ON_DEMAND) {
			err = -EINVAL;
			pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
			goto error;
		}
		use_umr = false;
	}

	if (!mr) {
		mutex_lock(&dev->slow_path_mutex);
		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
				page_shift, access_flags, !use_umr);
		mutex_unlock(&dev->slow_path_mutex);
	}

	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto error;
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	mr->umem = umem;
	set_mr_fields(dev, mr, npages, length, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif

	if (use_umr) {
		int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;

		if (access_flags & IB_ACCESS_ON_DEMAND)
			update_xlt_flags |= MLX5_IB_UPD_XLT_ZAP;

		err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
					 update_xlt_flags);
		if (err) {
			mlx5_ib_dereg_mr(&mr->ibmr);
			return ERR_PTR(err);
		}
	}

	mr->live = 1;
	return &mr->ibmr;
error:
	ib_umem_release(umem);
	return ERR_PTR(err);
}

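/*
 * Post a UMR WQE that disables a cached mkey so the MR can safely go
 * back to the free-list.  Skipped when the device is in internal
 * error state, since no WQEs can complete then.
 */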
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_umr_wr umrwr = {};

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return 0;

	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
			      MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.mkey = mr->mmkey.key;

	return mlx5_ib_post_send_wait(dev, &umrwr);
}

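/*
 * Update the PD and/or access flags of an existing mkey in place
 * with a single UMR WQE, without touching its translation table.
 */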
static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
		     int access_flags, int flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_umr_wr umrwr = {};
	int err;

	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;

	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.mkey = mr->mmkey.key;

	if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) {
		umrwr.pd = pd;
		umrwr.access_flags = access_flags;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
	}

	err = mlx5_ib_post_send_wait(dev, &umrwr);

	return err;
}

int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int new_access_flags,
			  struct ib_pd *new_pd, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
	struct mlx5_ib_mr *mr = to_mmr(ib_mr);
	struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
	int access_flags = flags & IB_MR_REREG_ACCESS ?
			    new_access_flags :
			    mr->access_flags;
	u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address;
	u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length;
	int page_shift = 0;
	int upd_flags = 0;
	int npages = 0;
	int ncont = 0;
	int order = 0;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

	atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);

	if (flags != IB_MR_REREG_PD) {
		/*
		 * Replace umem. This needs to be done whether or not UMR is
		 * used.
		 */
		flags |= IB_MR_REREG_TRANS;
		ib_umem_release(mr->umem);
		err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
				  &npages, &page_shift, &ncont, &order);
		if (err < 0) {
			clean_mr(mr);
			return err;
		}
	}

	if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
		/*
		 * UMR can't be used - MKey needs to be replaced.
		 */
		if (mr->allocated_from_cache) {
			err = unreg_umr(dev, mr);
			if (err)
				mlx5_ib_warn(dev, "Failed to unregister MR\n");
		} else {
			err = destroy_mkey(dev, mr);
			if (err)
				mlx5_ib_warn(dev, "Failed to destroy MKey\n");
		}
		if (err)
			return err;

		mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
				page_shift, access_flags, true);

		if (IS_ERR(mr))
			return PTR_ERR(mr);

		mr->allocated_from_cache = 0;
		mr->live = 1;
	} else {
		/*
		 * Send a UMR WQE
		 */
		mr->ibmr.pd = pd;
		mr->access_flags = access_flags;
		mr->mmkey.iova = addr;
		mr->mmkey.size = len;
		mr->mmkey.pd = to_mpd(pd)->pdn;

		if (flags & IB_MR_REREG_TRANS) {
			upd_flags = MLX5_IB_UPD_XLT_ADDR;
			if (flags & IB_MR_REREG_PD)
				upd_flags |= MLX5_IB_UPD_XLT_PD;
			if (flags & IB_MR_REREG_ACCESS)
				upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
			err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
						 upd_flags);
		} else {
			err = rereg_umr(pd, mr, access_flags, flags);
		}

		if (err) {
			mlx5_ib_warn(dev, "Failed to rereg UMR\n");
			ib_umem_release(mr->umem);
			clean_mr(mr);
			return err;
		}
	}

	set_mr_fileds(dev, mr, npages, len, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif
	return 0;
}

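/*
 * Allocate the private descriptor array (MTTs or KLMs) that backs a
 * kernel MR. The buffer is over-allocated so the start can be aligned
 * to MLX5_UMR_ALIGN, then DMA-mapped once up front; later UMR work
 * requests hand this mapping straight to the HCA.
 */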
static int
mlx5_alloc_priv_descs(struct ib_device *device,
		      struct mlx5_ib_mr *mr,
		      int ndescs,
		      int desc_size)
{
	int size = ndescs * desc_size;
	int add_size;
	int ret;

	add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);

	mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
	if (!mr->descs_alloc)
		return -ENOMEM;

	mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);

	mr->desc_map = dma_map_single(device->dev.parent, mr->descs,
				      size, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev.parent, mr->desc_map)) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;
err:
	kfree(mr->descs_alloc);

	return ret;
}

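/*
 * Undo mlx5_alloc_priv_descs(): unmap and free the descriptor array.
 * A no-op for MRs that never had private descriptors (mr->descs == NULL).
 */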
static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
	if (mr->descs) {
		struct ib_device *device = mr->ibmr.device;
		int size = mr->max_descs * mr->desc_size;

		dma_unmap_single(device->dev.parent, mr->desc_map,
				 size, DMA_TO_DEVICE);
		kfree(mr->descs_alloc);
		mr->descs = NULL;
	}
}

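/*
 * Release everything attached to an MR except its umem: signature PSVs,
 * the private descriptor array, and finally the MKey. MKeys that came
 * from the MR cache are handed back to the cache (and the mlx5_ib_mr
 * stays alive with them); all others are destroyed and freed here.
 */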
static int clean_mr(struct mlx5_ib_mr *mr)
{
	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
	int allocated_from_cache = mr->allocated_from_cache;
	int err;

	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
		kfree(mr->sig);
		mr->sig = NULL;
	}

	mlx5_free_priv_descs(mr);

	if (!allocated_from_cache) {
		err = destroy_mkey(dev, mr);
		if (err) {
			mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
				     mr->mmkey.key, err);
			return err;
		}
	} else {
		mlx5_mr_cache_free(dev, mr);
	}

	if (!allocated_from_cache)
		kfree(mr);

	return 0;
}

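/*
 * mlx5_ib_dereg_mr() - tear down a user MR.
 *
 * For ODP MRs the umem has to go first: new page faults are fenced by
 * clearing mr->live, an SRCU grace period flushes the handlers already
 * running, the device mappings are invalidated, and only then is the
 * MR itself cleaned up, so no invalidation can race with the teardown.
 */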
int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int npages = mr->npages;
	struct ib_umem *umem = mr->umem;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (umem && umem->odp_data) {
		/* Prevent new page faults from succeeding */
		mr->live = 0;
		/* Wait for all running page-fault handlers to finish. */
		synchronize_srcu(&dev->mr_srcu);
		/* Destroy all page mappings */
		if (umem->odp_data->page_list)
			mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
						 ib_umem_end(umem));
		else
			mlx5_ib_free_implicit_mr(mr);
		/*
		 * We kill the umem before the MR for ODP,
		 * so that there will not be any invalidations in
		 * flight, looking at the *mr struct.
		 */
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);

		/* Avoid double-freeing the umem. */
		umem = NULL;
	}
#endif

	clean_mr(mr);

	if (umem) {
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);
	}

	return 0;
}

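/*
 * mlx5_ib_alloc_mr() - driver hook behind ib_alloc_mr() for kernel ULPs.
 *
 * Creates a "free" (unpopulated) MKey whose translation is filled in
 * later by mlx5_ib_map_mr_sg(). IB_MR_TYPE_MEM_REG uses MTT
 * descriptors, IB_MR_TYPE_SG_GAPS uses KLMs so the SG list may be
 * arbitrarily fragmented, and IB_MR_TYPE_SIGNATURE additionally
 * creates the memory/wire PSVs used for signature (e.g. T10-DIF)
 * offload.
 *
 * A rough sketch of the expected caller pattern (hypothetical ULP
 * code, error handling elided):
 *
 *	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
 *	int n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
 *	... post an IB_WR_REG_MR work request, use mr->lkey / mr->rkey ...
 *	ib_dereg_mr(mr);
 */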
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	int ndescs = ALIGN(max_num_sg, 4);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);

	if (mr_type == IB_MR_TYPE_MEM_REG) {
		mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
		err = mlx5_alloc_priv_descs(pd->device, mr,
					    ndescs, sizeof(struct mlx5_mtt));
		if (err)
			goto err_free_in;

		mr->desc_size = sizeof(struct mlx5_mtt);
		mr->max_descs = ndescs;
	} else if (mr_type == IB_MR_TYPE_SG_GAPS) {
		mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;

		err = mlx5_alloc_priv_descs(pd->device, mr,
					    ndescs, sizeof(struct mlx5_klm));
		if (err)
			goto err_free_in;
		mr->desc_size = sizeof(struct mlx5_klm);
		mr->max_descs = ndescs;
	} else if (mr_type == IB_MR_TYPE_SIGNATURE) {
		u32 psv_index[2];

		MLX5_SET(mkc, mkc, bsf_en, 1);
		MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
		mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
		if (!mr->sig) {
			err = -ENOMEM;
			goto err_free_in;
		}

		/* create mem & wire PSVs */
		err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
					   2, psv_index);
		if (err)
			goto err_free_sig;

		mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
		mr->sig->psv_memory.psv_idx = psv_index[0];
		mr->sig->psv_wire.psv_idx = psv_index[1];

		mr->sig->sig_status_checked = true;
		mr->sig->sig_err_exists = false;
		/* Next UMR, Arm SIGERR */
		++mr->sig->sigerr_count;
	} else {
		mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
		err = -EINVAL;
		goto err_free_in;
	}

	MLX5_SET(mkc, mkc, access_mode, mr->access_mode);
	MLX5_SET(mkc, mkc, umr_en, 1);

	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_destroy_psv;

	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;
	kfree(in);

	return &mr->ibmr;

err_destroy_psv:
	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
	}
	mlx5_free_priv_descs(mr);
err_free_sig:
	kfree(mr->sig);
err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}

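/*
 * mlx5_ib_alloc_mw() - allocate a memory window for userspace.
 *
 * The window is a KLM-based MKey created in the "free" state; binding
 * it to an MR is done later by the consumer. req.num_klms is taken
 * from the user's udata, and type 2 windows get remote invalidation
 * enabled (en_rinval).
 */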
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mw *mw = NULL;
	u32 *in = NULL;
	void *mkc;
	int ndescs;
	int err;
	struct mlx5_ib_alloc_mw req = {};
	struct {
		__u32 comp_mask;
		__u32 response_length;
	} resp = {};

	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
	if (err)
		return ERR_PTR(err);

	if (req.comp_mask || req.reserved1 || req.reserved2)
		return ERR_PTR(-EOPNOTSUPP);

	if (udata->inlen > sizeof(req) &&
	    !ib_is_udata_cleared(udata, sizeof(req),
				 udata->inlen - sizeof(req)))
		return ERR_PTR(-EOPNOTSUPP);

	ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);

	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
	in = kzalloc(inlen, GFP_KERNEL);
	if (!mw || !in) {
		err = -ENOMEM;
		goto free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_KLMS);
	MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
	if (err)
		goto free;

	mw->mmkey.type = MLX5_MKEY_MW;
	mw->ibmw.rkey = mw->mmkey.key;
	mw->ndescs = ndescs;

	resp.response_length = min(offsetof(typeof(resp), response_length) +
				   sizeof(resp.response_length), udata->outlen);
	if (resp.response_length) {
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err) {
			mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
			goto free;
		}
	}

	kfree(in);
	return &mw->ibmw;

free:
	kfree(mw);
	kfree(in);
	return ERR_PTR(err);
}

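/* Destroy the MKey behind a memory window and free the window. */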
int mlx5_ib_dealloc_mw(struct ib_mw *mw)
{
	struct mlx5_ib_mw *mmw = to_mmw(mw);
	int err;

	err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
				     &mmw->mmkey);
	if (!err)
		kfree(mmw);
	return err;
}

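/*
 * mlx5_ib_check_mr_status() - report and re-arm signature errors.
 *
 * Only IB_MR_CHECK_SIG_STATUS is supported. Reading a pending error
 * clears sig_err_exists, so each signature error is reported once.
 */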
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
	int ret = 0;

	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
		pr_err("Invalid status check mask\n");
		ret = -EINVAL;
		goto done;
	}

	mr_status->fail_status = 0;
	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
		if (!mmr->sig) {
			ret = -EINVAL;
			pr_err("signature status check requested on a non-signature enabled MR\n");
			goto done;
		}

		mmr->sig->sig_status_checked = true;
		if (!mmr->sig->sig_err_exists)
			goto done;

		if (ibmr->lkey == mmr->sig->err_item.key)
			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
			       sizeof(mr_status->sig_err));
		else {
			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
			mr_status->sig_err.sig_err_offset = 0;
			mr_status->sig_err.key = mmr->sig->err_item.key;
		}

		mmr->sig->sig_err_exists = false;
		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
	}

done:
	return ret;
}

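/*
 * Translate an SG list directly into KLM descriptors, one per SG
 * element. Unlike the MTT path there is no page-alignment constraint,
 * which is what lets IB_MR_TYPE_SG_GAPS MRs map fragmented buffers.
 */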
static int
mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
		   struct scatterlist *sgl,
		   unsigned short sg_nents,
		   unsigned int *sg_offset_p)
{
	struct scatterlist *sg = sgl;
	struct mlx5_klm *klms = mr->descs;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	u32 lkey = mr->ibmr.pd->local_dma_lkey;
	int i;

	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
	mr->ibmr.length = 0;
	mr->ndescs = sg_nents;

	for_each_sg(sgl, sg, sg_nents, i) {
		if (unlikely(i >= mr->max_descs))
			break;
		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
		klms[i].key = cpu_to_be32(lkey);
		mr->ibmr.length += sg_dma_len(sg) - sg_offset;

		sg_offset = 0;
	}

	if (sg_offset_p)
		*sg_offset_p = sg_offset;

	return i;
}

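/*
 * ib_sg_to_pages() callback: append one page address to the MTT array,
 * tagged with the local read/write permission bits.
 */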
static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *descs;

	if (unlikely(mr->ndescs == mr->max_descs))
		return -ENOMEM;

	descs = mr->descs;
	descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

	return 0;
}

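/*
 * mlx5_ib_map_mr_sg() - driver hook behind ib_map_mr_sg().
 *
 * Syncs the descriptor buffer for CPU access, fills it with KLMs (for
 * KLM-mode MRs) or page-aligned MTTs via ib_sg_to_pages(), and syncs
 * it back for the device, ready for a subsequent IB_WR_REG_MR work
 * request. Returns the number of SG elements consumed.
 */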
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int n;

	mr->ndescs = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
				   mr->desc_size * mr->max_descs,
				   DMA_TO_DEVICE);

	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
		n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset);
	else
		n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
				   mlx5_set_page);

	ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
				      mr->desc_size * mr->max_descs,
				      DMA_TO_DEVICE);

	return n;
}