/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */


#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

enum {
        MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048

static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);

static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
{
        return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
}

static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
        return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
}

static bool use_umr(struct mlx5_ib_dev *dev, int order)
{
        return order <= mr_cache_max_order(dev) &&
               umr_can_modify_entity_size(dev);
}

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
        int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        /* Wait until all page fault handlers using the mr complete. */
        synchronize_srcu(&dev->mr_srcu);
#endif

        return err;
}

static int order2idx(struct mlx5_ib_dev *dev, int order)
{
        struct mlx5_mr_cache *cache = &dev->cache;

        if (order < cache->ent[0].order)
                return 0;
        else
                return order - cache->ent[0].order;
}

static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
{
        return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
                length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void update_odp_mr(struct mlx5_ib_mr *mr)
{
        if (mr->umem->odp_data) {
                /*
                 * This barrier prevents the compiler from moving the
                 * setting of umem->odp_data->private to point to our
                 * MR before reg_umr has finished, to ensure that the
                 * MR initialization has finished before we start
                 * handling invalidations.
                 */
                smp_wmb();
                mr->umem->odp_data->private = mr;
                /*
                 * Make sure we will see the new
                 * umem->odp_data->private value in the invalidation
                 * routines, before we can get page faults on the
                 * MR. Page faults can happen once we put the MR in
                 * the tree, below this line. Without the barrier,
                 * page fault handling and an invalidation could run
                 * before umem->odp_data->private == mr is visible to
                 * the invalidation handler.
                 */
                smp_wmb();
        }
}
#endif

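/*
 * Completion handler for the asynchronous mkey creation issued by add_keys().
 * On failure the MR is freed and further cache fills are throttled via the
 * delay timer; on success the variable part of the key is assigned, the MR is
 * added to its cache bucket and to the mkey radix tree, and any waiter in
 * mlx5_mr_cache_alloc() is woken up.
 */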
static void reg_mr_callback(int status, void *context)
{
        struct mlx5_ib_mr *mr = context;
        struct mlx5_ib_dev *dev = mr->dev;
        struct mlx5_mr_cache *cache = &dev->cache;
        int c = order2idx(dev, mr->order);
        struct mlx5_cache_ent *ent = &cache->ent[c];
        u8 key;
        unsigned long flags;
        struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
        int err;

        spin_lock_irqsave(&ent->lock, flags);
        ent->pending--;
        spin_unlock_irqrestore(&ent->lock, flags);
        if (status) {
                mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
                kfree(mr);
                dev->fill_delay = 1;
                mod_timer(&dev->delay_timer, jiffies + HZ);
                return;
        }

        mr->mmkey.type = MLX5_MKEY_MR;
        spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
        key = dev->mdev->priv.mkey_key++;
        spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
        mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;

        cache->last_add = jiffies;

        spin_lock_irqsave(&ent->lock, flags);
        list_add_tail(&mr->list, &ent->head);
        ent->cur++;
        ent->size++;
        spin_unlock_irqrestore(&ent->lock, flags);

        write_lock_irqsave(&table->lock, flags);
        err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key),
                                &mr->mmkey);
        if (err)
                pr_err("Error inserting to mkey tree. 0x%x\n", -err);
        write_unlock_irqrestore(&table->lock, flags);

        if (!completion_done(&ent->compl))
                complete(&ent->compl);
}

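/*
 * Asynchronously create 'num' mkeys for cache bucket 'c'. The mkeys are
 * created in the free state and are finalized in reg_mr_callback(). Returns
 * -EAGAIN when too many creations are already in flight for this bucket.
 */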
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent = &cache->ent[c];
        int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
        struct mlx5_ib_mr *mr;
        void *mkc;
        u32 *in;
        int err = 0;
        int i;

        in = kzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
        for (i = 0; i < num; i++) {
                if (ent->pending >= MAX_PENDING_REG_MR) {
                        err = -EAGAIN;
                        break;
                }

                mr = kzalloc(sizeof(*mr), GFP_KERNEL);
                if (!mr) {
                        err = -ENOMEM;
                        break;
                }
                mr->order = ent->order;
                mr->allocated_from_cache = 1;
                mr->dev = dev;

                MLX5_SET(mkc, mkc, free, 1);
                MLX5_SET(mkc, mkc, umr_en, 1);
                MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
                MLX5_SET(mkc, mkc, access_mode_4_2,
                         (ent->access_mode >> 2) & 0x7);

                MLX5_SET(mkc, mkc, qpn, 0xffffff);
                MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
                MLX5_SET(mkc, mkc, log_page_size, ent->page);

                spin_lock_irq(&ent->lock);
                ent->pending++;
                spin_unlock_irq(&ent->lock);
                err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
                                               in, inlen,
                                               mr->out, sizeof(mr->out),
                                               reg_mr_callback, mr);
                if (err) {
                        spin_lock_irq(&ent->lock);
                        ent->pending--;
                        spin_unlock_irq(&ent->lock);
                        mlx5_ib_warn(dev, "create mkey failed %d\n", err);
                        kfree(mr);
                        break;
                }
        }

        kfree(in);
        return err;
}

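/*
 * Destroy up to 'num' mkeys from cache bucket 'c'. Entries are moved to a
 * private list under the bucket lock, their mkeys are destroyed outside of
 * it, and the MRs are freed only after an SRCU grace period when ODP is
 * enabled.
 */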
static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent = &cache->ent[c];
        struct mlx5_ib_mr *tmp_mr;
        struct mlx5_ib_mr *mr;
        LIST_HEAD(del_list);
        int i;

        for (i = 0; i < num; i++) {
                spin_lock_irq(&ent->lock);
                if (list_empty(&ent->head)) {
                        spin_unlock_irq(&ent->lock);
                        break;
                }
                mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
                list_move(&mr->list, &del_list);
                ent->cur--;
                ent->size--;
                spin_unlock_irq(&ent->lock);
                mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
        }

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        synchronize_srcu(&dev->mr_srcu);
#endif

        list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
                list_del(&mr->list);
                kfree(mr);
        }
}

static ssize_t size_write(struct file *filp, const char __user *buf,
                          size_t count, loff_t *pos)
{
        struct mlx5_cache_ent *ent = filp->private_data;
        struct mlx5_ib_dev *dev = ent->dev;
        char lbuf[20];
        u32 var;
        int err;
        int c;

        if (copy_from_user(lbuf, buf, sizeof(lbuf)))
                return -EFAULT;

        c = order2idx(dev, ent->order);
        lbuf[sizeof(lbuf) - 1] = 0;

        if (sscanf(lbuf, "%u", &var) != 1)
                return -EINVAL;

        if (var < ent->limit)
                return -EINVAL;

        if (var > ent->size) {
                do {
                        err = add_keys(dev, c, var - ent->size);
                        if (err && err != -EAGAIN)
                                return err;

                        usleep_range(3000, 5000);
                } while (err);
        } else if (var < ent->size) {
                remove_keys(dev, c, ent->size - var);
        }

        return count;
}

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
                         loff_t *pos)
{
        struct mlx5_cache_ent *ent = filp->private_data;
        char lbuf[20];
        int err;

        if (*pos)
                return 0;

        err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
        if (err < 0)
                return err;

        if (copy_to_user(buf, lbuf, err))
                return -EFAULT;

        *pos += err;

        return err;
}

static const struct file_operations size_fops = {
        .owner  = THIS_MODULE,
        .open   = simple_open,
        .write  = size_write,
        .read   = size_read,
};

static ssize_t limit_write(struct file *filp, const char __user *buf,
                           size_t count, loff_t *pos)
{
        struct mlx5_cache_ent *ent = filp->private_data;
        struct mlx5_ib_dev *dev = ent->dev;
        char lbuf[20];
        u32 var;
        int err;
        int c;

        if (copy_from_user(lbuf, buf, sizeof(lbuf)))
                return -EFAULT;

        c = order2idx(dev, ent->order);
        lbuf[sizeof(lbuf) - 1] = 0;

        if (sscanf(lbuf, "%u", &var) != 1)
                return -EINVAL;

        if (var > ent->size)
                return -EINVAL;

        ent->limit = var;

        if (ent->cur < ent->limit) {
                err = add_keys(dev, c, 2 * ent->limit - ent->cur);
                if (err)
                        return err;
        }

        return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
                          loff_t *pos)
{
        struct mlx5_cache_ent *ent = filp->private_data;
        char lbuf[20];
        int err;

        if (*pos)
                return 0;

        err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
        if (err < 0)
                return err;

        if (copy_to_user(buf, lbuf, err))
                return -EFAULT;

        *pos += err;

        return err;
}

static const struct file_operations limit_fops = {
        .owner  = THIS_MODULE,
        .open   = simple_open,
        .write  = limit_write,
        .read   = limit_read,
};

static int someone_adding(struct mlx5_mr_cache *cache)
{
        int i;

        for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
                if (cache->ent[i].cur < cache->ent[i].limit)
                        return 1;
        }

        return 0;
}

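/*
 * Per-bucket worker: top the bucket up towards 2 * limit while creation is
 * not being throttled, and lazily shrink it when it has grown beyond
 * 2 * limit and the system has been idle for a while.
 */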
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
        struct mlx5_ib_dev *dev = ent->dev;
        struct mlx5_mr_cache *cache = &dev->cache;
        int i = order2idx(dev, ent->order);
        int err;

        if (cache->stopped)
                return;

        ent = &dev->cache.ent[i];
        if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
                err = add_keys(dev, i, 1);
                if (ent->cur < 2 * ent->limit) {
                        if (err == -EAGAIN) {
                                mlx5_ib_dbg(dev, "returned eagain, order %d\n",
                                            i + 2);
                                queue_delayed_work(cache->wq, &ent->dwork,
                                                   msecs_to_jiffies(3));
                        } else if (err) {
                                mlx5_ib_warn(dev, "command failed order %d, err %d\n",
                                             i + 2, err);
                                queue_delayed_work(cache->wq, &ent->dwork,
                                                   msecs_to_jiffies(1000));
                        } else {
                                queue_work(cache->wq, &ent->work);
                        }
                }
        } else if (ent->cur > 2 * ent->limit) {
                /*
                 * The remove_keys() logic is performed as a garbage
                 * collection task. Such a task is intended to run when no
                 * other active processes are running.
                 *
                 * need_resched() will return TRUE if there are user tasks
                 * to be activated in the near future.
                 *
                 * In that case, we don't execute remove_keys() and postpone
                 * the garbage collection work to the next cycle, in order
                 * to free CPU resources for other tasks.
                 */
                if (!need_resched() && !someone_adding(cache) &&
                    time_after(jiffies, cache->last_add + 300 * HZ)) {
                        remove_keys(dev, i, 1);
                        if (ent->cur > ent->limit)
                                queue_work(cache->wq, &ent->work);
                } else {
                        queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
                }
        }
}

static void delayed_cache_work_func(struct work_struct *work)
{
        struct mlx5_cache_ent *ent;

        ent = container_of(work, struct mlx5_cache_ent, dwork.work);
        __cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
        struct mlx5_cache_ent *ent;

        ent = container_of(work, struct mlx5_cache_ent, work);
        __cache_work_func(ent);
}

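/*
 * Allocate an MR from a specific cache entry, synchronously requesting a new
 * mkey and waiting for its completion whenever the entry is empty.
 */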
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent;
        struct mlx5_ib_mr *mr;
        int err;

        if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) {
                mlx5_ib_err(dev, "cache entry %d is out of range\n", entry);
                return NULL;
        }

        ent = &cache->ent[entry];
        while (1) {
                spin_lock_irq(&ent->lock);
                if (list_empty(&ent->head)) {
                        spin_unlock_irq(&ent->lock);

                        err = add_keys(dev, entry, 1);
                        if (err && err != -EAGAIN)
                                return ERR_PTR(err);

                        wait_for_completion(&ent->compl);
                } else {
                        mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
                                              list);
                        list_del(&mr->list);
                        ent->cur--;
                        spin_unlock_irq(&ent->lock);
                        if (ent->cur < ent->limit)
                                queue_work(cache->wq, &ent->work);
                        return mr;
                }
        }
}

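/*
 * Allocate a cached MR of at least 'order' pages, scanning the larger
 * buckets (up to the last UMR-capable entry) when the exact one is empty and
 * kicking their workers so that misses are refilled in the background.
 */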
static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_ib_mr *mr = NULL;
        struct mlx5_cache_ent *ent;
        int last_umr_cache_entry;
        int c;
        int i;

        c = order2idx(dev, order);
        last_umr_cache_entry = order2idx(dev, mr_cache_max_order(dev));
        if (c < 0 || c > last_umr_cache_entry) {
                mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
                return NULL;
        }

        for (i = c; i <= last_umr_cache_entry; i++) {
                ent = &cache->ent[i];

                mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

                spin_lock_irq(&ent->lock);
                if (!list_empty(&ent->head)) {
                        mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
                                              list);
                        list_del(&mr->list);
                        ent->cur--;
                        spin_unlock_irq(&ent->lock);
                        if (ent->cur < ent->limit)
                                queue_work(cache->wq, &ent->work);
                        break;
                }
                spin_unlock_irq(&ent->lock);

                queue_work(cache->wq, &ent->work);
        }

        if (!mr)
                cache->ent[c].miss++;

        return mr;
}

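/*
 * Return an MR to its cache bucket. The mkey is first invalidated with a UMR
 * (unreg_umr); if that fails the MR is not put back on the free list, so a
 * key with stale translations is never recycled.
 */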
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent;
        int shrink = 0;
        int c;

        c = order2idx(dev, mr->order);
        if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
                mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
                return;
        }

        if (unreg_umr(dev, mr))
                return;

        ent = &cache->ent[c];
        spin_lock_irq(&ent->lock);
        list_add_tail(&mr->list, &ent->head);
        ent->cur++;
        if (ent->cur > 2 * ent->limit)
                shrink = 1;
        spin_unlock_irq(&ent->lock);

        if (shrink)
                queue_work(cache->wq, &ent->work);
}

static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent = &cache->ent[c];
        struct mlx5_ib_mr *tmp_mr;
        struct mlx5_ib_mr *mr;
        LIST_HEAD(del_list);

        cancel_delayed_work(&ent->dwork);
        while (1) {
                spin_lock_irq(&ent->lock);
                if (list_empty(&ent->head)) {
                        spin_unlock_irq(&ent->lock);
                        break;
                }
                mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
                list_move(&mr->list, &del_list);
                ent->cur--;
                ent->size--;
                spin_unlock_irq(&ent->lock);
                mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
        }

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        synchronize_srcu(&dev->mr_srcu);
#endif

        list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
                list_del(&mr->list);
                kfree(mr);
        }
}

static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
        if (!mlx5_debugfs_root || dev->rep)
                return;

        debugfs_remove_recursive(dev->cache.root);
        dev->cache.root = NULL;
}

static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent;
        int i;

        if (!mlx5_debugfs_root || dev->rep)
                return 0;

        cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
        if (!cache->root)
                return -ENOMEM;

        for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
                ent = &cache->ent[i];
                sprintf(ent->name, "%d", ent->order);
                ent->dir = debugfs_create_dir(ent->name, cache->root);
                if (!ent->dir)
                        goto err;

                ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
                                                 &size_fops);
                if (!ent->fsize)
                        goto err;

                ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
                                                  &limit_fops);
                if (!ent->flimit)
                        goto err;

                ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
                                               &ent->cur);
                if (!ent->fcur)
                        goto err;

                ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
                                                &ent->miss);
                if (!ent->fmiss)
                        goto err;
        }

        return 0;
err:
        mlx5_mr_cache_debugfs_cleanup(dev);

        return -ENOMEM;
}

static void delay_time_func(struct timer_list *t)
{
        struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);

        dev->fill_delay = 0;
}

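/*
 * Initialize the MR cache: bucket i holds mkeys covering 2^(i + 2) pages,
 * the entries above MR_CACHE_LAST_STD_ENTRY are set up for ODP, and each
 * bucket's worker is queued so that PF (non-representor) devices get
 * pre-filled according to the profile limit.
 */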
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent;
        int err;
        int i;

        mutex_init(&dev->slow_path_mutex);
        cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
        if (!cache->wq) {
                mlx5_ib_warn(dev, "failed to create work queue\n");
                return -ENOMEM;
        }

        timer_setup(&dev->delay_timer, delay_time_func, 0);
        for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
                ent = &cache->ent[i];
                INIT_LIST_HEAD(&ent->head);
                spin_lock_init(&ent->lock);
                ent->order = i + 2;
                ent->dev = dev;
                ent->limit = 0;

                init_completion(&ent->compl);
                INIT_WORK(&ent->work, cache_work_func);
                INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
                queue_work(cache->wq, &ent->work);

                if (i > MR_CACHE_LAST_STD_ENTRY) {
                        mlx5_odp_init_mr_cache_entry(ent);
                        continue;
                }

                if (ent->order > mr_cache_max_order(dev))
                        continue;

                ent->page = PAGE_SHIFT;
                ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
                           MLX5_IB_UMR_OCTOWORD;
                ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
                if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
                    !dev->rep &&
                    mlx5_core_is_pf(dev->mdev))
                        ent->limit = dev->mdev->profile->mr_cache[i].limit;
                else
                        ent->limit = 0;
        }

        err = mlx5_mr_cache_debugfs_init(dev);
        if (err)
                mlx5_ib_warn(dev, "cache debugfs failure\n");

        /*
         * We don't want to fail the driver if debugfs failed to
         * initialize, so we are not forwarding the error to the user.
         */

        return 0;
}

static void wait_for_async_commands(struct mlx5_ib_dev *dev)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent;
        int total = 0;
        int i;
        int j;

        for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
                ent = &cache->ent[i];
                for (j = 0 ; j < 1000; j++) {
                        if (!ent->pending)
                                break;
                        msleep(50);
                }
        }
        for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
                ent = &cache->ent[i];
                total += ent->pending;
        }

        if (total)
                mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total);
        else
                mlx5_ib_warn(dev, "done with all pending requests\n");
}

int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
        int i;

        if (!dev->cache.wq)
                return 0;

        dev->cache.stopped = 1;
        flush_workqueue(dev->cache.wq);

        mlx5_mr_cache_debugfs_cleanup(dev);

        for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
                clean_keys(dev, i);

        destroy_workqueue(dev->cache.wq);
        wait_for_async_commands(dev);
        del_timer_sync(&dev->delay_timer);

        return 0;
}

struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
        struct mlx5_core_dev *mdev = dev->mdev;
        struct mlx5_ib_mr *mr;
        void *mkc;
        u32 *in;
        int err;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        in = kzalloc(inlen, GFP_KERNEL);
        if (!in) {
                err = -ENOMEM;
                goto err_free;
        }

        mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

        MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
        MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
        MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
        MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
        MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
        MLX5_SET(mkc, mkc, lr, 1);

        MLX5_SET(mkc, mkc, length64, 1);
        MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
        MLX5_SET(mkc, mkc, qpn, 0xffffff);
        MLX5_SET64(mkc, mkc, start_addr, 0);

        err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
        if (err)
                goto err_in;

        kfree(in);
        mr->mmkey.type = MLX5_MKEY_MR;
        mr->ibmr.lkey = mr->mmkey.key;
        mr->ibmr.rkey = mr->mmkey.key;
        mr->umem = NULL;

        return &mr->ibmr;

err_in:
        kfree(in);

err_free:
        kfree(mr);

        return ERR_PTR(err);
}

static int get_octo_len(u64 addr, u64 len, int page_shift)
{
        u64 page_size = 1ULL << page_shift;
        u64 offset;
        int npages;

        offset = addr & (page_size - 1);
        npages = ALIGN(len + offset, page_size) >> page_shift;
        return (npages + 1) / 2;
}

static int mr_cache_max_order(struct mlx5_ib_dev *dev)
{
        if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
                return MR_CACHE_LAST_STD_ENTRY + 2;
        return MLX5_MAX_UMR_SHIFT;
}

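/*
 * Pin the user memory for [start, start + length) and report its page count,
 * best page shift, number of contiguous blocks and cache order.
 */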
static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
                       int access_flags, struct ib_umem **umem,
                       int *npages, int *page_shift, int *ncont,
                       int *order)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct ib_umem *u;
        int err;

        *umem = NULL;

        u = ib_umem_get(pd->uobject->context, start, length, access_flags, 0);
        err = PTR_ERR_OR_ZERO(u);
        if (err) {
                mlx5_ib_dbg(dev, "umem get failed (%d)\n", err);
                return err;
        }

        mlx5_ib_cont_pages(u, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
                           page_shift, ncont, order);
        if (!*npages) {
                mlx5_ib_warn(dev, "avoid zero region\n");
                ib_umem_release(u);
                return -EINVAL;
        }

        *umem = u;

        mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
                    *npages, *ncont, *order, *page_shift);

        return 0;
}

static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct mlx5_ib_umr_context *context =
                container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);

        context->status = wc->status;
        complete(&context->done);
}

static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
        context->cqe.done = mlx5_ib_umr_done;
        context->status = -1;
        init_completion(&context->done);
}

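/*
 * Post a UMR work request on the dedicated UMR QP and wait for its
 * completion, converting a failed work completion into -EFAULT.
 */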
static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
                                  struct mlx5_umr_wr *umrwr)
{
        struct umr_common *umrc = &dev->umrc;
        struct ib_send_wr *bad;
        int err;
        struct mlx5_ib_umr_context umr_context;

        mlx5_ib_init_umr_context(&umr_context);
        umrwr->wr.wr_cqe = &umr_context.cqe;

        down(&umrc->sem);
        err = ib_post_send(umrc->qp, &umrwr->wr, &bad);
        if (err) {
                mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
        } else {
                wait_for_completion(&umr_context.done);
                if (umr_context.status != IB_WC_SUCCESS) {
                        mlx5_ib_warn(dev, "reg umr failed (%u)\n",
                                     umr_context.status);
                        err = -EFAULT;
                }
        }
        up(&umrc->sem);
        return err;
}

static struct mlx5_ib_mr *alloc_mr_from_cache(
                                  struct ib_pd *pd, struct ib_umem *umem,
                                  u64 virt_addr, u64 len, int npages,
                                  int page_shift, int order, int access_flags)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_ib_mr *mr;
        int err = 0;
        int i;

        for (i = 0; i < 1; i++) {
                mr = alloc_cached_mr(dev, order);
                if (mr)
                        break;

                err = add_keys(dev, order2idx(dev, order), 1);
                if (err && err != -EAGAIN) {
                        mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
                        break;
                }
        }

        if (!mr)
                return ERR_PTR(-EAGAIN);

        mr->ibmr.pd = pd;
        mr->umem = umem;
        mr->access_flags = access_flags;
        mr->desc_size = sizeof(struct mlx5_mtt);
        mr->mmkey.iova = virt_addr;
        mr->mmkey.size = len;
        mr->mmkey.pd = to_mpd(pd)->pdn;

        return mr;
}

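/*
 * Fill one chunk of the XLT buffer: KLMs for indirect (ODP) mkeys, otherwise
 * MTTs taken from the umem starting at page index 'idx' unless the chunk is
 * being zapped. Returns the number of entries covered.
 */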
static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
                               void *xlt, int page_shift, size_t size,
                               int flags)
{
        struct mlx5_ib_dev *dev = mr->dev;
        struct ib_umem *umem = mr->umem;

        if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
                if (!umr_can_use_indirect_mkey(dev))
                        return -EPERM;
                mlx5_odp_populate_klm(xlt, idx, npages, mr, flags);
                return npages;
        }

        npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx);

        if (!(flags & MLX5_IB_UPD_XLT_ZAP)) {
                __mlx5_ib_populate_pas(dev, umem, page_shift,
                                       idx, npages, xlt,
                                       MLX5_IB_MTT_PRESENT);
                /* Clear padding after the pages
                 * brought from the umem.
                 */
                memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0,
                       size - npages * sizeof(struct mlx5_mtt));
        }

        return npages;
}

#define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
                            MLX5_UMR_MTT_ALIGNMENT)
#define MLX5_SPARE_UMR_CHUNK 0x10000

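/*
 * Update a window of 'npages' translation entries of the MR, starting at
 * entry 'idx', by streaming chunks of the XLT through a DMA buffer and
 * posting one UMR per chunk; the flags select MTT vs. KLM layout and whether
 * the mkey is enabled, zapped, or has its PD/access/address updated.
 */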
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
                       int page_shift, int flags)
{
        struct mlx5_ib_dev *dev = mr->dev;
        struct device *ddev = dev->ib_dev.dev.parent;
        int size;
        void *xlt;
        dma_addr_t dma;
        struct mlx5_umr_wr wr;
        struct ib_sge sg;
        int err = 0;
        int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
                        ? sizeof(struct mlx5_klm)
                        : sizeof(struct mlx5_mtt);
        const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
        const int page_mask = page_align - 1;
        size_t pages_mapped = 0;
        size_t pages_to_map = 0;
        size_t pages_iter = 0;
        gfp_t gfp;
        bool use_emergency_page = false;

        if ((flags & MLX5_IB_UPD_XLT_INDIRECT) &&
            !umr_can_use_indirect_mkey(dev))
                return -EPERM;

        /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
         * so we need to align the offset and length accordingly
         */
        if (idx & page_mask) {
                npages += idx & page_mask;
                idx &= ~page_mask;
        }

        gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL;
        gfp |= __GFP_ZERO | __GFP_NOWARN;

        pages_to_map = ALIGN(npages, page_align);
        size = desc_size * pages_to_map;
        size = min_t(int, size, MLX5_MAX_UMR_CHUNK);

        xlt = (void *)__get_free_pages(gfp, get_order(size));
        if (!xlt && size > MLX5_SPARE_UMR_CHUNK) {
                mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d. fallback to spare UMR allocation of %d bytes\n",
                            size, get_order(size), MLX5_SPARE_UMR_CHUNK);

                size = MLX5_SPARE_UMR_CHUNK;
                xlt = (void *)__get_free_pages(gfp, get_order(size));
        }

        if (!xlt) {
                mlx5_ib_warn(dev, "Using XLT emergency buffer\n");
                xlt = (void *)mlx5_ib_get_xlt_emergency_page();
                size = PAGE_SIZE;
                memset(xlt, 0, size);
                use_emergency_page = true;
        }
        pages_iter = size / desc_size;
        dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE);
        if (dma_mapping_error(ddev, dma)) {
                mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
                err = -ENOMEM;
                goto free_xlt;
        }

        sg.addr = dma;
        sg.lkey = dev->umrc.pd->local_dma_lkey;

        memset(&wr, 0, sizeof(wr));
        wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
        if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
                wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
        wr.wr.sg_list = &sg;
        wr.wr.num_sge = 1;
        wr.wr.opcode = MLX5_IB_WR_UMR;

        wr.pd = mr->ibmr.pd;
        wr.mkey = mr->mmkey.key;
        wr.length = mr->mmkey.size;
        wr.virt_addr = mr->mmkey.iova;
        wr.access_flags = mr->access_flags;
        wr.page_shift = page_shift;

        for (pages_mapped = 0;
             pages_mapped < pages_to_map && !err;
             pages_mapped += pages_iter, idx += pages_iter) {
                npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
                dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
                npages = populate_xlt(mr, idx, npages, xlt,
                                      page_shift, size, flags);

                dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

                sg.length = ALIGN(npages * desc_size,
                                  MLX5_UMR_MTT_ALIGNMENT);

                if (pages_mapped + pages_iter >= pages_to_map) {
                        if (flags & MLX5_IB_UPD_XLT_ENABLE)
                                wr.wr.send_flags |=
                                        MLX5_IB_SEND_UMR_ENABLE_MR |
                                        MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
                                        MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
                        if (flags & MLX5_IB_UPD_XLT_PD ||
                            flags & MLX5_IB_UPD_XLT_ACCESS)
                                wr.wr.send_flags |=
                                        MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
                        if (flags & MLX5_IB_UPD_XLT_ADDR)
                                wr.wr.send_flags |=
                                        MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
                }

                wr.offset = idx * desc_size;
                wr.xlt_size = sg.length;

                err = mlx5_ib_post_send_wait(dev, &wr);
        }
        dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_xlt:
        if (use_emergency_page)
                mlx5_ib_put_xlt_emergency_page();
        else
                free_pages((unsigned long)xlt, get_order(size));

        return err;
}

/*
 * If ibmr is NULL it will be allocated by reg_create.
 * Else, the given ibmr will be used.
 */
static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
                                     u64 virt_addr, u64 length,
                                     struct ib_umem *umem, int npages,
                                     int page_shift, int access_flags,
                                     bool populate)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_ib_mr *mr;
        __be64 *pas;
        void *mkc;
        int inlen;
        u32 *in;
        int err;
        bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

        mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        mr->ibmr.pd = pd;
        mr->access_flags = access_flags;

        inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
        if (populate)
                inlen += sizeof(*pas) * roundup(npages, 2);
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in) {
                err = -ENOMEM;
                goto err_1;
        }
        pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
        if (populate && !(access_flags & IB_ACCESS_ON_DEMAND))
                mlx5_ib_populate_pas(dev, umem, page_shift, pas,
                                     pg_cap ? MLX5_IB_MTT_PRESENT : 0);

        /* The pg_access bit allows setting the access flags
         * in the page list submitted with the command. */
        MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));

        mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
        MLX5_SET(mkc, mkc, free, !populate);
        MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
        MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
        MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
        MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
        MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
        MLX5_SET(mkc, mkc, lr, 1);
        MLX5_SET(mkc, mkc, umr_en, 1);

        MLX5_SET64(mkc, mkc, start_addr, virt_addr);
        MLX5_SET64(mkc, mkc, len, length);
        MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
        MLX5_SET(mkc, mkc, bsf_octword_size, 0);
        MLX5_SET(mkc, mkc, translations_octword_size,
                 get_octo_len(virt_addr, length, page_shift));
        MLX5_SET(mkc, mkc, log_page_size, page_shift);
        MLX5_SET(mkc, mkc, qpn, 0xffffff);
        if (populate) {
                MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
                         get_octo_len(virt_addr, length, page_shift));
        }

        err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
        if (err) {
                mlx5_ib_warn(dev, "create mkey failed\n");
                goto err_2;
        }
        mr->mmkey.type = MLX5_MKEY_MR;
        mr->desc_size = sizeof(struct mlx5_mtt);
        mr->dev = dev;
        kvfree(in);

        mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);

        return mr;

err_2:
        kvfree(in);

err_1:
        if (!ibmr)
                kfree(mr);

        return ERR_PTR(err);
}

static void set_mr_fileds(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
                          int npages, u64 length, int access_flags)
{
        mr->npages = npages;
        atomic_add(npages, &dev->mdev->priv.reg_pages);
        mr->ibmr.lkey = mr->mmkey.key;
        mr->ibmr.rkey = mr->mmkey.key;
        mr->ibmr.length = length;
        mr->access_flags = access_flags;
}

static struct ib_mr *mlx5_ib_get_memic_mr(struct ib_pd *pd, u64 memic_addr,
                                          u64 length, int acc)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
        struct mlx5_core_dev *mdev = dev->mdev;
        struct mlx5_ib_mr *mr;
        void *mkc;
        u32 *in;
        int err;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        in = kzalloc(inlen, GFP_KERNEL);
        if (!in) {
                err = -ENOMEM;
                goto err_free;
        }

        mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

        MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MEMIC & 0x3);
        MLX5_SET(mkc, mkc, access_mode_4_2,
                 (MLX5_MKC_ACCESS_MODE_MEMIC >> 2) & 0x7);
        MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
        MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
        MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
        MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
        MLX5_SET(mkc, mkc, lr, 1);

        MLX5_SET64(mkc, mkc, len, length);
        MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
        MLX5_SET(mkc, mkc, qpn, 0xffffff);
        MLX5_SET64(mkc, mkc, start_addr,
                   memic_addr - pci_resource_start(dev->mdev->pdev, 0));

        err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
        if (err)
                goto err_in;

        kfree(in);

        mr->umem = NULL;
        set_mr_fileds(dev, mr, 0, length, acc);

        return &mr->ibmr;

err_in:
        kfree(in);

err_free:
        kfree(mr);

        return ERR_PTR(err);
}

struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
                                struct ib_dm_mr_attr *attr,
                                struct uverbs_attr_bundle *attrs)
{
        struct mlx5_ib_dm *mdm = to_mdm(dm);
        u64 memic_addr;

        if (attr->access_flags & ~MLX5_IB_DM_ALLOWED_ACCESS)
                return ERR_PTR(-EINVAL);

        memic_addr = mdm->dev_addr + attr->offset;

        return mlx5_ib_get_memic_mr(pd, memic_addr, attr->length,
                                    attr->access_flags);
}

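/*
 * Register a user memory region: implicit ODP MRs are handled separately,
 * otherwise the umem is pinned and the MR is taken from the UMR cache when
 * possible, falling back to a firmware reg_create() on the slow path. MRs
 * created without pre-populated translations then have their XLT programmed
 * through a UMR before being returned.
 */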
Eli Cohene126ba92013-07-07 17:25:49 +03001312struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1313 u64 virt_addr, int access_flags,
1314 struct ib_udata *udata)
1315{
1316 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1317 struct mlx5_ib_mr *mr = NULL;
Majd Dibbinyc8d75a92018-03-22 15:34:04 +02001318 bool populate_mtts = false;
Eli Cohene126ba92013-07-07 17:25:49 +03001319 struct ib_umem *umem;
1320 int page_shift;
1321 int npages;
1322 int ncont;
1323 int order;
1324 int err;
1325
Arnd Bergmann1b19b9512017-12-11 12:45:44 +01001326 if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
Leon Romanovskyea30f012018-03-13 15:29:25 +02001327 return ERR_PTR(-EOPNOTSUPP);
Arnd Bergmann1b19b9512017-12-11 12:45:44 +01001328
Eli Cohen900a6d72014-09-14 16:47:51 +03001329 mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
1330 start, virt_addr, length, access_flags);
Artemy Kovalyov81713d32017-01-18 16:58:11 +02001331
1332#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1333 if (!start && length == U64_MAX) {
1334 if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
1335 !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
1336 return ERR_PTR(-EINVAL);
1337
1338 mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
Leon Romanovsky42898612018-03-13 15:29:24 +02001339 if (IS_ERR(mr))
1340 return ERR_CAST(mr);
Artemy Kovalyov81713d32017-01-18 16:58:11 +02001341 return &mr->ibmr;
1342 }
1343#endif
1344
Arnd Bergmann14ab8892016-10-24 22:48:21 +02001345 err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
Noa Osherovich395a8e42016-02-29 16:46:50 +02001346 &page_shift, &ncont, &order);
1347
Ilya Lesokhinff740ae2017-08-17 15:52:30 +03001348 if (err < 0)
Arnd Bergmann14ab8892016-10-24 22:48:21 +02001349 return ERR_PTR(err);
Eli Cohene126ba92013-07-07 17:25:49 +03001350
Majd Dibbinyc8d75a92018-03-22 15:34:04 +02001351 if (use_umr(dev, order)) {
Ilya Lesokhinff740ae2017-08-17 15:52:30 +03001352 mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
1353 page_shift, order, access_flags);
Eli Cohene126ba92013-07-07 17:25:49 +03001354 if (PTR_ERR(mr) == -EAGAIN) {
Arvind Yadavd23a8ba2017-09-26 12:20:01 +05301355 mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
Eli Cohene126ba92013-07-07 17:25:49 +03001356 mr = NULL;
1357 }
Majd Dibbinyc8d75a92018-03-22 15:34:04 +02001358 populate_mtts = false;
Ilya Lesokhinff740ae2017-08-17 15:52:30 +03001359 } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
1360 if (access_flags & IB_ACCESS_ON_DEMAND) {
1361 err = -EINVAL;
Arvind Yadavd23a8ba2017-09-26 12:20:01 +05301362 pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
Ilya Lesokhinff740ae2017-08-17 15:52:30 +03001363 goto error;
1364 }
Majd Dibbinyc8d75a92018-03-22 15:34:04 +02001365 populate_mtts = true;
Eli Cohene126ba92013-07-07 17:25:49 +03001366 }
1367
Moshe Lazer6bc1a652016-10-27 16:36:42 +03001368 if (!mr) {
Majd Dibbinyc8d75a92018-03-22 15:34:04 +02001369 if (!umr_can_modify_entity_size(dev))
1370 populate_mtts = true;
Moshe Lazer6bc1a652016-10-27 16:36:42 +03001371 mutex_lock(&dev->slow_path_mutex);
Noa Osherovich395a8e42016-02-29 16:46:50 +02001372 mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
Majd Dibbinyc8d75a92018-03-22 15:34:04 +02001373 page_shift, access_flags, populate_mtts);
Moshe Lazer6bc1a652016-10-27 16:36:42 +03001374 mutex_unlock(&dev->slow_path_mutex);
1375 }
Eli Cohene126ba92013-07-07 17:25:49 +03001376
1377 if (IS_ERR(mr)) {
1378 err = PTR_ERR(mr);
1379 goto error;
1380 }
1381
Matan Baraka606b0f2016-02-29 18:05:28 +02001382 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
Eli Cohene126ba92013-07-07 17:25:49 +03001383
1384 mr->umem = umem;
Noa Osherovich395a8e42016-02-29 16:46:50 +02001385 set_mr_fileds(dev, mr, npages, length, access_flags);
Eli Cohene126ba92013-07-07 17:25:49 +03001386
Haggai Eranb4cfe442014-12-11 17:04:26 +02001387#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
Noa Osherovich395a8e42016-02-29 16:46:50 +02001388 update_odp_mr(mr);
Haggai Eranb4cfe442014-12-11 17:04:26 +02001389#endif
1390
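	/*
	 * The MKey was created without a populated translation table;
	 * enable it and program the XLT with a UMR WQE (zapping the
	 * entries first for ODP MRs).
	 */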
Majd Dibbinyc8d75a92018-03-22 15:34:04 +02001391 if (!populate_mtts) {
Ilya Lesokhinff740ae2017-08-17 15:52:30 +03001392 int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
Eli Cohene126ba92013-07-07 17:25:49 +03001393
Ilya Lesokhinff740ae2017-08-17 15:52:30 +03001394 if (access_flags & IB_ACCESS_ON_DEMAND)
1395 update_xlt_flags |= MLX5_IB_UPD_XLT_ZAP;
1396
1397 err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
1398 update_xlt_flags);
Ilya Lesokhinfbcd4982017-09-24 21:46:35 +03001399
Ilya Lesokhinff740ae2017-08-17 15:52:30 +03001400 if (err) {
Ilya Lesokhinfbcd4982017-09-24 21:46:35 +03001401 dereg_mr(dev, mr);
Ilya Lesokhinff740ae2017-08-17 15:52:30 +03001402 return ERR_PTR(err);
1403 }
1404 }
1405
Leon Romanovskyc985bd02018-03-13 15:29:27 +02001406#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
Ilya Lesokhinff740ae2017-08-17 15:52:30 +03001407 mr->live = 1;
Leon Romanovskyc985bd02018-03-13 15:29:27 +02001408#endif
Ilya Lesokhinff740ae2017-08-17 15:52:30 +03001409 return &mr->ibmr;
Eli Cohene126ba92013-07-07 17:25:49 +03001410error:
1411 ib_umem_release(umem);
1412 return ERR_PTR(err);
1413}
1414
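/*
 * Disable an MKey by posting a UMR WQE with the "disable MR" flag, so the
 * MKey can later be reused or returned to the MR cache.  Nothing to do when
 * the device is in internal error state.
 */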
1415static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
1416{
Maor Gottlieb89ea94a72016-06-17 15:01:38 +03001417 struct mlx5_core_dev *mdev = dev->mdev;
Doug Ledford0025b0b2016-03-03 11:23:37 -05001418 struct mlx5_umr_wr umrwr = {};
Eli Cohene126ba92013-07-07 17:25:49 +03001419
Maor Gottlieb89ea94a72016-06-17 15:01:38 +03001420 if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
1421 return 0;
1422
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +02001423 umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
1424 MLX5_IB_SEND_UMR_FAIL_IF_FREE;
1425 umrwr.wr.opcode = MLX5_IB_WR_UMR;
1426 umrwr.mkey = mr->mmkey.key;
Eli Cohene126ba92013-07-07 17:25:49 +03001427
Binoy Jayand5ea2df2017-01-02 11:37:40 +02001428 return mlx5_ib_post_send_wait(dev, &umrwr);
Eli Cohene126ba92013-07-07 17:25:49 +03001429}
1430
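/*
 * Update the PD and/or access flags of an existing MKey in place by posting
 * a UMR WQE.
 */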
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +02001431static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
Noa Osherovich56e11d62016-02-29 16:46:51 +02001432 int access_flags, int flags)
1433{
1434 struct mlx5_ib_dev *dev = to_mdev(pd->device);
Noa Osherovich56e11d62016-02-29 16:46:51 +02001435 struct mlx5_umr_wr umrwr = {};
Noa Osherovich56e11d62016-02-29 16:46:51 +02001436 int err;
1437
Noa Osherovich56e11d62016-02-29 16:46:51 +02001438 umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;
1439
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +02001440 umrwr.wr.opcode = MLX5_IB_WR_UMR;
1441 umrwr.mkey = mr->mmkey.key;
Noa Osherovich56e11d62016-02-29 16:46:51 +02001442
Artemy Kovalyov31616252017-01-02 11:37:42 +02001443 if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) {
Noa Osherovich56e11d62016-02-29 16:46:51 +02001444 umrwr.pd = pd;
Noa Osherovich56e11d62016-02-29 16:46:51 +02001445 umrwr.access_flags = access_flags;
Artemy Kovalyov31616252017-01-02 11:37:42 +02001446 umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
Noa Osherovich56e11d62016-02-29 16:46:51 +02001447 }
1448
Binoy Jayand5ea2df2017-01-02 11:37:40 +02001449 err = mlx5_ib_post_send_wait(dev, &umrwr);
Noa Osherovich56e11d62016-02-29 16:46:51 +02001450
Noa Osherovich56e11d62016-02-29 16:46:51 +02001451 return err;
1452}
1453
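/*
 * Re-register a user MR.  Depending on what changed, the MKey is either
 * updated in place with UMR WQEs or destroyed and re-created through the
 * slow (firmware command) path.
 */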
1454int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
1455 u64 length, u64 virt_addr, int new_access_flags,
1456 struct ib_pd *new_pd, struct ib_udata *udata)
1457{
1458 struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
1459 struct mlx5_ib_mr *mr = to_mmr(ib_mr);
1460 struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
1461 int access_flags = flags & IB_MR_REREG_ACCESS ?
1462 new_access_flags :
1463 mr->access_flags;
Noa Osherovich56e11d62016-02-29 16:46:51 +02001464 int page_shift = 0;
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +02001465 int upd_flags = 0;
Noa Osherovich56e11d62016-02-29 16:46:51 +02001466 int npages = 0;
1467 int ncont = 0;
1468 int order = 0;
Leon Romanovskyb4bd7012018-04-23 17:01:52 +03001469 u64 addr, len;
Noa Osherovich56e11d62016-02-29 16:46:51 +02001470 int err;
1471
1472 mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
1473 start, virt_addr, length, access_flags);
1474
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +02001475 atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
1476
Leon Romanovskyb4bd7012018-04-23 17:01:52 +03001477 if (!mr->umem)
1478 return -EINVAL;
1479
1480 if (flags & IB_MR_REREG_TRANS) {
1481 addr = virt_addr;
1482 len = length;
1483 } else {
1484 addr = mr->umem->address;
1485 len = mr->umem->length;
1486 }
1487
Noa Osherovich56e11d62016-02-29 16:46:51 +02001488 if (flags != IB_MR_REREG_PD) {
1489 /*
1490 * Replace umem. This needs to be done whether or not UMR is
1491 * used.
1492 */
1493 flags |= IB_MR_REREG_TRANS;
1494 ib_umem_release(mr->umem);
Leon Romanovskyb4bd7012018-04-23 17:01:52 +03001495 mr->umem = NULL;
Arnd Bergmann14ab8892016-10-24 22:48:21 +02001496 err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
1497 &npages, &page_shift, &ncont, &order);
Leon Romanovsky4638a3b2018-03-13 15:29:26 +02001498 if (err)
1499 goto err;
Noa Osherovich56e11d62016-02-29 16:46:51 +02001500 }
1501
1502 if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
1503		/*
1504		 * UMR cannot update the translation here - the MKey has to be replaced.
1505		 */
Leon Romanovskyeeea6952018-03-13 15:29:28 +02001506 if (mr->allocated_from_cache)
Noa Osherovich56e11d62016-02-29 16:46:51 +02001507 err = unreg_umr(dev, mr);
Leon Romanovskyeeea6952018-03-13 15:29:28 +02001508 else
Noa Osherovich56e11d62016-02-29 16:46:51 +02001509 err = destroy_mkey(dev, mr);
Noa Osherovich56e11d62016-02-29 16:46:51 +02001510 if (err)
Leon Romanovsky4638a3b2018-03-13 15:29:26 +02001511 goto err;
Noa Osherovich56e11d62016-02-29 16:46:51 +02001512
1513 mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
Ilya Lesokhinff740ae2017-08-17 15:52:30 +03001514 page_shift, access_flags, true);
Noa Osherovich56e11d62016-02-29 16:46:51 +02001515
Leon Romanovsky4638a3b2018-03-13 15:29:26 +02001516 if (IS_ERR(mr)) {
1517 err = PTR_ERR(mr);
1518 mr = to_mmr(ib_mr);
1519 goto err;
1520 }
Noa Osherovich56e11d62016-02-29 16:46:51 +02001521
Ilya Lesokhin8b7ff7f2017-08-17 15:52:29 +03001522 mr->allocated_from_cache = 0;
Leon Romanovskyc985bd02018-03-13 15:29:27 +02001523#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
Ilya Lesokhinff740ae2017-08-17 15:52:30 +03001524 mr->live = 1;
Leon Romanovskyc985bd02018-03-13 15:29:27 +02001525#endif
Noa Osherovich56e11d62016-02-29 16:46:51 +02001526 } else {
1527		/*
1528		 * Update the existing MKey in place by posting UMR WQEs.
1529		 */
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +02001530 mr->ibmr.pd = pd;
1531 mr->access_flags = access_flags;
1532 mr->mmkey.iova = addr;
1533 mr->mmkey.size = len;
1534 mr->mmkey.pd = to_mpd(pd)->pdn;
1535
1536 if (flags & IB_MR_REREG_TRANS) {
1537 upd_flags = MLX5_IB_UPD_XLT_ADDR;
1538 if (flags & IB_MR_REREG_PD)
1539 upd_flags |= MLX5_IB_UPD_XLT_PD;
1540 if (flags & IB_MR_REREG_ACCESS)
1541 upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
1542 err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
1543 upd_flags);
1544 } else {
1545 err = rereg_umr(pd, mr, access_flags, flags);
1546 }
1547
Leon Romanovsky4638a3b2018-03-13 15:29:26 +02001548 if (err)
1549 goto err;
Noa Osherovich56e11d62016-02-29 16:46:51 +02001550 }
1551
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +02001552 set_mr_fileds(dev, mr, npages, len, access_flags);
Noa Osherovich56e11d62016-02-29 16:46:51 +02001553
Noa Osherovich56e11d62016-02-29 16:46:51 +02001554#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1555 update_odp_mr(mr);
1556#endif
Noa Osherovich56e11d62016-02-29 16:46:51 +02001557 return 0;
Leon Romanovsky4638a3b2018-03-13 15:29:26 +02001558
1559err:
1560 if (mr->umem) {
1561 ib_umem_release(mr->umem);
1562 mr->umem = NULL;
1563 }
1564 clean_mr(dev, mr);
1565 return err;
Noa Osherovich56e11d62016-02-29 16:46:51 +02001566}
1567
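/*
 * Allocate and DMA-map the private descriptor list (MTTs or KLMs) of a
 * kernel MR.  The buffer is over-allocated so the descriptors can be
 * aligned to MLX5_UMR_ALIGN.
 */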
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001568static int
1569mlx5_alloc_priv_descs(struct ib_device *device,
1570 struct mlx5_ib_mr *mr,
1571 int ndescs,
1572 int desc_size)
1573{
1574 int size = ndescs * desc_size;
1575 int add_size;
1576 int ret;
1577
1578 add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
1579
1580 mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
1581 if (!mr->descs_alloc)
1582 return -ENOMEM;
1583
1584 mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);
1585
Bart Van Assche9b0c2892017-01-20 13:04:21 -08001586 mr->desc_map = dma_map_single(device->dev.parent, mr->descs,
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001587 size, DMA_TO_DEVICE);
Bart Van Assche9b0c2892017-01-20 13:04:21 -08001588 if (dma_mapping_error(device->dev.parent, mr->desc_map)) {
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001589 ret = -ENOMEM;
1590 goto err;
1591 }
1592
1593 return 0;
1594err:
1595 kfree(mr->descs_alloc);
1596
1597 return ret;
1598}
1599
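/*
 * Unmap and free the descriptor list allocated by mlx5_alloc_priv_descs().
 */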
1600static void
1601mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
1602{
1603 if (mr->descs) {
1604 struct ib_device *device = mr->ibmr.device;
1605 int size = mr->max_descs * mr->desc_size;
1606
Bart Van Assche9b0c2892017-01-20 13:04:21 -08001607 dma_unmap_single(device->dev.parent, mr->desc_map,
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001608 size, DMA_TO_DEVICE);
1609 kfree(mr->descs_alloc);
1610 mr->descs = NULL;
1611 }
1612}
1613
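/*
 * Release the resources attached to an MR: signature PSVs and private
 * descriptors; destroy the MKey itself unless it came from the MR cache.
 */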
Leon Romanovskyeeea6952018-03-13 15:29:28 +02001614static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
Eli Cohene126ba92013-07-07 17:25:49 +03001615{
Ilya Lesokhin8b7ff7f2017-08-17 15:52:29 +03001616 int allocated_from_cache = mr->allocated_from_cache;
Eli Cohene126ba92013-07-07 17:25:49 +03001617
Sagi Grimberg8b91ffc2015-07-30 10:32:34 +03001618 if (mr->sig) {
1619 if (mlx5_core_destroy_psv(dev->mdev,
1620 mr->sig->psv_memory.psv_idx))
1621 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
1622 mr->sig->psv_memory.psv_idx);
1623 if (mlx5_core_destroy_psv(dev->mdev,
1624 mr->sig->psv_wire.psv_idx))
1625 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
1626 mr->sig->psv_wire.psv_idx);
1627 kfree(mr->sig);
1628 mr->sig = NULL;
1629 }
1630
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001631 mlx5_free_priv_descs(mr);
1632
Leon Romanovskyeeea6952018-03-13 15:29:28 +02001633 if (!allocated_from_cache)
1634 destroy_mkey(dev, mr);
Eli Cohene126ba92013-07-07 17:25:49 +03001635}
1636
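/*
 * Tear down an MR.  For ODP MRs, first stop new page faults and wait for
 * running page-fault handlers, then destroy the page mappings and release
 * the umem before touching the MKey.  Finally either free the MR or return
 * it to the MR cache.
 */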
Leon Romanovskyeeea6952018-03-13 15:29:28 +02001637static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
Haggai Eran6aec21f2014-12-11 17:04:23 +02001638{
Haggai Eran6aec21f2014-12-11 17:04:23 +02001639 int npages = mr->npages;
1640 struct ib_umem *umem = mr->umem;
1641
1642#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
Haggai Eranb4cfe442014-12-11 17:04:26 +02001643 if (umem && umem->odp_data) {
1644 /* Prevent new page faults from succeeding */
1645 mr->live = 0;
Haggai Eran6aec21f2014-12-11 17:04:23 +02001646 /* Wait for all running page-fault handlers to finish. */
1647 synchronize_srcu(&dev->mr_srcu);
Haggai Eranb4cfe442014-12-11 17:04:26 +02001648 /* Destroy all page mappings */
Artemy Kovalyov81713d32017-01-18 16:58:11 +02001649 if (umem->odp_data->page_list)
1650 mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
1651 ib_umem_end(umem));
1652 else
1653 mlx5_ib_free_implicit_mr(mr);
Haggai Eranb4cfe442014-12-11 17:04:26 +02001654		/*
1655		 * Release the umem before the MR for ODP, so that
1656		 * there will not be any invalidations in flight still
1657		 * looking at the *mr struct.
1658		 */
1659 ib_umem_release(umem);
1660 atomic_sub(npages, &dev->mdev->priv.reg_pages);
1661
1662 /* Avoid double-freeing the umem. */
1663 umem = NULL;
1664 }
Haggai Eran6aec21f2014-12-11 17:04:23 +02001665#endif
1666
Ilya Lesokhinfbcd4982017-09-24 21:46:35 +03001667 clean_mr(dev, mr);
Haggai Eran6aec21f2014-12-11 17:04:23 +02001668
1669 if (umem) {
1670 ib_umem_release(umem);
1671 atomic_sub(npages, &dev->mdev->priv.reg_pages);
1672 }
1673
Leon Romanovskyf3f134f2018-03-12 21:26:37 +02001674 if (!mr->allocated_from_cache)
1675 kfree(mr);
1676 else
1677 mlx5_mr_cache_free(dev, mr);
Haggai Eran6aec21f2014-12-11 17:04:23 +02001678}
1679
Ilya Lesokhinfbcd4982017-09-24 21:46:35 +03001680int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
1681{
Leon Romanovskyeeea6952018-03-13 15:29:28 +02001682 dereg_mr(to_mdev(ibmr->device), to_mmr(ibmr));
1683 return 0;
Ilya Lesokhinfbcd4982017-09-24 21:46:35 +03001684}
1685
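/*
 * Allocate a kernel MR for fast registration: MTT descriptors for
 * IB_MR_TYPE_MEM_REG, KLMs for IB_MR_TYPE_SG_GAPS, and KLMs plus a pair of
 * PSVs for IB_MR_TYPE_SIGNATURE.
 */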
Sagi Grimberg9bee1782015-07-30 10:32:35 +03001686struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
1687 enum ib_mr_type mr_type,
1688 u32 max_num_sg)
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001689{
1690 struct mlx5_ib_dev *dev = to_mdev(pd->device);
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001691 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
Sagi Grimbergb005d312016-02-29 19:07:33 +02001692 int ndescs = ALIGN(max_num_sg, 4);
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001693 struct mlx5_ib_mr *mr;
1694 void *mkc;
1695 u32 *in;
Sagi Grimbergb005d312016-02-29 19:07:33 +02001696 int err;
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001697
1698 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1699 if (!mr)
1700 return ERR_PTR(-ENOMEM);
1701
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001702 in = kzalloc(inlen, GFP_KERNEL);
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001703 if (!in) {
1704 err = -ENOMEM;
1705 goto err_free;
1706 }
1707
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001708 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1709 MLX5_SET(mkc, mkc, free, 1);
1710 MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
1711 MLX5_SET(mkc, mkc, qpn, 0xffffff);
1712 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001713
Sagi Grimberg9bee1782015-07-30 10:32:35 +03001714 if (mr_type == IB_MR_TYPE_MEM_REG) {
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001715 mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
1716 MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001717 err = mlx5_alloc_priv_descs(pd->device, mr,
Artemy Kovalyov31616252017-01-02 11:37:42 +02001718 ndescs, sizeof(struct mlx5_mtt));
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001719 if (err)
1720 goto err_free_in;
1721
Artemy Kovalyov31616252017-01-02 11:37:42 +02001722 mr->desc_size = sizeof(struct mlx5_mtt);
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001723 mr->max_descs = ndescs;
Sagi Grimbergb005d312016-02-29 19:07:33 +02001724 } else if (mr_type == IB_MR_TYPE_SG_GAPS) {
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001725 mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
Sagi Grimbergb005d312016-02-29 19:07:33 +02001726
1727 err = mlx5_alloc_priv_descs(pd->device, mr,
1728 ndescs, sizeof(struct mlx5_klm));
1729 if (err)
1730 goto err_free_in;
1731 mr->desc_size = sizeof(struct mlx5_klm);
1732 mr->max_descs = ndescs;
Sagi Grimberg9bee1782015-07-30 10:32:35 +03001733 } else if (mr_type == IB_MR_TYPE_SIGNATURE) {
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001734 u32 psv_index[2];
1735
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001736 MLX5_SET(mkc, mkc, bsf_en, 1);
1737 MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001738 mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
1739 if (!mr->sig) {
1740 err = -ENOMEM;
1741 goto err_free_in;
1742 }
1743
1744 /* create mem & wire PSVs */
Jack Morgenstein9603b612014-07-28 23:30:22 +03001745 err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001746 2, psv_index);
1747 if (err)
1748 goto err_free_sig;
1749
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001750 mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001751 mr->sig->psv_memory.psv_idx = psv_index[0];
1752 mr->sig->psv_wire.psv_idx = psv_index[1];
Sagi Grimbergd5436ba2014-02-23 14:19:12 +02001753
1754 mr->sig->sig_status_checked = true;
1755 mr->sig->sig_err_exists = false;
1756 /* Next UMR, Arm SIGERR */
1757 ++mr->sig->sigerr_count;
Sagi Grimberg9bee1782015-07-30 10:32:35 +03001758 } else {
1759 mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
1760 err = -EINVAL;
1761 goto err_free_in;
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001762 }
1763
Ariel Levkovichcdbd0d22018-04-05 18:53:28 +03001764 MLX5_SET(mkc, mkc, access_mode_1_0, mr->access_mode & 0x3);
1765 MLX5_SET(mkc, mkc, access_mode_4_2, (mr->access_mode >> 2) & 0x7);
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001766 MLX5_SET(mkc, mkc, umr_en, 1);
1767
Nitzan Carmi45e6ae72017-12-26 11:20:20 +02001768 mr->ibmr.device = pd->device;
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001769 err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001770 if (err)
1771 goto err_destroy_psv;
1772
Artemy Kovalyovaa8e08d2017-01-02 11:37:48 +02001773 mr->mmkey.type = MLX5_MKEY_MR;
Matan Baraka606b0f2016-02-29 18:05:28 +02001774 mr->ibmr.lkey = mr->mmkey.key;
1775 mr->ibmr.rkey = mr->mmkey.key;
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001776 mr->umem = NULL;
1777 kfree(in);
1778
1779 return &mr->ibmr;
1780
1781err_destroy_psv:
1782 if (mr->sig) {
Jack Morgenstein9603b612014-07-28 23:30:22 +03001783 if (mlx5_core_destroy_psv(dev->mdev,
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001784 mr->sig->psv_memory.psv_idx))
1785 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
1786 mr->sig->psv_memory.psv_idx);
Jack Morgenstein9603b612014-07-28 23:30:22 +03001787 if (mlx5_core_destroy_psv(dev->mdev,
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001788 mr->sig->psv_wire.psv_idx))
1789 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
1790 mr->sig->psv_wire.psv_idx);
1791 }
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001792 mlx5_free_priv_descs(mr);
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001793err_free_sig:
1794 kfree(mr->sig);
1795err_free_in:
1796 kfree(in);
1797err_free:
1798 kfree(mr);
1799 return ERR_PTR(err);
1800}
1801
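/*
 * Allocate a memory window, implemented as a KLM-based MKey.  The user's
 * request structure is validated before the MKey is created.
 */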
Matan Barakd2370e02016-02-29 18:05:30 +02001802struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
1803 struct ib_udata *udata)
1804{
1805 struct mlx5_ib_dev *dev = to_mdev(pd->device);
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001806 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
Matan Barakd2370e02016-02-29 18:05:30 +02001807 struct mlx5_ib_mw *mw = NULL;
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001808 u32 *in = NULL;
1809 void *mkc;
Matan Barakd2370e02016-02-29 18:05:30 +02001810 int ndescs;
1811 int err;
1812 struct mlx5_ib_alloc_mw req = {};
1813 struct {
1814 __u32 comp_mask;
1815 __u32 response_length;
1816 } resp = {};
1817
1818 err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
1819 if (err)
1820 return ERR_PTR(err);
1821
1822 if (req.comp_mask || req.reserved1 || req.reserved2)
1823 return ERR_PTR(-EOPNOTSUPP);
1824
1825 if (udata->inlen > sizeof(req) &&
1826 !ib_is_udata_cleared(udata, sizeof(req),
1827 udata->inlen - sizeof(req)))
1828 return ERR_PTR(-EOPNOTSUPP);
1829
1830 ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);
1831
1832 mw = kzalloc(sizeof(*mw), GFP_KERNEL);
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001833 in = kzalloc(inlen, GFP_KERNEL);
Matan Barakd2370e02016-02-29 18:05:30 +02001834 if (!mw || !in) {
1835 err = -ENOMEM;
1836 goto free;
1837 }
1838
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001839 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
Matan Barakd2370e02016-02-29 18:05:30 +02001840
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001841 MLX5_SET(mkc, mkc, free, 1);
1842 MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
1843 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
1844 MLX5_SET(mkc, mkc, umr_en, 1);
1845 MLX5_SET(mkc, mkc, lr, 1);
Ariel Levkovichcdbd0d22018-04-05 18:53:28 +03001846 MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001847 MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
1848 MLX5_SET(mkc, mkc, qpn, 0xffffff);
1849
1850 err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
Matan Barakd2370e02016-02-29 18:05:30 +02001851 if (err)
1852 goto free;
1853
Artemy Kovalyovaa8e08d2017-01-02 11:37:48 +02001854 mw->mmkey.type = MLX5_MKEY_MW;
Matan Barakd2370e02016-02-29 18:05:30 +02001855 mw->ibmw.rkey = mw->mmkey.key;
Artemy Kovalyovdb570d72017-04-05 09:23:59 +03001856 mw->ndescs = ndescs;
Matan Barakd2370e02016-02-29 18:05:30 +02001857
1858 resp.response_length = min(offsetof(typeof(resp), response_length) +
1859 sizeof(resp.response_length), udata->outlen);
1860 if (resp.response_length) {
1861 err = ib_copy_to_udata(udata, &resp, resp.response_length);
1862 if (err) {
1863 mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
1864 goto free;
1865 }
1866 }
1867
1868 kfree(in);
1869 return &mw->ibmw;
1870
1871free:
1872 kfree(mw);
1873 kfree(in);
1874 return ERR_PTR(err);
1875}
1876
1877int mlx5_ib_dealloc_mw(struct ib_mw *mw)
1878{
1879 struct mlx5_ib_mw *mmw = to_mmw(mw);
1880 int err;
1881
1882 err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
1883 &mmw->mmkey);
1884 if (!err)
1885 kfree(mmw);
1886 return err;
1887}
1888
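/*
 * Report the signature status of an MR.  If a signature error was recorded
 * for this MKey, copy it to the caller and clear the pending-error flag.
 */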
Sagi Grimbergd5436ba2014-02-23 14:19:12 +02001889int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
1890 struct ib_mr_status *mr_status)
1891{
1892 struct mlx5_ib_mr *mmr = to_mmr(ibmr);
1893 int ret = 0;
1894
1895 if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
1896 pr_err("Invalid status check mask\n");
1897 ret = -EINVAL;
1898 goto done;
1899 }
1900
1901 mr_status->fail_status = 0;
1902 if (check_mask & IB_MR_CHECK_SIG_STATUS) {
1903 if (!mmr->sig) {
1904 ret = -EINVAL;
1905 pr_err("signature status check requested on a non-signature enabled MR\n");
1906 goto done;
1907 }
1908
1909 mmr->sig->sig_status_checked = true;
1910 if (!mmr->sig->sig_err_exists)
1911 goto done;
1912
1913 if (ibmr->lkey == mmr->sig->err_item.key)
1914 memcpy(&mr_status->sig_err, &mmr->sig->err_item,
1915 sizeof(mr_status->sig_err));
1916 else {
1917 mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
1918 mr_status->sig_err.sig_err_offset = 0;
1919 mr_status->sig_err.key = mmr->sig->err_item.key;
1920 }
1921
1922 mmr->sig->sig_err_exists = false;
1923 mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
1924 }
1925
1926done:
1927 return ret;
1928}
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001929
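/*
 * Translate a scatterlist into KLM descriptors, one KLM per SG entry,
 * honoring an optional offset into the first entry.  Returns the number of
 * entries consumed.
 */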
Sagi Grimbergb005d312016-02-29 19:07:33 +02001930static int
1931mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
1932 struct scatterlist *sgl,
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001933 unsigned short sg_nents,
Bart Van Assche9aa8b322016-05-12 10:49:15 -07001934 unsigned int *sg_offset_p)
Sagi Grimbergb005d312016-02-29 19:07:33 +02001935{
1936 struct scatterlist *sg = sgl;
1937 struct mlx5_klm *klms = mr->descs;
Bart Van Assche9aa8b322016-05-12 10:49:15 -07001938 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
Sagi Grimbergb005d312016-02-29 19:07:33 +02001939 u32 lkey = mr->ibmr.pd->local_dma_lkey;
1940 int i;
1941
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001942 mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
Sagi Grimbergb005d312016-02-29 19:07:33 +02001943 mr->ibmr.length = 0;
Sagi Grimbergb005d312016-02-29 19:07:33 +02001944
1945 for_each_sg(sgl, sg, sg_nents, i) {
Bart Van Assche99975cd2017-04-24 15:15:28 -07001946 if (unlikely(i >= mr->max_descs))
Sagi Grimbergb005d312016-02-29 19:07:33 +02001947 break;
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001948 klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
1949 klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
Sagi Grimbergb005d312016-02-29 19:07:33 +02001950 klms[i].key = cpu_to_be32(lkey);
Sagi Grimberg0a49f2c2017-04-23 14:31:42 +03001951 mr->ibmr.length += sg_dma_len(sg) - sg_offset;
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001952
1953 sg_offset = 0;
Sagi Grimbergb005d312016-02-29 19:07:33 +02001954 }
Sergey Gorenkoda343b62018-02-25 13:39:48 +02001955 mr->ndescs = i;
Sagi Grimbergb005d312016-02-29 19:07:33 +02001956
Bart Van Assche9aa8b322016-05-12 10:49:15 -07001957 if (sg_offset_p)
1958 *sg_offset_p = sg_offset;
1959
Sagi Grimbergb005d312016-02-29 19:07:33 +02001960 return i;
1961}
1962
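/*
 * ib_sg_to_pages() callback: store one page address as an MTT entry with
 * the read/write enable bits set.
 */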
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001963static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
1964{
1965 struct mlx5_ib_mr *mr = to_mmr(ibmr);
1966 __be64 *descs;
1967
1968 if (unlikely(mr->ndescs == mr->max_descs))
1969 return -ENOMEM;
1970
1971 descs = mr->descs;
1972 descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
1973
1974 return 0;
1975}
1976
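/*
 * Map a scatterlist into the MR's descriptor list, using KLM descriptors
 * when the MR was created in KLM access mode (SG_GAPS and signature MRs)
 * and page-sized MTT entries otherwise.  The descriptor buffer is synced
 * for the CPU before the update and back to the device afterwards.
 */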
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001977int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
Bart Van Assche9aa8b322016-05-12 10:49:15 -07001978 unsigned int *sg_offset)
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001979{
1980 struct mlx5_ib_mr *mr = to_mmr(ibmr);
1981 int n;
1982
1983 mr->ndescs = 0;
1984
1985 ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
1986 mr->desc_size * mr->max_descs,
1987 DMA_TO_DEVICE);
1988
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001989 if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001990 n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset);
Sagi Grimbergb005d312016-02-29 19:07:33 +02001991 else
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001992 n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
1993 mlx5_set_page);
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001994
1995 ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
1996 mr->desc_size * mr->max_descs,
1997 DMA_TO_DEVICE);
1998
1999 return n;
2000}