/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048

static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
{
	return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
}

static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
	return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
}

static bool use_umr(struct mlx5_ib_dev *dev, int order)
{
	return order <= mr_cache_max_order(dev) &&
	       umr_can_modify_entity_size(dev);
}

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/* Wait until all page fault handlers using the mr complete. */
	synchronize_srcu(&dev->mr_srcu);
#endif

	return err;
}

static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	else
		return order - cache->ent[0].order;
}

static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
{
	return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
		length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void update_odp_mr(struct mlx5_ib_mr *mr)
{
	if (mr->umem->odp_data) {
		/*
		 * This barrier prevents the compiler from moving the
		 * setting of umem->odp_data->private to point to our
		 * MR, before reg_umr finished, to ensure that the MR
		 * initialization has finished before starting to
		 * handle invalidations.
		 */
		smp_wmb();
		mr->umem->odp_data->private = mr;
		/*
		 * Make sure we will see the new
		 * umem->odp_data->private value in the invalidation
		 * routines, before we can get page faults on the
		 * MR. Page faults can happen once we put the MR in
		 * the tree, below this line. Without the barrier,
		 * there can be a fault handling and an invalidation
		 * before umem->odp_data->private == mr is visible to
		 * the invalidation handler.
		 */
		smp_wmb();
	}
}
#endif

static void reg_mr_callback(int status, void *context)
{
	struct mlx5_ib_mr *mr = context;
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	u8 key;
	unsigned long flags;
	struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
	int err;

	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	mr->mmkey.type = MLX5_MKEY_MR;
	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
	mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	ent->size++;
	spin_unlock_irqrestore(&ent->lock, flags);

	write_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key),
				&mr->mmkey);
	if (err)
		pr_err("Error inserting to mkey tree. 0x%x\n", -err);
	write_unlock_irqrestore(&table->lock, flags);

	if (!completion_done(&ent->compl))
		complete(&ent->compl);
}

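/*
 * add_keys() asynchronously creates up to @num mkeys for cache entry @c.
 * At most MAX_PENDING_REG_MR create_mkey commands may be in flight per
 * entry; completions are handled in reg_mr_callback(), which links each
 * new mkey into the entry's free list.
 */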
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err = 0;
	int i;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	for (i = 0; i < num; i++) {
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			break;
		}

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		mr->order = ent->order;
		mr->allocated_from_cache = 1;
		mr->dev = dev;

		MLX5_SET(mkc, mkc, free, 1);
		MLX5_SET(mkc, mkc, umr_en, 1);
		MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
		MLX5_SET(mkc, mkc, access_mode_4_2,
			 (ent->access_mode >> 2) & 0x7);

		MLX5_SET(mkc, mkc, qpn, 0xffffff);
		MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
		MLX5_SET(mkc, mkc, log_page_size, ent->page);

		spin_lock_irq(&ent->lock);
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
					       in, inlen,
					       mr->out, sizeof(mr->out),
					       reg_mr_callback, mr);
		if (err) {
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}

static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *tmp_mr;
	struct mlx5_ib_mr *mr;
	LIST_HEAD(del_list);
	int i;

	for (i = 0; i < num; i++) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			break;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_move(&mr->list, &del_list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	synchronize_srcu(&dev->mr_srcu);
#endif

	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
		list_del(&mr->list);
		kfree(mr);
	}
}

static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	u32 var;
	int err;
	int c;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var < ent->limit)
		return -EINVAL;

	if (var > ent->size) {
		do {
			err = add_keys(dev, c, var - ent->size);
			if (err && err != -EAGAIN)
				return err;

			usleep_range(3000, 5000);
		} while (err);
	} else if (var < ent->size) {
		remove_keys(dev, c, ent->size - var);
	}

	return count;
}

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};

static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	u32 var;
	int err;
	int c;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var > ent->size)
		return -EINVAL;

	ent->limit = var;

	if (ent->cur < ent->limit) {
		err = add_keys(dev, c, 2 * ent->limit - ent->cur);
		if (err)
			return err;
	}

	return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};

static int someone_adding(struct mlx5_mr_cache *cache)
{
	int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		if (cache->ent[i].cur < cache->ent[i].limit)
			return 1;
	}

	return 0;
}

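/*
 * Per-entry cache maintenance worker: tops an entry up towards
 * 2 * limit (backing off with delayed work on -EAGAIN or other errors)
 * and lazily trims it back once it has grown beyond 2 * limit and has
 * been idle for a while.
 */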
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);
	int err;

	if (cache->stopped)
		return;

	ent = &dev->cache.ent[i];
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err == -EAGAIN) {
				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
					    i + 2);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(3));
			} else if (err) {
				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
					     i + 2, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			} else {
				queue_work(cache->wq, &ent->work);
			}
		}
	} else if (ent->cur > 2 * ent->limit) {
		/*
		 * The remove_keys() logic is performed as a garbage
		 * collection task. Such a task is intended to run when
		 * no other active processes are running.
		 *
		 * The need_resched() will return TRUE if there are user
		 * tasks to be activated in the near future.
		 *
		 * In such a case, we don't execute remove_keys() and
		 * postpone the garbage collection work to the next
		 * cycle, in order to free CPU resources to other tasks.
		 */
		if (!need_resched() && !someone_adding(cache) &&
		    time_after(jiffies, cache->last_add + 300 * HZ)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
		} else {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		}
	}
}

static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}

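/*
 * Allocate an MR from a specific cache entry. If the entry is empty, a
 * fill request is issued and the caller sleeps until reg_mr_callback()
 * signals the entry's completion.
 */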
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	struct mlx5_ib_mr *mr;
	int err;

	if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_err(dev, "cache entry %d is out of range\n", entry);
		return NULL;
	}

	ent = &cache->ent[entry];
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);

			err = add_keys(dev, entry, 1);
			if (err && err != -EAGAIN)
				return ERR_PTR(err);

			wait_for_completion(&ent->compl);
		} else {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			return mr;
		}
	}
}

static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;
	int last_umr_cache_entry;
	int c;
	int i;

	c = order2idx(dev, order);
	last_umr_cache_entry = order2idx(dev, mr_cache_max_order(dev));
	if (c < 0 || c > last_umr_cache_entry) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
		return NULL;
	}

	for (i = c; i <= last_umr_cache_entry; i++) {
		ent = &cache->ent[i];

		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			break;
		}
		spin_unlock_irq(&ent->lock);

		queue_work(cache->wq, &ent->work);
	}

	if (!mr)
		cache->ent[c].miss++;

	return mr;
}

void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int shrink = 0;
	int c;

	c = order2idx(dev, mr->order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
		return;
	}

	if (unreg_umr(dev, mr))
		return;

	ent = &cache->ent[c];
	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	if (ent->cur > 2 * ent->limit)
		shrink = 1;
	spin_unlock_irq(&ent->lock);

	if (shrink)
		queue_work(cache->wq, &ent->work);
}

static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *tmp_mr;
	struct mlx5_ib_mr *mr;
	LIST_HEAD(del_list);

	cancel_delayed_work(&ent->dwork);
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			break;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_move(&mr->list, &del_list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	synchronize_srcu(&dev->mr_srcu);
#endif

	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
		list_del(&mr->list);
		kfree(mr);
	}
}

static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root || dev->rep)
		return;

	debugfs_remove_recursive(dev->cache.root);
	dev->cache.root = NULL;
}

static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int i;

	if (!mlx5_debugfs_root || dev->rep)
		return 0;

	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
	if (!cache->root)
		return -ENOMEM;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		sprintf(ent->name, "%d", ent->order);
		ent->dir = debugfs_create_dir(ent->name, cache->root);
		if (!ent->dir)
			goto err;

		ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
						 &size_fops);
		if (!ent->fsize)
			goto err;

		ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
						  &limit_fops);
		if (!ent->flimit)
			goto err;

		ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
					       &ent->cur);
		if (!ent->fcur)
			goto err;

		ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
						&ent->miss);
		if (!ent->fmiss)
			goto err;
	}

	return 0;
err:
	mlx5_mr_cache_debugfs_cleanup(dev);

	return -ENOMEM;
}

static void delay_time_func(struct timer_list *t)
{
	struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);

	dev->fill_delay = 0;
}

int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int err;
	int i;

	mutex_init(&dev->slow_path_mutex);
	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	timer_setup(&dev->delay_timer, delay_time_func, 0);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);
		ent->order = i + 2;
		ent->dev = dev;
		ent->limit = 0;

		init_completion(&ent->compl);
		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
		queue_work(cache->wq, &ent->work);

		if (i > MR_CACHE_LAST_STD_ENTRY) {
			mlx5_odp_init_mr_cache_entry(ent);
			continue;
		}

		if (ent->order > mr_cache_max_order(dev))
			continue;

		ent->page = PAGE_SHIFT;
		ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
			   MLX5_IB_UMR_OCTOWORD;
		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
		    !dev->rep &&
		    mlx5_core_is_pf(dev->mdev))
			ent->limit = dev->mdev->profile->mr_cache[i].limit;
		else
			ent->limit = 0;
	}

	err = mlx5_mr_cache_debugfs_init(dev);
	if (err)
		mlx5_ib_warn(dev, "cache debugfs failure\n");

	/*
	 * We don't want to fail the driver if debugfs failed to
	 * initialize, so we are not forwarding the error to the user.
	 */

	return 0;
}

static void wait_for_async_commands(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int total = 0;
	int i;
	int j;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		for (j = 0; j < 1000; j++) {
			if (!ent->pending)
				break;
			msleep(50);
		}
	}
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		total += ent->pending;
	}

	if (total)
		mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total);
	else
		mlx5_ib_warn(dev, "done with all pending requests\n");
}

int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	int i;

	if (!dev->cache.wq)
		return 0;

	dev->cache.stopped = 1;
	flush_workqueue(dev->cache.wq);

	mlx5_mr_cache_debugfs_cleanup(dev);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
		clean_keys(dev, i);

	destroy_workqueue(dev->cache.wq);
	wait_for_async_commands(dev);
	del_timer_sync(&dev->delay_timer);

	return 0;
}

struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET(mkc, mkc, length64, 1);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, 0);

	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

static int get_octo_len(u64 addr, u64 len, int page_shift)
{
	u64 page_size = 1ULL << page_shift;
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> page_shift;
	return (npages + 1) / 2;
}

static int mr_cache_max_order(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		return MR_CACHE_LAST_STD_ENTRY + 2;
	return MLX5_MAX_UMR_SHIFT;
}

static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
		       int access_flags, struct ib_umem **umem,
		       int *npages, int *page_shift, int *ncont,
		       int *order)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int err;

	*umem = ib_umem_get(pd->uobject->context, start, length,
			    access_flags, 0);
	err = PTR_ERR_OR_ZERO(*umem);
	if (err) {
		*umem = NULL;
		mlx5_ib_err(dev, "umem get failed (%d)\n", err);
		return err;
	}

	mlx5_ib_cont_pages(*umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
			   page_shift, ncont, order);
	if (!*npages) {
		mlx5_ib_warn(dev, "avoid zero region\n");
		ib_umem_release(*umem);
		return -EINVAL;
	}

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    *npages, *ncont, *order, *page_shift);

	return 0;
}

static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_umr_context *context =
		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);

	context->status = wc->status;
	complete(&context->done);
}

static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	context->cqe.done = mlx5_ib_umr_done;
	context->status = -1;
	init_completion(&context->done);
}

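/*
 * Post a UMR work request on the dedicated UMR QP and wait for its
 * completion. Access to the UMR QP is serialized by umrc->sem.
 */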
static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
				  struct mlx5_umr_wr *umrwr)
{
	struct umr_common *umrc = &dev->umrc;
	struct ib_send_wr *bad;
	int err;
	struct mlx5_ib_umr_context umr_context;

	mlx5_ib_init_umr_context(&umr_context);
	umrwr->wr.wr_cqe = &umr_context.cqe;

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr->wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed (%u)\n",
				     umr_context.status);
			err = -EFAULT;
		}
	}
	up(&umrc->sem);
	return err;
}

static struct mlx5_ib_mr *alloc_mr_from_cache(
				  struct ib_pd *pd, struct ib_umem *umem,
				  u64 virt_addr, u64 len, int npages,
				  int page_shift, int order, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	int err = 0;
	int i;

	for (i = 0; i < 1; i++) {
		mr = alloc_cached_mr(dev, order);
		if (mr)
			break;

		err = add_keys(dev, order2idx(dev, order), 1);
		if (err && err != -EAGAIN) {
			mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
			break;
		}
	}

	if (!mr)
		return ERR_PTR(-EAGAIN);

	mr->ibmr.pd = pd;
	mr->umem = umem;
	mr->access_flags = access_flags;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->mmkey.iova = virt_addr;
	mr->mmkey.size = len;
	mr->mmkey.pd = to_mpd(pd)->pdn;

	return mr;
}

static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
			       void *xlt, int page_shift, size_t size,
			       int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct ib_umem *umem = mr->umem;

	if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
		if (!umr_can_use_indirect_mkey(dev))
			return -EPERM;
		mlx5_odp_populate_klm(xlt, idx, npages, mr, flags);
		return npages;
	}

	npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx);

	if (!(flags & MLX5_IB_UPD_XLT_ZAP)) {
		__mlx5_ib_populate_pas(dev, umem, page_shift,
				       idx, npages, xlt,
				       MLX5_IB_MTT_PRESENT);
		/* Clear padding after the pages
		 * brought from the umem.
		 */
		memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0,
		       size - npages * sizeof(struct mlx5_mtt));
	}

	return npages;
}

#define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
			    MLX5_UMR_MTT_ALIGNMENT)
#define MLX5_SPARE_UMR_CHUNK 0x10000

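/*
 * Update a window of @npages translation entries of @mr through UMR.
 * The XLT chunk is staged in a DMA buffer of at most MLX5_MAX_UMR_CHUNK
 * bytes, falling back to MLX5_SPARE_UMR_CHUNK and finally to the global
 * emergency page when memory is tight.
 */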
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct device *ddev = dev->ib_dev.dev.parent;
	int size;
	void *xlt;
	dma_addr_t dma;
	struct mlx5_umr_wr wr;
	struct ib_sge sg;
	int err = 0;
	int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
			? sizeof(struct mlx5_klm)
			: sizeof(struct mlx5_mtt);
	const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
	const int page_mask = page_align - 1;
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t pages_iter = 0;
	gfp_t gfp;
	bool use_emergency_page = false;

	if ((flags & MLX5_IB_UPD_XLT_INDIRECT) &&
	    !umr_can_use_indirect_mkey(dev))
		return -EPERM;

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly
	 */
	if (idx & page_mask) {
		npages += idx & page_mask;
		idx &= ~page_mask;
	}

	gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL;
	gfp |= __GFP_ZERO | __GFP_NOWARN;

	pages_to_map = ALIGN(npages, page_align);
	size = desc_size * pages_to_map;
	size = min_t(int, size, MLX5_MAX_UMR_CHUNK);

	xlt = (void *)__get_free_pages(gfp, get_order(size));
	if (!xlt && size > MLX5_SPARE_UMR_CHUNK) {
		mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d. fallback to spare UMR allocation of %d bytes\n",
			    size, get_order(size), MLX5_SPARE_UMR_CHUNK);

		size = MLX5_SPARE_UMR_CHUNK;
		xlt = (void *)__get_free_pages(gfp, get_order(size));
	}

	if (!xlt) {
		mlx5_ib_warn(dev, "Using XLT emergency buffer\n");
		xlt = (void *)mlx5_ib_get_xlt_emergency_page();
		size = PAGE_SIZE;
		memset(xlt, 0, size);
		use_emergency_page = true;
	}
	pages_iter = size / desc_size;
	dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
		err = -ENOMEM;
		goto free_xlt;
	}

	sg.addr = dma;
	sg.lkey = dev->umrc.pd->local_dma_lkey;

	memset(&wr, 0, sizeof(wr));
	wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
	if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
		wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr.wr.sg_list = &sg;
	wr.wr.num_sge = 1;
	wr.wr.opcode = MLX5_IB_WR_UMR;

	wr.pd = mr->ibmr.pd;
	wr.mkey = mr->mmkey.key;
	wr.length = mr->mmkey.size;
	wr.virt_addr = mr->mmkey.iova;
	wr.access_flags = mr->access_flags;
	wr.page_shift = page_shift;

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, idx += pages_iter) {
		npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
		npages = populate_xlt(mr, idx, npages, xlt,
				      page_shift, size, flags);

		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

		sg.length = ALIGN(npages * desc_size,
				  MLX5_UMR_MTT_ALIGNMENT);

		if (pages_mapped + pages_iter >= pages_to_map) {
			if (flags & MLX5_IB_UPD_XLT_ENABLE)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_ENABLE_MR |
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
			if (flags & MLX5_IB_UPD_XLT_PD ||
			    flags & MLX5_IB_UPD_XLT_ACCESS)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
			if (flags & MLX5_IB_UPD_XLT_ADDR)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
		}

		wr.offset = idx * desc_size;
		wr.xlt_size = sg.length;

		err = mlx5_ib_post_send_wait(dev, &wr);
	}
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_xlt:
	if (use_emergency_page)
		mlx5_ib_put_xlt_emergency_page();
	else
		free_pages((unsigned long)xlt, get_order(size));

	return err;
}

/*
 * If ibmr is NULL it will be allocated by reg_create.
 * Else, the given ibmr will be used.
 */
static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
				     u64 virt_addr, u64 length,
				     struct ib_umem *umem, int npages,
				     int page_shift, int access_flags,
				     bool populate)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	__be64 *pas;
	void *mkc;
	int inlen;
	u32 *in;
	int err;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->ibmr.pd = pd;
	mr->access_flags = access_flags;

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	if (populate)
		inlen += sizeof(*pas) * roundup(npages, 2);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	if (populate && !(access_flags & IB_ACCESS_ON_DEMAND))
		mlx5_ib_populate_pas(dev, umem, page_shift, pas,
				     pg_cap ? MLX5_IB_MTT_PRESENT : 0);

	/* The pg_access bit allows setting the access flags
	 * in the page list submitted with the command. */
	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, free, !populate);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);

	MLX5_SET64(mkc, mkc, start_addr, virt_addr);
	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(virt_addr, length, page_shift));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	if (populate) {
		MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
			 get_octo_len(virt_addr, length, page_shift));
	}

	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->dev = dev;
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);

	return mr;

err_2:
	kvfree(in);

err_1:
	if (!ibmr)
		kfree(mr);

	return ERR_PTR(err);
}

static void set_mr_fileds(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			  int npages, u64 length, int access_flags)
{
	mr->npages = npages;
	atomic_add(npages, &dev->mdev->priv.reg_pages);
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.length = length;
	mr->access_flags = access_flags;
}

static struct ib_mr *mlx5_ib_get_memic_mr(struct ib_pd *pd, u64 memic_addr,
					  u64 length, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MEMIC & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2,
		 (MLX5_MKC_ACCESS_MODE_MEMIC >> 2) & 0x7);
	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr,
		   memic_addr - pci_resource_start(dev->mdev->pdev, 0));

	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);

	mr->umem = NULL;
	set_mr_fileds(dev, mr, 0, length, acc);

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_dm *mdm = to_mdm(dm);
	u64 memic_addr;

	if (attr->access_flags & ~MLX5_IB_DM_ALLOWED_ACCESS)
		return ERR_PTR(-EINVAL);

	memic_addr = mdm->dev_addr + attr->offset;

	return mlx5_ib_get_memic_mr(pd, memic_addr, attr->length,
				    attr->access_flags);
}

Eli Cohene126ba92013-07-07 17:25:49 +03001309struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1310 u64 virt_addr, int access_flags,
1311 struct ib_udata *udata)
1312{
1313 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1314 struct mlx5_ib_mr *mr = NULL;
Majd Dibbinyc8d75a92018-03-22 15:34:04 +02001315 bool populate_mtts = false;
Eli Cohene126ba92013-07-07 17:25:49 +03001316 struct ib_umem *umem;
1317 int page_shift;
1318 int npages;
1319 int ncont;
        int order;
        int err;

        if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
                return ERR_PTR(-EOPNOTSUPP);

        mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
                    start, virt_addr, length, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        if (!start && length == U64_MAX) {
                if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
                    !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
                        return ERR_PTR(-EINVAL);

                mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
                if (IS_ERR(mr))
                        return ERR_CAST(mr);
                return &mr->ibmr;
        }
#endif

        err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
                          &page_shift, &ncont, &order);

        if (err < 0)
                return ERR_PTR(err);

        if (use_umr(dev, order)) {
                mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
                                         page_shift, order, access_flags);
                if (PTR_ERR(mr) == -EAGAIN) {
                        mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
                        mr = NULL;
                }
                populate_mtts = false;
        } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
                if (access_flags & IB_ACCESS_ON_DEMAND) {
                        err = -EINVAL;
                        pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
                        goto error;
                }
                populate_mtts = true;
        }

        if (!mr) {
                if (!umr_can_modify_entity_size(dev))
                        populate_mtts = true;
                mutex_lock(&dev->slow_path_mutex);
                mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
                                page_shift, access_flags, populate_mtts);
                mutex_unlock(&dev->slow_path_mutex);
        }

        if (IS_ERR(mr)) {
                err = PTR_ERR(mr);
                goto error;
        }

        mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

        mr->umem = umem;
        set_mr_fileds(dev, mr, npages, length, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        update_odp_mr(mr);
#endif

        if (!populate_mtts) {
                int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;

                if (access_flags & IB_ACCESS_ON_DEMAND)
                        update_xlt_flags |= MLX5_IB_UPD_XLT_ZAP;

                err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
                                         update_xlt_flags);

                if (err) {
                        dereg_mr(dev, mr);
                        return ERR_PTR(err);
                }
        }

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        mr->live = 1;
#endif
        return &mr->ibmr;
error:
        ib_umem_release(umem);
        return ERR_PTR(err);
}

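/*
 * Return the mkey to the "free" state with a UMR work request instead of
 * destroying it, so an MR taken from the MR cache can be reused later.
 * When the device is in internal error state no WQE can complete, so the
 * operation is skipped and treated as a success.
 */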
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
        struct mlx5_core_dev *mdev = dev->mdev;
        struct mlx5_umr_wr umrwr = {};

        if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
                return 0;

        umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
                              MLX5_IB_SEND_UMR_FAIL_IF_FREE;
        umrwr.wr.opcode = MLX5_IB_WR_UMR;
        umrwr.mkey = mr->mmkey.key;

        return mlx5_ib_post_send_wait(dev, &umrwr);
}

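/*
 * Update the PD and/or access flags of an existing mkey in place by posting
 * a UMR work request and waiting for its completion.
 */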
static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
                     int access_flags, int flags)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_umr_wr umrwr = {};
        int err;

        umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;

        umrwr.wr.opcode = MLX5_IB_WR_UMR;
        umrwr.mkey = mr->mmkey.key;

        if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) {
                umrwr.pd = pd;
                umrwr.access_flags = access_flags;
                umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
        }

        err = mlx5_ib_post_send_wait(dev, &umrwr);

        return err;
}

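/*
 * ib_rereg_user_mr() handler.  Depending on the requested flags this either
 * replaces the umem and recreates the mkey through the slow path, or patches
 * the existing mkey (translation, PD, access flags) with UMR work requests.
 * On failure the umem is released, the MR is cleaned up and the error is
 * returned to the core.
 */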
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
                          u64 length, u64 virt_addr, int new_access_flags,
                          struct ib_pd *new_pd, struct ib_udata *udata)
{
        struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
        struct mlx5_ib_mr *mr = to_mmr(ib_mr);
        struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
        int access_flags = flags & IB_MR_REREG_ACCESS ?
                            new_access_flags :
                            mr->access_flags;
        u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address;
        u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length;
        int page_shift = 0;
        int upd_flags = 0;
        int npages = 0;
        int ncont = 0;
        int order = 0;
        int err;

        mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
                    start, virt_addr, length, access_flags);

        atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);

        if (flags != IB_MR_REREG_PD) {
                /*
                 * Replace umem. This needs to be done whether or not UMR is
                 * used.
                 */
                flags |= IB_MR_REREG_TRANS;
                ib_umem_release(mr->umem);
                err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
                                  &npages, &page_shift, &ncont, &order);
                if (err)
                        goto err;
        }

        if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
                /*
                 * UMR can't be used - MKey needs to be replaced.
                 */
                if (mr->allocated_from_cache)
                        err = unreg_umr(dev, mr);
                else
                        err = destroy_mkey(dev, mr);
                if (err)
                        goto err;

                mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
                                page_shift, access_flags, true);

                if (IS_ERR(mr)) {
                        err = PTR_ERR(mr);
                        mr = to_mmr(ib_mr);
                        goto err;
                }

                mr->allocated_from_cache = 0;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
                mr->live = 1;
#endif
        } else {
                /*
                 * Send a UMR WQE
                 */
                mr->ibmr.pd = pd;
                mr->access_flags = access_flags;
                mr->mmkey.iova = addr;
                mr->mmkey.size = len;
                mr->mmkey.pd = to_mpd(pd)->pdn;

                if (flags & IB_MR_REREG_TRANS) {
                        upd_flags = MLX5_IB_UPD_XLT_ADDR;
                        if (flags & IB_MR_REREG_PD)
                                upd_flags |= MLX5_IB_UPD_XLT_PD;
                        if (flags & IB_MR_REREG_ACCESS)
                                upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
                        err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
                                                 upd_flags);
                } else {
                        err = rereg_umr(pd, mr, access_flags, flags);
                }

                if (err)
                        goto err;
        }

        set_mr_fileds(dev, mr, npages, len, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        update_odp_mr(mr);
#endif
        return 0;

err:
        if (mr->umem) {
                ib_umem_release(mr->umem);
                mr->umem = NULL;
        }
        clean_mr(dev, mr);
        return err;
}

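/*
 * Allocate the private descriptor list (MTTs or KLMs) used when this MR is
 * registered via work requests.  The buffer is over-allocated so it can be
 * aligned to MLX5_UMR_ALIGN, and is DMA-mapped towards the device.
 */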
static int
mlx5_alloc_priv_descs(struct ib_device *device,
                      struct mlx5_ib_mr *mr,
                      int ndescs,
                      int desc_size)
{
        int size = ndescs * desc_size;
        int add_size;
        int ret;

        add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);

        mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
        if (!mr->descs_alloc)
                return -ENOMEM;

        mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);

        mr->desc_map = dma_map_single(device->dev.parent, mr->descs,
                                      size, DMA_TO_DEVICE);
        if (dma_mapping_error(device->dev.parent, mr->desc_map)) {
                ret = -ENOMEM;
                goto err;
        }

        return 0;
err:
        kfree(mr->descs_alloc);

        return ret;
}

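/* Unmap and free the descriptor list allocated by mlx5_alloc_priv_descs(). */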
static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
        if (mr->descs) {
                struct ib_device *device = mr->ibmr.device;
                int size = mr->max_descs * mr->desc_size;

                dma_unmap_single(device->dev.parent, mr->desc_map,
                                 size, DMA_TO_DEVICE);
                kfree(mr->descs_alloc);
                mr->descs = NULL;
        }
}

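/*
 * Destroy the hardware resources attached to an MR: signature PSVs, the
 * private descriptor list and, unless the mkey came from the MR cache, the
 * mkey itself.
 */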
static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
        int allocated_from_cache = mr->allocated_from_cache;

        if (mr->sig) {
                if (mlx5_core_destroy_psv(dev->mdev,
                                          mr->sig->psv_memory.psv_idx))
                        mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
                                     mr->sig->psv_memory.psv_idx);
                if (mlx5_core_destroy_psv(dev->mdev,
                                          mr->sig->psv_wire.psv_idx))
                        mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
                                     mr->sig->psv_wire.psv_idx);
                kfree(mr->sig);
                mr->sig = NULL;
        }

        mlx5_free_priv_descs(mr);

        if (!allocated_from_cache)
                destroy_mkey(dev, mr);
}

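/*
 * Full MR teardown.  For ODP MRs the umem is torn down first, after fencing
 * the page-fault handlers with SRCU, so no invalidation can race with
 * freeing the MR.  MRs taken from the MR cache are returned to the cache
 * instead of being freed.
 */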
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
        int npages = mr->npages;
        struct ib_umem *umem = mr->umem;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        if (umem && umem->odp_data) {
                /* Prevent new page faults from succeeding */
                mr->live = 0;
                /* Wait for all running page-fault handlers to finish. */
                synchronize_srcu(&dev->mr_srcu);
                /* Destroy all page mappings */
                if (umem->odp_data->page_list)
                        mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
                                                 ib_umem_end(umem));
                else
                        mlx5_ib_free_implicit_mr(mr);
                /*
                 * Release the umem before the MR for ODP, so that no
                 * invalidation can still be in flight looking at the
                 * *mr struct.
                 */
                ib_umem_release(umem);
                atomic_sub(npages, &dev->mdev->priv.reg_pages);

                /* Avoid double-freeing the umem. */
                umem = NULL;
        }
#endif

        clean_mr(dev, mr);

        if (umem) {
                ib_umem_release(umem);
                atomic_sub(npages, &dev->mdev->priv.reg_pages);
        }

        if (!mr->allocated_from_cache)
                kfree(mr);
        else
                mlx5_mr_cache_free(dev, mr);
}

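/* ib_dereg_mr() entry point; thin wrapper around dereg_mr(). */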
int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
{
        dereg_mr(to_mdev(ibmr->device), to_mmr(ibmr));
        return 0;
}

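/*
 * ib_alloc_mr() handler.  Builds a create_mkey command for the requested MR
 * type: MTT-based descriptors for IB_MR_TYPE_MEM_REG, KLM-based descriptors
 * for IB_MR_TYPE_SG_GAPS, and KLM + BSF with a pair of PSVs for
 * IB_MR_TYPE_SIGNATURE.  The mkey is created in the "free" state and is
 * expected to be activated later by a registration work request.
 */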
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
                               enum ib_mr_type mr_type,
                               u32 max_num_sg)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
        int ndescs = ALIGN(max_num_sg, 4);
        struct mlx5_ib_mr *mr;
        void *mkc;
        u32 *in;
        int err;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        in = kzalloc(inlen, GFP_KERNEL);
        if (!in) {
                err = -ENOMEM;
                goto err_free;
        }

        mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
        MLX5_SET(mkc, mkc, free, 1);
        MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
        MLX5_SET(mkc, mkc, qpn, 0xffffff);
        MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);

        if (mr_type == IB_MR_TYPE_MEM_REG) {
                mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
                MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
                err = mlx5_alloc_priv_descs(pd->device, mr,
                                            ndescs, sizeof(struct mlx5_mtt));
                if (err)
                        goto err_free_in;

                mr->desc_size = sizeof(struct mlx5_mtt);
                mr->max_descs = ndescs;
        } else if (mr_type == IB_MR_TYPE_SG_GAPS) {
                mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;

                err = mlx5_alloc_priv_descs(pd->device, mr,
                                            ndescs, sizeof(struct mlx5_klm));
                if (err)
                        goto err_free_in;
                mr->desc_size = sizeof(struct mlx5_klm);
                mr->max_descs = ndescs;
        } else if (mr_type == IB_MR_TYPE_SIGNATURE) {
                u32 psv_index[2];

                MLX5_SET(mkc, mkc, bsf_en, 1);
                MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
                mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
                if (!mr->sig) {
                        err = -ENOMEM;
                        goto err_free_in;
                }

                /* create mem & wire PSVs */
                err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
                                           2, psv_index);
                if (err)
                        goto err_free_sig;

                mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
                mr->sig->psv_memory.psv_idx = psv_index[0];
                mr->sig->psv_wire.psv_idx = psv_index[1];

                mr->sig->sig_status_checked = true;
                mr->sig->sig_err_exists = false;
                /* Next UMR, Arm SIGERR */
                ++mr->sig->sigerr_count;
        } else {
                mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
                err = -EINVAL;
                goto err_free_in;
        }

        MLX5_SET(mkc, mkc, access_mode_1_0, mr->access_mode & 0x3);
        MLX5_SET(mkc, mkc, access_mode_4_2, (mr->access_mode >> 2) & 0x7);
        MLX5_SET(mkc, mkc, umr_en, 1);

        mr->ibmr.device = pd->device;
        err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
        if (err)
                goto err_destroy_psv;

        mr->mmkey.type = MLX5_MKEY_MR;
        mr->ibmr.lkey = mr->mmkey.key;
        mr->ibmr.rkey = mr->mmkey.key;
        mr->umem = NULL;
        kfree(in);

        return &mr->ibmr;

err_destroy_psv:
        if (mr->sig) {
                if (mlx5_core_destroy_psv(dev->mdev,
                                          mr->sig->psv_memory.psv_idx))
                        mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
                                     mr->sig->psv_memory.psv_idx);
                if (mlx5_core_destroy_psv(dev->mdev,
                                          mr->sig->psv_wire.psv_idx))
                        mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
                                     mr->sig->psv_wire.psv_idx);
        }
        mlx5_free_priv_descs(mr);
err_free_sig:
        kfree(mr->sig);
err_free_in:
        kfree(in);
err_free:
        kfree(mr);
        return ERR_PTR(err);
}

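/*
 * ib_alloc_mw() handler.  Validates the user request, then creates a
 * KLM-based mkey in the "free" state to back the memory window; type 2
 * windows additionally get remote invalidation enabled (en_rinval).
 */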
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
                               struct ib_udata *udata)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
        struct mlx5_ib_mw *mw = NULL;
        u32 *in = NULL;
        void *mkc;
        int ndescs;
        int err;
        struct mlx5_ib_alloc_mw req = {};
        struct {
                __u32 comp_mask;
                __u32 response_length;
        } resp = {};

        err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
        if (err)
                return ERR_PTR(err);

        if (req.comp_mask || req.reserved1 || req.reserved2)
                return ERR_PTR(-EOPNOTSUPP);

        if (udata->inlen > sizeof(req) &&
            !ib_is_udata_cleared(udata, sizeof(req),
                                 udata->inlen - sizeof(req)))
                return ERR_PTR(-EOPNOTSUPP);

        ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);

        mw = kzalloc(sizeof(*mw), GFP_KERNEL);
        in = kzalloc(inlen, GFP_KERNEL);
        if (!mw || !in) {
                err = -ENOMEM;
                goto free;
        }

        mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

        MLX5_SET(mkc, mkc, free, 1);
        MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
        MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
        MLX5_SET(mkc, mkc, umr_en, 1);
        MLX5_SET(mkc, mkc, lr, 1);
        MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
        MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
        MLX5_SET(mkc, mkc, qpn, 0xffffff);

        err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
        if (err)
                goto free;

        mw->mmkey.type = MLX5_MKEY_MW;
        mw->ibmw.rkey = mw->mmkey.key;
        mw->ndescs = ndescs;

        resp.response_length = min(offsetof(typeof(resp), response_length) +
                                   sizeof(resp.response_length), udata->outlen);
        if (resp.response_length) {
                err = ib_copy_to_udata(udata, &resp, resp.response_length);
                if (err) {
                        mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
                        goto free;
                }
        }

        kfree(in);
        return &mw->ibmw;

free:
        kfree(mw);
        kfree(in);
        return ERR_PTR(err);
}

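/* ib_dealloc_mw() entry point: destroy the mkey and free the MW object. */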
int mlx5_ib_dealloc_mw(struct ib_mw *mw)
{
        struct mlx5_ib_mw *mmw = to_mmw(mw);
        int err;

        err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
                                     &mmw->mmkey);
        if (!err)
                kfree(mmw);
        return err;
}

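/*
 * ib_check_mr_status() handler for signature MRs.  Reports and then clears a
 * pending signature error; if the latched error does not belong to this MR's
 * key, a generic BAD_GUARD error with offset 0 is reported instead.
 */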
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
                            struct ib_mr_status *mr_status)
{
        struct mlx5_ib_mr *mmr = to_mmr(ibmr);
        int ret = 0;

        if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
                pr_err("Invalid status check mask\n");
                ret = -EINVAL;
                goto done;
        }

        mr_status->fail_status = 0;
        if (check_mask & IB_MR_CHECK_SIG_STATUS) {
                if (!mmr->sig) {
                        ret = -EINVAL;
                        pr_err("signature status check requested on a non-signature enabled MR\n");
                        goto done;
                }

                mmr->sig->sig_status_checked = true;
                if (!mmr->sig->sig_err_exists)
                        goto done;

                if (ibmr->lkey == mmr->sig->err_item.key)
                        memcpy(&mr_status->sig_err, &mmr->sig->err_item,
                               sizeof(mr_status->sig_err));
                else {
                        mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
                        mr_status->sig_err.sig_err_offset = 0;
                        mr_status->sig_err.key = mmr->sig->err_item.key;
                }

                mmr->sig->sig_err_exists = false;
                mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
        }

done:
        return ret;
}

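/*
 * Translate a DMA-mapped scatterlist directly into KLM descriptors, one KLM
 * per SG element with its own length and the PD's local DMA lkey.  Because
 * each element carries its own byte count, this is what lets SG_GAPS MRs map
 * element lists with arbitrary gaps and alignment.
 */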
static int
mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
                   struct scatterlist *sgl,
                   unsigned short sg_nents,
                   unsigned int *sg_offset_p)
{
        struct scatterlist *sg = sgl;
        struct mlx5_klm *klms = mr->descs;
        unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
        u32 lkey = mr->ibmr.pd->local_dma_lkey;
        int i;

        mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
        mr->ibmr.length = 0;

        for_each_sg(sgl, sg, sg_nents, i) {
                if (unlikely(i >= mr->max_descs))
                        break;
                klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
                klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
                klms[i].key = cpu_to_be32(lkey);
                mr->ibmr.length += sg_dma_len(sg) - sg_offset;

                sg_offset = 0;
        }
        mr->ndescs = i;

        if (sg_offset_p)
                *sg_offset_p = sg_offset;

        return i;
}

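/*
 * ib_sg_to_pages() callback: append one page address, with the local
 * read/write permission bits set, to the MTT descriptor list.
 */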
static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
{
        struct mlx5_ib_mr *mr = to_mmr(ibmr);
        __be64 *descs;

        if (unlikely(mr->ndescs == mr->max_descs))
                return -ENOMEM;

        descs = mr->descs;
        descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

        return 0;
}

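/*
 * ib_map_mr_sg() handler: load the DMA-mapped scatterlist into this MR's
 * descriptor list (KLMs for KLM-based MRs, pages otherwise) and sync the
 * descriptors for device access.  Returns the number of SG entries mapped.
 *
 * Roughly, a ULP drives this through the core verbs; a minimal sketch with
 * illustrative local names and no error handling:
 *
 *      mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
 *      n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
 *      followed by posting an IB_WR_REG_MR work request that references mr.
 */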
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
                      unsigned int *sg_offset)
{
        struct mlx5_ib_mr *mr = to_mmr(ibmr);
        int n;

        mr->ndescs = 0;

        ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
                                   mr->desc_size * mr->max_descs,
                                   DMA_TO_DEVICE);

        if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
                n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset);
        else
                n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
                                   mlx5_set_page);

        ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
                                      mr->desc_size * mr->max_descs,
                                      DMA_TO_DEVICE);

        return n;
}