/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */


#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048

static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);

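/*
 * Destroy the mkey in hardware. With on-demand paging enabled, also wait
 * (via SRCU) for all page-fault handlers that might still reference this
 * MR to drain before the caller frees it.
 */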
static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/* Wait until all page fault handlers using the mr complete. */
	synchronize_srcu(&dev->mr_srcu);
#endif

	return err;
}

static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	else
		return order - cache->ent[0].order;
}

static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
{
	return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
		length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void update_odp_mr(struct mlx5_ib_mr *mr)
{
	if (mr->umem->odp_data) {
		/*
		 * This barrier prevents the compiler from moving the
		 * setting of umem->odp_data->private to point to our
		 * MR, before reg_umr finished, to ensure that the MR
		 * initialization has finished before we start to
		 * handle invalidations.
		 */
		smp_wmb();
		mr->umem->odp_data->private = mr;
		/*
		 * Make sure we will see the new
		 * umem->odp_data->private value in the invalidation
		 * routines, before we can get page faults on the
		 * MR. Page faults can happen once we put the MR in
		 * the tree, below this line. Without the barrier,
		 * there can be fault handling and an invalidation
		 * before umem->odp_data->private == mr is visible to
		 * the invalidation handler.
		 */
		smp_wmb();
	}
}
#endif

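/*
 * Completion callback for the asynchronous mkey creation issued by
 * add_keys(). On success the new MR gets a fresh variant key, is added to
 * its cache entry's free list and inserted into the device-wide mkey
 * radix tree; on failure the fill machinery is throttled via fill_delay.
 */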
static void reg_mr_callback(int status, void *context)
{
	struct mlx5_ib_mr *mr = context;
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	u8 key;
	unsigned long flags;
	struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
	int err;

	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	mr->mmkey.type = MLX5_MKEY_MR;
	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
	mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	ent->size++;
	spin_unlock_irqrestore(&ent->lock, flags);

	write_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key),
				&mr->mmkey);
	if (err)
		pr_err("Error inserting to mkey tree. 0x%x\n", -err);
	write_unlock_irqrestore(&table->lock, flags);

	if (!completion_done(&ent->compl))
		complete(&ent->compl);
}

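/*
 * Asynchronously create up to @num free mkeys for cache entry @c. At most
 * MAX_PENDING_REG_MR creations may be in flight per entry; beyond that
 * the function returns -EAGAIN and the caller is expected to retry later.
 */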
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err = 0;
	int i;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	for (i = 0; i < num; i++) {
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			break;
		}

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		mr->order = ent->order;
		mr->allocated_from_cache = 1;
		mr->dev = dev;

		MLX5_SET(mkc, mkc, free, 1);
		MLX5_SET(mkc, mkc, umr_en, 1);
		MLX5_SET(mkc, mkc, access_mode, ent->access_mode);

		MLX5_SET(mkc, mkc, qpn, 0xffffff);
		MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
		MLX5_SET(mkc, mkc, log_page_size, ent->page);

		spin_lock_irq(&ent->lock);
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
					       in, inlen,
					       mr->out, sizeof(mr->out),
					       reg_mr_callback, mr);
		if (err) {
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}

static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;
	int i;

	for (i = 0; i < num; i++) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = destroy_mkey(dev, mr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}

static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	u32 var;
	int err;
	int c;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var < ent->limit)
		return -EINVAL;

	if (var > ent->size) {
		do {
			err = add_keys(dev, c, var - ent->size);
			if (err && err != -EAGAIN)
				return err;

			usleep_range(3000, 5000);
		} while (err);
	} else if (var < ent->size) {
		remove_keys(dev, c, ent->size - var);
	}

	return count;
}

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};

static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	u32 var;
	int err;
	int c;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var > ent->size)
		return -EINVAL;

	ent->limit = var;

	if (ent->cur < ent->limit) {
		err = add_keys(dev, c, 2 * ent->limit - ent->cur);
		if (err)
			return err;
	}

	return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};

static int someone_adding(struct mlx5_mr_cache *cache)
{
	int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		if (cache->ent[i].cur < cache->ent[i].limit)
			return 1;
	}

	return 0;
}

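/*
 * Background maintenance for one cache entry: refill the entry while it
 * holds fewer than 2 * limit free MRs (backing off when mkey creation
 * returns -EAGAIN or fails), and lazily shrink it once it has grown past
 * 2 * limit and has been idle long enough.
 */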
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);
	int err;

	if (cache->stopped)
		return;

	ent = &dev->cache.ent[i];
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err == -EAGAIN) {
				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
					    i + 2);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(3));
			} else if (err) {
				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
					     i + 2, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			} else {
				queue_work(cache->wq, &ent->work);
			}
		}
	} else if (ent->cur > 2 * ent->limit) {
		/*
		 * The remove_keys() logic is performed as a garbage
		 * collection task. Such a task is intended to run when
		 * no other active processes are running.
		 *
		 * need_resched() returns TRUE if there are user tasks
		 * to be activated in the near future.
		 *
		 * In that case, we don't execute remove_keys() and
		 * postpone the garbage collection work to the next
		 * cycle, in order to free CPU resources for other tasks.
		 */
		if (!need_resched() && !someone_adding(cache) &&
		    time_after(jiffies, cache->last_add + 300 * HZ)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
		} else {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		}
	}
}

static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}

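/*
 * Take an MR from the specific cache entry @entry. If the entry is
 * empty, trigger an asynchronous fill and sleep until a creation
 * completes, retrying until an MR is available.
 */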
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	struct mlx5_ib_mr *mr;
	int err;

	if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_err(dev, "cache entry %d is out of range\n", entry);
		return NULL;
	}

	ent = &cache->ent[entry];
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);

			err = add_keys(dev, entry, 1);
			if (err && err != -EAGAIN)
				return ERR_PTR(err);

			wait_for_completion(&ent->compl);
		} else {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			return mr;
		}
	}
}

static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;
	int last_umr_cache_entry;
	int c;
	int i;

	c = order2idx(dev, order);
	last_umr_cache_entry = order2idx(dev, mr_cache_max_order(dev));
	if (c < 0 || c > last_umr_cache_entry) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
		return NULL;
	}

	for (i = c; i <= last_umr_cache_entry; i++) {
		ent = &cache->ent[i];

		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			break;
		}
		spin_unlock_irq(&ent->lock);

		queue_work(cache->wq, &ent->work);
	}

	if (!mr)
		cache->ent[c].miss++;

	return mr;
}

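/*
 * Return an MR to its cache entry. The mkey is first invalidated with a
 * UMR WQE (unreg_umr) so it can be handed out again; if invalidation
 * fails, the MR is simply not returned to the free list.
 */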
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int shrink = 0;
	int c;

	c = order2idx(dev, mr->order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
		return;
	}

	if (unreg_umr(dev, mr))
		return;

	ent = &cache->ent[c];
	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	if (ent->cur > 2 * ent->limit)
		shrink = 1;
	spin_unlock_irq(&ent->lock);

	if (shrink)
		queue_work(cache->wq, &ent->work);
}

static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;

	cancel_delayed_work(&ent->dwork);
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = destroy_mkey(dev, mr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}

static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root || dev->rep)
		return;

	debugfs_remove_recursive(dev->cache.root);
	dev->cache.root = NULL;
}

static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int i;

	if (!mlx5_debugfs_root || dev->rep)
		return 0;

	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
	if (!cache->root)
		return -ENOMEM;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		sprintf(ent->name, "%d", ent->order);
		ent->dir = debugfs_create_dir(ent->name, cache->root);
		if (!ent->dir)
			goto err;

		ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
						 &size_fops);
		if (!ent->fsize)
			goto err;

		ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
						  &limit_fops);
		if (!ent->flimit)
			goto err;

		ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
					       &ent->cur);
		if (!ent->fcur)
			goto err;

		ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
						&ent->miss);
		if (!ent->fmiss)
			goto err;
	}

	return 0;
err:
	mlx5_mr_cache_debugfs_cleanup(dev);

	return -ENOMEM;
}

static void delay_time_func(struct timer_list *t)
{
	struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);

	dev->fill_delay = 0;
}

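/*
 * Set up the MR cache: one ordered workqueue plus MAX_MR_CACHE_ENTRIES
 * entries, where entry i serves registrations of up to 2^(i + 2) pages.
 * Entries above MR_CACHE_LAST_STD_ENTRY are initialized separately for
 * ODP; limits are applied only on the PF when the profile enables MR
 * caching.
 */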
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int err;
	int i;

	mutex_init(&dev->slow_path_mutex);
	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	timer_setup(&dev->delay_timer, delay_time_func, 0);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);
		ent->order = i + 2;
		ent->dev = dev;
		ent->limit = 0;

		init_completion(&ent->compl);
		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
		queue_work(cache->wq, &ent->work);

		if (i > MR_CACHE_LAST_STD_ENTRY) {
			mlx5_odp_init_mr_cache_entry(ent);
			continue;
		}

		if (ent->order > mr_cache_max_order(dev))
			continue;

		ent->page = PAGE_SHIFT;
		ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
			   MLX5_IB_UMR_OCTOWORD;
		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
		    !dev->rep &&
		    mlx5_core_is_pf(dev->mdev))
			ent->limit = dev->mdev->profile->mr_cache[i].limit;
		else
			ent->limit = 0;
	}

	err = mlx5_mr_cache_debugfs_init(dev);
	if (err)
		mlx5_ib_warn(dev, "cache debugfs failure\n");

	/*
	 * We don't want to fail the driver if debugfs failed to
	 * initialize, so we are not forwarding the error to the user.
	 */

	return 0;
}

static void wait_for_async_commands(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int total = 0;
	int i;
	int j;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		for (j = 0; j < 1000; j++) {
			if (!ent->pending)
				break;
			msleep(50);
		}
	}
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		total += ent->pending;
	}

	if (total)
		mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total);
	else
		mlx5_ib_warn(dev, "done with all pending requests\n");
}

int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	int i;

	dev->cache.stopped = 1;
	flush_workqueue(dev->cache.wq);

	mlx5_mr_cache_debugfs_cleanup(dev);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
		clean_keys(dev, i);

	destroy_workqueue(dev->cache.wq);
	wait_for_async_commands(dev);
	del_timer_sync(&dev->delay_timer);

	return 0;
}

struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET(mkc, mkc, length64, 1);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, 0);

	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

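/*
 * Number of translation octowords needed to map @len bytes starting at
 * @addr with the given page shift: one 16-byte octoword holds two 8-byte
 * MTT entries, hence the (npages + 1) / 2.
 */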
static int get_octo_len(u64 addr, u64 len, int page_shift)
{
	u64 page_size = 1ULL << page_shift;
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> page_shift;
	return (npages + 1) / 2;
}

static int mr_cache_max_order(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		return MR_CACHE_LAST_STD_ENTRY + 2;
	return MLX5_MAX_UMR_SHIFT;
}

static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
		       int access_flags, struct ib_umem **umem,
		       int *npages, int *page_shift, int *ncont,
		       int *order)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int err;

	*umem = ib_umem_get(pd->uobject->context, start, length,
			    access_flags, 0);
	err = PTR_ERR_OR_ZERO(*umem);
	if (err < 0) {
		mlx5_ib_err(dev, "umem get failed (%d)\n", err);
		return err;
	}

	mlx5_ib_cont_pages(*umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
			   page_shift, ncont, order);
	if (!*npages) {
		mlx5_ib_warn(dev, "avoid zero region\n");
		ib_umem_release(*umem);
		return -EINVAL;
	}

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    *npages, *ncont, *order, *page_shift);

	return 0;
}

static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_umr_context *context =
		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);

	context->status = wc->status;
	complete(&context->done);
}

static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	context->cqe.done = mlx5_ib_umr_done;
	context->status = -1;
	init_completion(&context->done);
}

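/*
 * Post a UMR work request on the dedicated UMR QP and sleep until its
 * completion arrives. The semaphore bounds the number of outstanding
 * UMR operations on the QP.
 */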
static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
				  struct mlx5_umr_wr *umrwr)
{
	struct umr_common *umrc = &dev->umrc;
	struct ib_send_wr *bad;
	int err;
	struct mlx5_ib_umr_context umr_context;

	mlx5_ib_init_umr_context(&umr_context);
	umrwr->wr.wr_cqe = &umr_context.cqe;

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr->wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed (%u)\n",
				     umr_context.status);
			err = -EFAULT;
		}
	}
	up(&umrc->sem);
	return err;
}

static struct mlx5_ib_mr *alloc_mr_from_cache(
				  struct ib_pd *pd, struct ib_umem *umem,
				  u64 virt_addr, u64 len, int npages,
				  int page_shift, int order, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	int err = 0;
	int i;

	for (i = 0; i < 1; i++) {
		mr = alloc_cached_mr(dev, order);
		if (mr)
			break;

		err = add_keys(dev, order2idx(dev, order), 1);
		if (err && err != -EAGAIN) {
			mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
			break;
		}
	}

	if (!mr)
		return ERR_PTR(-EAGAIN);

	mr->ibmr.pd = pd;
	mr->umem = umem;
	mr->access_flags = access_flags;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->mmkey.iova = virt_addr;
	mr->mmkey.size = len;
	mr->mmkey.pd = to_mpd(pd)->pdn;

	return mr;
}

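/*
 * Fill one chunk of the XLT buffer: either indirect KLM entries for ODP
 * (delegated to mlx5_odp_populate_klm()) or MTT entries taken from the
 * umem page list, with any tail padding zeroed.
 */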
static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
			       void *xlt, int page_shift, size_t size,
			       int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct ib_umem *umem = mr->umem;

	if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
		mlx5_odp_populate_klm(xlt, idx, npages, mr, flags);
		return npages;
	}

	npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx);

	if (!(flags & MLX5_IB_UPD_XLT_ZAP)) {
		__mlx5_ib_populate_pas(dev, umem, page_shift,
				       idx, npages, xlt,
				       MLX5_IB_MTT_PRESENT);
		/* Clear padding after the pages
		 * brought from the umem.
		 */
		memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0,
		       size - npages * sizeof(struct mlx5_mtt));
	}

	return npages;
}

#define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
			    MLX5_UMR_MTT_ALIGNMENT)
#define MLX5_SPARE_UMR_CHUNK 0x10000

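/*
 * Push an updated translation table (XLT) for @mr to the device using
 * UMR WQEs. The table is staged in a DMA-able bounce buffer and sent in
 * chunks of at most MLX5_MAX_UMR_CHUNK bytes; if no large buffer can be
 * allocated, the code falls back to MLX5_SPARE_UMR_CHUNK and finally to
 * a one-page emergency buffer in the user context.
 */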
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct device *ddev = dev->ib_dev.dev.parent;
	struct mlx5_ib_ucontext *uctx = NULL;
	int size;
	void *xlt;
	dma_addr_t dma;
	struct mlx5_umr_wr wr;
	struct ib_sge sg;
	int err = 0;
	int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
			? sizeof(struct mlx5_klm)
			: sizeof(struct mlx5_mtt);
	const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
	const int page_mask = page_align - 1;
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t pages_iter = 0;
	gfp_t gfp;

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly
	 */
	if (idx & page_mask) {
		npages += idx & page_mask;
		idx &= ~page_mask;
	}

	gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL;
	gfp |= __GFP_ZERO | __GFP_NOWARN;

	pages_to_map = ALIGN(npages, page_align);
	size = desc_size * pages_to_map;
	size = min_t(int, size, MLX5_MAX_UMR_CHUNK);

	xlt = (void *)__get_free_pages(gfp, get_order(size));
	if (!xlt && size > MLX5_SPARE_UMR_CHUNK) {
		mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d. fallback to spare UMR allocation of %d bytes\n",
			    size, get_order(size), MLX5_SPARE_UMR_CHUNK);

		size = MLX5_SPARE_UMR_CHUNK;
		xlt = (void *)__get_free_pages(gfp, get_order(size));
	}

	if (!xlt) {
		uctx = to_mucontext(mr->ibmr.pd->uobject->context);
		mlx5_ib_warn(dev, "Using XLT emergency buffer\n");
		size = PAGE_SIZE;
		xlt = (void *)uctx->upd_xlt_page;
		mutex_lock(&uctx->upd_xlt_page_mutex);
		memset(xlt, 0, size);
	}
	pages_iter = size / desc_size;
	dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
		err = -ENOMEM;
		goto free_xlt;
	}

	sg.addr = dma;
	sg.lkey = dev->umrc.pd->local_dma_lkey;

	memset(&wr, 0, sizeof(wr));
	wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
	if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
		wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr.wr.sg_list = &sg;
	wr.wr.num_sge = 1;
	wr.wr.opcode = MLX5_IB_WR_UMR;

	wr.pd = mr->ibmr.pd;
	wr.mkey = mr->mmkey.key;
	wr.length = mr->mmkey.size;
	wr.virt_addr = mr->mmkey.iova;
	wr.access_flags = mr->access_flags;
	wr.page_shift = page_shift;

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, idx += pages_iter) {
		npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
		npages = populate_xlt(mr, idx, npages, xlt,
				      page_shift, size, flags);

		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

		sg.length = ALIGN(npages * desc_size,
				  MLX5_UMR_MTT_ALIGNMENT);

		if (pages_mapped + pages_iter >= pages_to_map) {
			if (flags & MLX5_IB_UPD_XLT_ENABLE)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_ENABLE_MR |
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
			if (flags & MLX5_IB_UPD_XLT_PD ||
			    flags & MLX5_IB_UPD_XLT_ACCESS)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
			if (flags & MLX5_IB_UPD_XLT_ADDR)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
		}

		wr.offset = idx * desc_size;
		wr.xlt_size = sg.length;

		err = mlx5_ib_post_send_wait(dev, &wr);
	}
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_xlt:
	if (uctx)
		mutex_unlock(&uctx->upd_xlt_page_mutex);
	else
		free_pages((unsigned long)xlt, get_order(size));

	return err;
}

/*
 * If ibmr is NULL it will be allocated by reg_create.
 * Otherwise, the given ibmr will be used.
 */
static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
				     u64 virt_addr, u64 length,
				     struct ib_umem *umem, int npages,
				     int page_shift, int access_flags,
				     bool populate)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	__be64 *pas;
	void *mkc;
	int inlen;
	u32 *in;
	int err;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->ibmr.pd = pd;
	mr->access_flags = access_flags;

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	if (populate)
		inlen += sizeof(*pas) * roundup(npages, 2);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	if (populate && !(access_flags & IB_ACCESS_ON_DEMAND))
		mlx5_ib_populate_pas(dev, umem, page_shift, pas,
				     pg_cap ? MLX5_IB_MTT_PRESENT : 0);

	/* The pg_access bit allows setting the access flags
	 * in the page list submitted with the command.
	 */
	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, free, !populate);
	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);

	MLX5_SET64(mkc, mkc, start_addr, virt_addr);
	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(virt_addr, length, page_shift));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	if (populate) {
		MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
			 get_octo_len(virt_addr, length, page_shift));
	}

	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->dev = dev;
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);

	return mr;

err_2:
	kvfree(in);

err_1:
	if (!ibmr)
		kfree(mr);

	return ERR_PTR(err);
}

static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			  int npages, u64 length, int access_flags)
{
	mr->npages = npages;
	atomic_add(npages, &dev->mdev->priv.reg_pages);
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.length = length;
	mr->access_flags = access_flags;
}

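/*
 * Main user MR registration entry point. Small regions (up to
 * mr_cache_max_order()) are served from the MR cache and finalized with
 * UMR WQEs; larger regions either use an extended UMR, when the device
 * supports it, or fall back to a blocking FW command under
 * slow_path_mutex.
 */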
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	struct ib_umem *umem;
	int page_shift;
	int npages;
	int ncont;
	int order;
	int err;
	bool use_umr = true;

	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
		return ERR_PTR(-EINVAL);

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (!start && length == U64_MAX) {
		if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
		    !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
			return ERR_PTR(-EINVAL);

		mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
		return &mr->ibmr;
	}
#endif

	err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
			  &page_shift, &ncont, &order);

	if (err < 0)
		return ERR_PTR(err);

	if (order <= mr_cache_max_order(dev)) {
		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
					 page_shift, order, access_flags);
		if (PTR_ERR(mr) == -EAGAIN) {
			mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
			mr = NULL;
		}
	} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
		if (access_flags & IB_ACCESS_ON_DEMAND) {
			err = -EINVAL;
			pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
			goto error;
		}
		use_umr = false;
	}

	if (!mr) {
		mutex_lock(&dev->slow_path_mutex);
		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
				page_shift, access_flags, !use_umr);
		mutex_unlock(&dev->slow_path_mutex);
	}

	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto error;
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	mr->umem = umem;
	set_mr_fields(dev, mr, npages, length, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif

	if (use_umr) {
		int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;

		if (access_flags & IB_ACCESS_ON_DEMAND)
			update_xlt_flags |= MLX5_IB_UPD_XLT_ZAP;

		err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
					 update_xlt_flags);

		if (err) {
			dereg_mr(dev, mr);
			return ERR_PTR(err);
		}
	}

	mr->live = 1;
	return &mr->ibmr;
error:
	ib_umem_release(umem);
	return ERR_PTR(err);
}

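/*
 * Invalidate a cached mkey with a UMR WQE so it can later be reused.
 * Skipped when the device is in an internal error state, since no WQEs
 * would complete.
 */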
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_umr_wr umrwr = {};

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return 0;

	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
			      MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.mkey = mr->mmkey.key;

	return mlx5_ib_post_send_wait(dev, &umrwr);
}

static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
		     int access_flags, int flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_umr_wr umrwr = {};
	int err;

	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;

	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.mkey = mr->mmkey.key;

	if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) {
		umrwr.pd = pd;
		umrwr.access_flags = access_flags;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
	}

	err = mlx5_ib_post_send_wait(dev, &umrwr);

	return err;
}

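/*
 * Re-registration: depending on @flags this replaces the umem, the PD,
 * the access flags, or any combination. When the new region still fits
 * under the existing mkey (use_umr_mtt_update()), only UMR WQEs are
 * issued; otherwise the mkey itself is destroyed and recreated.
 */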
1331int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
1332 u64 length, u64 virt_addr, int new_access_flags,
1333 struct ib_pd *new_pd, struct ib_udata *udata)
1334{
1335 struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
1336 struct mlx5_ib_mr *mr = to_mmr(ib_mr);
1337 struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
1338 int access_flags = flags & IB_MR_REREG_ACCESS ?
1339 new_access_flags :
1340 mr->access_flags;
1341 u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address;
1342 u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length;
1343 int page_shift = 0;
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +02001344 int upd_flags = 0;
Noa Osherovich56e11d62016-02-29 16:46:51 +02001345 int npages = 0;
1346 int ncont = 0;
1347 int order = 0;
1348 int err;
1349
1350 mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
1351 start, virt_addr, length, access_flags);
1352
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +02001353 atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
1354
Noa Osherovich56e11d62016-02-29 16:46:51 +02001355 if (flags != IB_MR_REREG_PD) {
1356 /*
1357 * Replace umem. This needs to be done whether or not UMR is
1358 * used.
1359 */
1360 flags |= IB_MR_REREG_TRANS;
1361 ib_umem_release(mr->umem);
Arnd Bergmann14ab8892016-10-24 22:48:21 +02001362 err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
1363 &npages, &page_shift, &ncont, &order);
1364 if (err < 0) {
Ilya Lesokhinfbcd4982017-09-24 21:46:35 +03001365 clean_mr(dev, mr);
Noa Osherovich56e11d62016-02-29 16:46:51 +02001366 return err;
1367 }
1368 }
1369
1370 if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
1371		/*
1372		 * UMR can't express this update; replace the MKey.
1373		 */
Ilya Lesokhin8b7ff7f2017-08-17 15:52:29 +03001374 if (mr->allocated_from_cache) {
Noa Osherovich56e11d62016-02-29 16:46:51 +02001375 err = unreg_umr(dev, mr);
1376 if (err)
1377 mlx5_ib_warn(dev, "Failed to unregister MR\n");
1378 } else {
1379 err = destroy_mkey(dev, mr);
1380 if (err)
1381 mlx5_ib_warn(dev, "Failed to destroy MKey\n");
1382 }
1383 if (err)
1384 return err;
1385
1386 mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
Ilya Lesokhinff740ae2017-08-17 15:52:30 +03001387 page_shift, access_flags, true);
Noa Osherovich56e11d62016-02-29 16:46:51 +02001388
1389 if (IS_ERR(mr))
1390 return PTR_ERR(mr);
1391
Ilya Lesokhin8b7ff7f2017-08-17 15:52:29 +03001392 mr->allocated_from_cache = 0;
Ilya Lesokhinff740ae2017-08-17 15:52:30 +03001393 mr->live = 1;
Noa Osherovich56e11d62016-02-29 16:46:51 +02001394 } else {
1395 /*
1396 * Send a UMR WQE
1397 */
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +02001398 mr->ibmr.pd = pd;
1399 mr->access_flags = access_flags;
1400 mr->mmkey.iova = addr;
1401 mr->mmkey.size = len;
1402 mr->mmkey.pd = to_mpd(pd)->pdn;
1403
1404 if (flags & IB_MR_REREG_TRANS) {
1405 upd_flags = MLX5_IB_UPD_XLT_ADDR;
1406 if (flags & IB_MR_REREG_PD)
1407 upd_flags |= MLX5_IB_UPD_XLT_PD;
1408 if (flags & IB_MR_REREG_ACCESS)
1409 upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
1410 err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
1411 upd_flags);
1412 } else {
1413 err = rereg_umr(pd, mr, access_flags, flags);
1414 }
1415
Noa Osherovich56e11d62016-02-29 16:46:51 +02001416 if (err) {
1417 mlx5_ib_warn(dev, "Failed to rereg UMR\n");
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +02001418 ib_umem_release(mr->umem);
Ilya Lesokhinfbcd4982017-09-24 21:46:35 +03001419 clean_mr(dev, mr);
Noa Osherovich56e11d62016-02-29 16:46:51 +02001420 return err;
1421 }
1422 }
1423
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +02001424 set_mr_fileds(dev, mr, npages, len, access_flags);
Noa Osherovich56e11d62016-02-29 16:46:51 +02001425
Noa Osherovich56e11d62016-02-29 16:46:51 +02001426#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1427 update_odp_mr(mr);
1428#endif
Noa Osherovich56e11d62016-02-29 16:46:51 +02001429 return 0;
1430}
1431
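/*
 * mlx5_alloc_priv_descs() - allocate and DMA-map the private
 * descriptor buffer (MTTs or KLMs) of a fast-reg MR. kzalloc() only
 * guarantees ARCH_KMALLOC_MINALIGN alignment, so the allocation is
 * padded and PTR_ALIGN()ed up to MLX5_UMR_ALIGN; mr->descs_alloc
 * keeps the raw pointer for kfree() while mr->descs holds the aligned
 * pointer used for the DMA mapping. Illustrative arithmetic, assuming
 * MLX5_UMR_ALIGN = 2048 and ARCH_KMALLOC_MINALIGN = 8: add_size is
 * 2040, so a 512 byte descriptor table allocates 2552 bytes.
 */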
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001432static int
1433mlx5_alloc_priv_descs(struct ib_device *device,
1434 struct mlx5_ib_mr *mr,
1435 int ndescs,
1436 int desc_size)
1437{
1438 int size = ndescs * desc_size;
1439 int add_size;
1440 int ret;
1441
1442 add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
1443
1444 mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
1445 if (!mr->descs_alloc)
1446 return -ENOMEM;
1447
1448 mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);
1449
Bart Van Assche9b0c2892017-01-20 13:04:21 -08001450 mr->desc_map = dma_map_single(device->dev.parent, mr->descs,
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001451 size, DMA_TO_DEVICE);
Bart Van Assche9b0c2892017-01-20 13:04:21 -08001452 if (dma_mapping_error(device->dev.parent, mr->desc_map)) {
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001453 ret = -ENOMEM;
1454 goto err;
1455 }
1456
1457 return 0;
1458err:
1459 kfree(mr->descs_alloc);
1460
1461 return ret;
1462}
1463
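/*
 * mlx5_free_priv_descs() - undo mlx5_alloc_priv_descs(): unmap the
 * DMA mapping and free the original (unaligned) allocation. Calling
 * it on an MR without descriptors is a no-op since mr->descs is NULL.
 */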
1464static void
1465mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
1466{
1467 if (mr->descs) {
1468 struct ib_device *device = mr->ibmr.device;
1469 int size = mr->max_descs * mr->desc_size;
1470
Bart Van Assche9b0c2892017-01-20 13:04:21 -08001471 dma_unmap_single(device->dev.parent, mr->desc_map,
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001472 size, DMA_TO_DEVICE);
1473 kfree(mr->descs_alloc);
1474 mr->descs = NULL;
1475 }
1476}
1477
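/*
 * clean_mr() - tear down the mkey side of an MR: destroy the
 * signature PSVs if present, free the private descriptor buffer, and
 * then either destroy the mkey and kfree() the MR, or return a
 * cache-allocated MR to the MR cache for reuse.
 */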
Ilya Lesokhinfbcd4982017-09-24 21:46:35 +03001478static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
Eli Cohene126ba92013-07-07 17:25:49 +03001479{
Ilya Lesokhin8b7ff7f2017-08-17 15:52:29 +03001480 int allocated_from_cache = mr->allocated_from_cache;
Eli Cohene126ba92013-07-07 17:25:49 +03001481 int err;
1482
Sagi Grimberg8b91ffc2015-07-30 10:32:34 +03001483 if (mr->sig) {
1484 if (mlx5_core_destroy_psv(dev->mdev,
1485 mr->sig->psv_memory.psv_idx))
1486 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
1487 mr->sig->psv_memory.psv_idx);
1488 if (mlx5_core_destroy_psv(dev->mdev,
1489 mr->sig->psv_wire.psv_idx))
1490 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
1491 mr->sig->psv_wire.psv_idx);
1492 kfree(mr->sig);
1493 mr->sig = NULL;
1494 }
1495
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001496 mlx5_free_priv_descs(mr);
1497
Ilya Lesokhin8b7ff7f2017-08-17 15:52:29 +03001498 if (!allocated_from_cache) {
Kamal Heib5942d8a2017-08-17 15:52:31 +03001499 u32 key = mr->mmkey.key;
1500
Haggai Eranb4cfe442014-12-11 17:04:26 +02001501 err = destroy_mkey(dev, mr);
Kamal Heib5942d8a2017-08-17 15:52:31 +03001502 kfree(mr);
Eli Cohene126ba92013-07-07 17:25:49 +03001503 if (err) {
1504 mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
Kamal Heib5942d8a2017-08-17 15:52:31 +03001505 key, err);
Eli Cohene126ba92013-07-07 17:25:49 +03001506 return err;
1507 }
1508 } else {
Artemy Kovalyov49780d42017-01-18 16:58:10 +02001509 mlx5_mr_cache_free(dev, mr);
Eli Cohene126ba92013-07-07 17:25:49 +03001510 }
1511
Eli Cohene126ba92013-07-07 17:25:49 +03001512 return 0;
1513}
1514
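/*
 * dereg_mr() - full MR teardown. For ODP-backed MRs the ordering
 * matters: clear mr->live so new page faults fail, wait out the SRCU
 * grace period so running fault handlers drain, destroy the page
 * mappings (or the implicit-ODP tree), and only then release the umem
 * and, via clean_mr(), the mkey.
 */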
Ilya Lesokhinfbcd4982017-09-24 21:46:35 +03001515static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
Haggai Eran6aec21f2014-12-11 17:04:23 +02001516{
Haggai Eran6aec21f2014-12-11 17:04:23 +02001517 int npages = mr->npages;
1518 struct ib_umem *umem = mr->umem;
1519
1520#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
Haggai Eranb4cfe442014-12-11 17:04:26 +02001521 if (umem && umem->odp_data) {
1522 /* Prevent new page faults from succeeding */
1523 mr->live = 0;
Haggai Eran6aec21f2014-12-11 17:04:23 +02001524 /* Wait for all running page-fault handlers to finish. */
1525 synchronize_srcu(&dev->mr_srcu);
Haggai Eranb4cfe442014-12-11 17:04:26 +02001526 /* Destroy all page mappings */
Artemy Kovalyov81713d32017-01-18 16:58:11 +02001527 if (umem->odp_data->page_list)
1528 mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
1529 ib_umem_end(umem));
1530 else
1531 mlx5_ib_free_implicit_mr(mr);
Haggai Eranb4cfe442014-12-11 17:04:26 +02001532		/*
1533		 * Release the umem before the MR for ODP, so that
1534		 * no invalidation can still be in flight and
1535		 * dereference the *mr struct.
1536		 */
1537 ib_umem_release(umem);
1538 atomic_sub(npages, &dev->mdev->priv.reg_pages);
1539
1540 /* Avoid double-freeing the umem. */
1541 umem = NULL;
1542 }
Haggai Eran6aec21f2014-12-11 17:04:23 +02001543#endif
1544
Ilya Lesokhinfbcd4982017-09-24 21:46:35 +03001545 clean_mr(dev, mr);
Haggai Eran6aec21f2014-12-11 17:04:23 +02001546
1547 if (umem) {
1548 ib_umem_release(umem);
1549 atomic_sub(npages, &dev->mdev->priv.reg_pages);
1550 }
1551
1552 return 0;
1553}
1554
Ilya Lesokhinfbcd4982017-09-24 21:46:35 +03001555int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
1556{
1557 struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
1558 struct mlx5_ib_mr *mr = to_mmr(ibmr);
1559
1560 return dereg_mr(dev, mr);
1561}
1562
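/*
 * mlx5_ib_alloc_mr() - backs the ib_alloc_mr() verb. MEM_REG MRs get
 * an MTT descriptor list, SG_GAPS MRs get KLMs (so the SG list need
 * not be page aligned), and SIGNATURE MRs additionally get a pair of
 * PSVs with BSF enabled for signature offload (e.g. T10-DIF). A
 * minimal caller sketch, assuming an existing PD (illustrative only):
 *
 *	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 32);
 *
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *	... ib_map_mr_sg() + fast-reg WR, then ib_dereg_mr(mr)
 */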
Sagi Grimberg9bee1782015-07-30 10:32:35 +03001563struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
1564 enum ib_mr_type mr_type,
1565 u32 max_num_sg)
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001566{
1567 struct mlx5_ib_dev *dev = to_mdev(pd->device);
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001568 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
Sagi Grimbergb005d312016-02-29 19:07:33 +02001569 int ndescs = ALIGN(max_num_sg, 4);
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001570 struct mlx5_ib_mr *mr;
1571 void *mkc;
1572 u32 *in;
Sagi Grimbergb005d312016-02-29 19:07:33 +02001573 int err;
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001574
1575 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1576 if (!mr)
1577 return ERR_PTR(-ENOMEM);
1578
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001579 in = kzalloc(inlen, GFP_KERNEL);
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001580 if (!in) {
1581 err = -ENOMEM;
1582 goto err_free;
1583 }
1584
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001585 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1586 MLX5_SET(mkc, mkc, free, 1);
1587 MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
1588 MLX5_SET(mkc, mkc, qpn, 0xffffff);
1589 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001590
Sagi Grimberg9bee1782015-07-30 10:32:35 +03001591 if (mr_type == IB_MR_TYPE_MEM_REG) {
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001592 mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
1593 MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001594 err = mlx5_alloc_priv_descs(pd->device, mr,
Artemy Kovalyov31616252017-01-02 11:37:42 +02001595 ndescs, sizeof(struct mlx5_mtt));
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001596 if (err)
1597 goto err_free_in;
1598
Artemy Kovalyov31616252017-01-02 11:37:42 +02001599 mr->desc_size = sizeof(struct mlx5_mtt);
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001600 mr->max_descs = ndescs;
Sagi Grimbergb005d312016-02-29 19:07:33 +02001601 } else if (mr_type == IB_MR_TYPE_SG_GAPS) {
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001602 mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
Sagi Grimbergb005d312016-02-29 19:07:33 +02001603
1604 err = mlx5_alloc_priv_descs(pd->device, mr,
1605 ndescs, sizeof(struct mlx5_klm));
1606 if (err)
1607 goto err_free_in;
1608 mr->desc_size = sizeof(struct mlx5_klm);
1609 mr->max_descs = ndescs;
Sagi Grimberg9bee1782015-07-30 10:32:35 +03001610 } else if (mr_type == IB_MR_TYPE_SIGNATURE) {
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001611 u32 psv_index[2];
1612
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001613 MLX5_SET(mkc, mkc, bsf_en, 1);
1614 MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001615 mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
1616 if (!mr->sig) {
1617 err = -ENOMEM;
1618 goto err_free_in;
1619 }
1620
1621 /* create mem & wire PSVs */
Jack Morgenstein9603b612014-07-28 23:30:22 +03001622 err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001623 2, psv_index);
1624 if (err)
1625 goto err_free_sig;
1626
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001627 mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001628 mr->sig->psv_memory.psv_idx = psv_index[0];
1629 mr->sig->psv_wire.psv_idx = psv_index[1];
Sagi Grimbergd5436ba2014-02-23 14:19:12 +02001630
1631 mr->sig->sig_status_checked = true;
1632 mr->sig->sig_err_exists = false;
1633 /* Next UMR, Arm SIGERR */
1634 ++mr->sig->sigerr_count;
Sagi Grimberg9bee1782015-07-30 10:32:35 +03001635 } else {
1636 mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
1637 err = -EINVAL;
1638 goto err_free_in;
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001639 }
1640
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001641 MLX5_SET(mkc, mkc, access_mode, mr->access_mode);
1642 MLX5_SET(mkc, mkc, umr_en, 1);
1643
Nitzan Carmi45e6ae72017-12-26 11:20:20 +02001644 mr->ibmr.device = pd->device;
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001645 err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001646 if (err)
1647 goto err_destroy_psv;
1648
Artemy Kovalyovaa8e08d2017-01-02 11:37:48 +02001649 mr->mmkey.type = MLX5_MKEY_MR;
Matan Baraka606b0f2016-02-29 18:05:28 +02001650 mr->ibmr.lkey = mr->mmkey.key;
1651 mr->ibmr.rkey = mr->mmkey.key;
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001652 mr->umem = NULL;
1653 kfree(in);
1654
1655 return &mr->ibmr;
1656
1657err_destroy_psv:
1658 if (mr->sig) {
Jack Morgenstein9603b612014-07-28 23:30:22 +03001659 if (mlx5_core_destroy_psv(dev->mdev,
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001660 mr->sig->psv_memory.psv_idx))
1661 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
1662 mr->sig->psv_memory.psv_idx);
Jack Morgenstein9603b612014-07-28 23:30:22 +03001663 if (mlx5_core_destroy_psv(dev->mdev,
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001664 mr->sig->psv_wire.psv_idx))
1665 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
1666 mr->sig->psv_wire.psv_idx);
1667 }
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001668 mlx5_free_priv_descs(mr);
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001669err_free_sig:
1670 kfree(mr->sig);
1671err_free_in:
1672 kfree(in);
1673err_free:
1674 kfree(mr);
1675 return ERR_PTR(err);
1676}
1677
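/*
 * mlx5_ib_alloc_mw() - allocate a memory window as a KLM-mode mkey
 * with umr_en set so it can later be bound and invalidated from the
 * send queue; en_rinval is set for type 2 windows. The udata checks
 * keep the uAPI extensible: unknown comp_mask bits or non-zero
 * trailing input are rejected with -EOPNOTSUPP rather than silently
 * ignored.
 */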
Matan Barakd2370e02016-02-29 18:05:30 +02001678struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
1679 struct ib_udata *udata)
1680{
1681 struct mlx5_ib_dev *dev = to_mdev(pd->device);
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001682 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
Matan Barakd2370e02016-02-29 18:05:30 +02001683 struct mlx5_ib_mw *mw = NULL;
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001684 u32 *in = NULL;
1685 void *mkc;
Matan Barakd2370e02016-02-29 18:05:30 +02001686 int ndescs;
1687 int err;
1688 struct mlx5_ib_alloc_mw req = {};
1689 struct {
1690 __u32 comp_mask;
1691 __u32 response_length;
1692 } resp = {};
1693
1694 err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
1695 if (err)
1696 return ERR_PTR(err);
1697
1698 if (req.comp_mask || req.reserved1 || req.reserved2)
1699 return ERR_PTR(-EOPNOTSUPP);
1700
1701 if (udata->inlen > sizeof(req) &&
1702 !ib_is_udata_cleared(udata, sizeof(req),
1703 udata->inlen - sizeof(req)))
1704 return ERR_PTR(-EOPNOTSUPP);
1705
1706 ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);
1707
1708 mw = kzalloc(sizeof(*mw), GFP_KERNEL);
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001709 in = kzalloc(inlen, GFP_KERNEL);
Matan Barakd2370e02016-02-29 18:05:30 +02001710 if (!mw || !in) {
1711 err = -ENOMEM;
1712 goto free;
1713 }
1714
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001715 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
Matan Barakd2370e02016-02-29 18:05:30 +02001716
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001717 MLX5_SET(mkc, mkc, free, 1);
1718 MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
1719 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
1720 MLX5_SET(mkc, mkc, umr_en, 1);
1721 MLX5_SET(mkc, mkc, lr, 1);
1722 MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_KLMS);
1723	MLX5_SET(mkc, mkc, en_rinval, type == IB_MW_TYPE_2);
1724 MLX5_SET(mkc, mkc, qpn, 0xffffff);
1725
1726 err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
Matan Barakd2370e02016-02-29 18:05:30 +02001727 if (err)
1728 goto free;
1729
Artemy Kovalyovaa8e08d2017-01-02 11:37:48 +02001730 mw->mmkey.type = MLX5_MKEY_MW;
Matan Barakd2370e02016-02-29 18:05:30 +02001731 mw->ibmw.rkey = mw->mmkey.key;
Artemy Kovalyovdb570d72017-04-05 09:23:59 +03001732 mw->ndescs = ndescs;
Matan Barakd2370e02016-02-29 18:05:30 +02001733
1734 resp.response_length = min(offsetof(typeof(resp), response_length) +
1735 sizeof(resp.response_length), udata->outlen);
1736 if (resp.response_length) {
1737 err = ib_copy_to_udata(udata, &resp, resp.response_length);
1738 if (err) {
1739 mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
1740 goto free;
1741 }
1742 }
1743
1744 kfree(in);
1745 return &mw->ibmw;
1746
1747free:
1748 kfree(mw);
1749 kfree(in);
1750 return ERR_PTR(err);
1751}
1752
1753int mlx5_ib_dealloc_mw(struct ib_mw *mw)
1754{
1755 struct mlx5_ib_mw *mmw = to_mmw(mw);
1756 int err;
1757
1758 err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
1759 &mmw->mmkey);
1760 if (!err)
1761 kfree(mmw);
1762 return err;
1763}
1764
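/*
 * mlx5_ib_check_mr_status() - report a pending signature error and
 * mark it consumed. If the recorded error key doesn't match the MR's
 * lkey, a generic bad-guard error at offset 0 is reported instead.
 * Consumer sketch (handle_sig_err() is caller-defined, illustrative
 * only):
 *
 *	struct ib_mr_status st;
 *
 *	if (!ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &st) &&
 *	    (st.fail_status & IB_MR_CHECK_SIG_STATUS))
 *		handle_sig_err(&st.sig_err);
 */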
Sagi Grimbergd5436ba2014-02-23 14:19:12 +02001765int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
1766 struct ib_mr_status *mr_status)
1767{
1768 struct mlx5_ib_mr *mmr = to_mmr(ibmr);
1769 int ret = 0;
1770
1771 if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
1772 pr_err("Invalid status check mask\n");
1773 ret = -EINVAL;
1774 goto done;
1775 }
1776
1777 mr_status->fail_status = 0;
1778 if (check_mask & IB_MR_CHECK_SIG_STATUS) {
1779 if (!mmr->sig) {
1780 ret = -EINVAL;
1781 pr_err("signature status check requested on a non-signature enabled MR\n");
1782 goto done;
1783 }
1784
1785 mmr->sig->sig_status_checked = true;
1786 if (!mmr->sig->sig_err_exists)
1787 goto done;
1788
1789 if (ibmr->lkey == mmr->sig->err_item.key)
1790 memcpy(&mr_status->sig_err, &mmr->sig->err_item,
1791 sizeof(mr_status->sig_err));
1792 else {
1793 mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
1794 mr_status->sig_err.sig_err_offset = 0;
1795 mr_status->sig_err.key = mmr->sig->err_item.key;
1796 }
1797
1798 mmr->sig->sig_err_exists = false;
1799 mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
1800 }
1801
1802done:
1803 return ret;
1804}
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001805
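/*
 * mlx5_ib_sg_to_klms() - translate a scatterlist into KLM descriptors,
 * one KLM (va, byte count, lkey) per SG element, honouring an optional
 * byte offset into the first element. Unlike the MTT path there is no
 * page-alignment requirement, which is what IB_MR_TYPE_SG_GAPS buys.
 */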
Sagi Grimbergb005d312016-02-29 19:07:33 +02001806static int
1807mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
1808 struct scatterlist *sgl,
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001809 unsigned short sg_nents,
Bart Van Assche9aa8b322016-05-12 10:49:15 -07001810 unsigned int *sg_offset_p)
Sagi Grimbergb005d312016-02-29 19:07:33 +02001811{
1812 struct scatterlist *sg = sgl;
1813 struct mlx5_klm *klms = mr->descs;
Bart Van Assche9aa8b322016-05-12 10:49:15 -07001814 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
Sagi Grimbergb005d312016-02-29 19:07:33 +02001815 u32 lkey = mr->ibmr.pd->local_dma_lkey;
1816 int i;
1817
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001818 mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
Sagi Grimbergb005d312016-02-29 19:07:33 +02001819 mr->ibmr.length = 0;
1820 mr->ndescs = sg_nents;
1821
1822 for_each_sg(sgl, sg, sg_nents, i) {
Bart Van Assche99975cd2017-04-24 15:15:28 -07001823 if (unlikely(i >= mr->max_descs))
Sagi Grimbergb005d312016-02-29 19:07:33 +02001824 break;
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001825 klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
1826 klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
Sagi Grimbergb005d312016-02-29 19:07:33 +02001827 klms[i].key = cpu_to_be32(lkey);
Sagi Grimberg0a49f2c2017-04-23 14:31:42 +03001828 mr->ibmr.length += sg_dma_len(sg) - sg_offset;
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001829
1830 sg_offset = 0;
Sagi Grimbergb005d312016-02-29 19:07:33 +02001831 }
1832
Bart Van Assche9aa8b322016-05-12 10:49:15 -07001833 if (sg_offset_p)
1834 *sg_offset_p = sg_offset;
1835
Sagi Grimbergb005d312016-02-29 19:07:33 +02001836 return i;
1837}
1838
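/*
 * mlx5_set_page() - ib_sg_to_pages() callback for the MTT path:
 * append one page address, tagged read/write enabled, to the
 * descriptor list; -ENOMEM once the list is full.
 */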
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001839static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
1840{
1841 struct mlx5_ib_mr *mr = to_mmr(ibmr);
1842 __be64 *descs;
1843
1844 if (unlikely(mr->ndescs == mr->max_descs))
1845 return -ENOMEM;
1846
1847 descs = mr->descs;
1848 descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
1849
1850 return 0;
1851}
1852
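/*
 * mlx5_ib_map_mr_sg() - backs the ib_map_mr_sg() verb: sync the
 * descriptor buffer for CPU access, fill it with KLMs or (through
 * ib_sg_to_pages()) MTT pages, and sync it back for the device. A
 * usage sketch; everything except the verbs is illustrative:
 *
 *	unsigned int sg_offset = 0;
 *	int n = ib_map_mr_sg(mr, sgl, sg_nents, &sg_offset, PAGE_SIZE);
 *
 *	if (n < sg_nents)
 *		... not fully mapped: fail or split the request
 */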
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001853int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
Bart Van Assche9aa8b322016-05-12 10:49:15 -07001854 unsigned int *sg_offset)
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001855{
1856 struct mlx5_ib_mr *mr = to_mmr(ibmr);
1857 int n;
1858
1859 mr->ndescs = 0;
1860
1861 ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
1862 mr->desc_size * mr->max_descs,
1863 DMA_TO_DEVICE);
1864
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001865 if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001866 n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset);
Sagi Grimbergb005d312016-02-29 19:07:33 +02001867 else
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001868 n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
1869 mlx5_set_page);
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001870
1871 ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
1872 mr->desc_size * mr->max_descs,
1873 DMA_TO_DEVICE);
1874
1875 return n;
1876}