/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */


#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048

static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/* Wait until all page fault handlers using the mr complete. */
	synchronize_srcu(&dev->mr_srcu);
#endif

	return err;
}

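/*
 * Map an mkey order to its index in the MR cache; cache entries cover
 * consecutive orders starting at cache->ent[0].order.
 */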
static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	else
		return order - cache->ent[0].order;
}

static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
{
	return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
		length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void update_odp_mr(struct mlx5_ib_mr *mr)
{
	if (mr->umem->odp_data) {
		/*
		 * This barrier prevents the compiler from moving the
		 * setting of umem->odp_data->private to point to our
		 * MR before reg_umr has finished, to ensure that MR
		 * initialization has completed before we start
		 * handling invalidations.
		 */
		smp_wmb();
		mr->umem->odp_data->private = mr;
		/*
		 * Make sure the new umem->odp_data->private value is
		 * visible to the invalidation routines before page
		 * faults can occur on the MR. Page faults can happen
		 * once we put the MR in the tree, below this line.
		 * Without the barrier, a fault could be handled and an
		 * invalidation could run before
		 * umem->odp_data->private == mr is visible to the
		 * invalidation handler.
		 */
		smp_wmb();
	}
}
#endif

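/*
 * Completion callback for asynchronous mkey creation. On failure,
 * cache filling is throttled for about a second; on success, the new
 * mkey gets a rotating 8-bit key, is added to its cache entry and is
 * published in the device's mkey radix tree.
 */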
static void reg_mr_callback(int status, void *context)
{
	struct mlx5_ib_mr *mr = context;
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	u8 key;
	unsigned long flags;
	struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
	int err;

	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	mr->mmkey.type = MLX5_MKEY_MR;
	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
	mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	ent->size++;
	spin_unlock_irqrestore(&ent->lock, flags);

	write_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key),
				&mr->mmkey);
	if (err)
		pr_err("Error inserting to mkey tree. 0x%x\n", -err);
	write_unlock_irqrestore(&table->lock, flags);

	if (!completion_done(&ent->compl))
		complete(&ent->compl);
}

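/*
 * Asynchronously create up to @num free mkeys for cache entry @c,
 * never allowing more than MAX_PENDING_REG_MR creations in flight.
 */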
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err = 0;
	int i;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	for (i = 0; i < num; i++) {
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			break;
		}

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		mr->order = ent->order;
		mr->allocated_from_cache = 1;
		mr->dev = dev;

		MLX5_SET(mkc, mkc, free, 1);
		MLX5_SET(mkc, mkc, umr_en, 1);
		MLX5_SET(mkc, mkc, access_mode, ent->access_mode);

		MLX5_SET(mkc, mkc, qpn, 0xffffff);
		MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
		MLX5_SET(mkc, mkc, log_page_size, ent->page);

		spin_lock_irq(&ent->lock);
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
					       in, inlen,
					       mr->out, sizeof(mr->out),
					       reg_mr_callback, mr);
		if (err) {
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}

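/* Destroy up to @num cached mkeys from entry @c, oldest first. */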
static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;
	int i;

	for (i = 0; i < num; i++) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = destroy_mkey(dev, mr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}

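/*
 * debugfs knobs for each cache entry: "size" resizes the entry, while
 * "limit" sets the watermark that the background worker refills to.
 */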
static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20] = {0};
	u32 var;
	int err;
	int c;

	/* Copy only what the user wrote, so short writes cannot fault. */
	if (copy_from_user(lbuf, buf, min(count, sizeof(lbuf) - 1)))
		return -EFAULT;

	c = order2idx(dev, ent->order);

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var < ent->limit)
		return -EINVAL;

	if (var > ent->size) {
		do {
			err = add_keys(dev, c, var - ent->size);
			if (err && err != -EAGAIN)
				return err;

			usleep_range(3000, 5000);
		} while (err);
	} else if (var < ent->size) {
		remove_keys(dev, c, ent->size - var);
	}

	return count;
}

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};

static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20] = {0};
	u32 var;
	int err;
	int c;

	/* Copy only what the user wrote, so short writes cannot fault. */
	if (copy_from_user(lbuf, buf, min(count, sizeof(lbuf) - 1)))
		return -EFAULT;

	c = order2idx(dev, ent->order);

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var > ent->size)
		return -EINVAL;

	ent->limit = var;

	if (ent->cur < ent->limit) {
		err = add_keys(dev, c, 2 * ent->limit - ent->cur);
		if (err)
			return err;
	}

	return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};

static int someone_adding(struct mlx5_mr_cache *cache)
{
	int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		if (cache->ent[i].cur < cache->ent[i].limit)
			return 1;
	}

	return 0;
}

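/*
 * Background maintenance for one cache entry: refill toward twice the
 * limit, backing off briefly on -EAGAIN and for a second on a command
 * failure; shrink only once no mkey has been added for 300 seconds and
 * the CPU is not needed by other tasks.
 */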
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);
	int err;

	if (cache->stopped)
		return;

	ent = &dev->cache.ent[i];
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err == -EAGAIN) {
				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
					    i + 2);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(3));
			} else if (err) {
				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
					     i + 2, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			} else {
				queue_work(cache->wq, &ent->work);
			}
		}
	} else if (ent->cur > 2 * ent->limit) {
		/*
		 * The remove_keys() logic is performed as a garbage
		 * collection task. Such a task is intended to run when
		 * no other active processes are running.
		 *
		 * The need_resched() will return TRUE if there are
		 * user tasks to be activated in the near future.
		 *
		 * In that case, we don't execute remove_keys() and
		 * postpone the garbage collection work to the next
		 * cycle, in order to free CPU resources for other
		 * tasks.
		 */
		if (!need_resched() && !someone_adding(cache) &&
		    time_after(jiffies, cache->last_add + 300 * HZ)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
		} else {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		}
	}
}

static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}

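/*
 * Allocate an MR from a specific cache entry, blocking on the entry's
 * completion until a background refill delivers one if the free list
 * is empty.
 */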
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	struct mlx5_ib_mr *mr;
	int err;

	if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_err(dev, "cache entry %d is out of range\n", entry);
		return NULL;
	}

	ent = &cache->ent[entry];
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);

			err = add_keys(dev, entry, 1);
			if (err && err != -EAGAIN)
				return ERR_PTR(err);

			wait_for_completion(&ent->compl);
		} else {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			return mr;
		}
	}
}

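/*
 * Take an MR of at least @order from the cache, scanning upward
 * through larger entries on a miss and kicking their refill work.
 */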
static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;
	int last_umr_cache_entry;
	int c;
	int i;

	c = order2idx(dev, order);
	last_umr_cache_entry = order2idx(dev, mr_cache_max_order(dev));
	if (c < 0 || c > last_umr_cache_entry) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
		return NULL;
	}

	for (i = c; i <= last_umr_cache_entry; i++) {
		ent = &cache->ent[i];

		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			break;
		}
		spin_unlock_irq(&ent->lock);

		queue_work(cache->wq, &ent->work);
	}

	if (!mr)
		cache->ent[c].miss++;

	return mr;
}

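/*
 * Return an MR to its cache entry; the mkey is first moved back to
 * free state with a UMR, and the entry is shrunk if it grew beyond
 * twice its limit.
 */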
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int shrink = 0;
	int c;

	c = order2idx(dev, mr->order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
		return;
	}

	if (unreg_umr(dev, mr))
		return;

	ent = &cache->ent[c];
	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	if (ent->cur > 2 * ent->limit)
		shrink = 1;
	spin_unlock_irq(&ent->lock);

	if (shrink)
		queue_work(cache->wq, &ent->work);
}

static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;

	cancel_delayed_work(&ent->dwork);
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = destroy_mkey(dev, mr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}

static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root)
		return;

	debugfs_remove_recursive(dev->cache.root);
	dev->cache.root = NULL;
}

static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int i;

	if (!mlx5_debugfs_root)
		return 0;

	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
	if (!cache->root)
		return -ENOMEM;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		sprintf(ent->name, "%d", ent->order);
		ent->dir = debugfs_create_dir(ent->name, cache->root);
		if (!ent->dir)
			goto err;

		ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
						 &size_fops);
		if (!ent->fsize)
			goto err;

		ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
						  &limit_fops);
		if (!ent->flimit)
			goto err;

		ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
					       &ent->cur);
		if (!ent->fcur)
			goto err;

		ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
						&ent->miss);
		if (!ent->fmiss)
			goto err;
	}

	return 0;
err:
	mlx5_mr_cache_debugfs_cleanup(dev);

	return -ENOMEM;
}

static void delay_time_func(struct timer_list *t)
{
	struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);

	dev->fill_delay = 0;
}

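/*
 * Set up the MR cache: one entry per order starting at order 2, each
 * with its own refill work items; entries above MR_CACHE_LAST_STD_ENTRY
 * are initialized by the ODP code instead.
 */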
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int err;
	int i;

	mutex_init(&dev->slow_path_mutex);
	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	timer_setup(&dev->delay_timer, delay_time_func, 0);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);
		ent->order = i + 2;
		ent->dev = dev;
		ent->limit = 0;

		init_completion(&ent->compl);
		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
		queue_work(cache->wq, &ent->work);

		if (i > MR_CACHE_LAST_STD_ENTRY) {
			mlx5_odp_init_mr_cache_entry(ent);
			continue;
		}

		if (ent->order > mr_cache_max_order(dev))
			continue;

		ent->page = PAGE_SHIFT;
		ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
			   MLX5_IB_UMR_OCTOWORD;
		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
		    mlx5_core_is_pf(dev->mdev))
			ent->limit = dev->mdev->profile->mr_cache[i].limit;
		else
			ent->limit = 0;
	}

	err = mlx5_mr_cache_debugfs_init(dev);
	if (err)
		mlx5_ib_warn(dev, "cache debugfs failure\n");

	/*
	 * We don't want to fail the driver if debugfs failed to
	 * initialize, so we are not forwarding the error to the user.
	 */

	return 0;
}

static void wait_for_async_commands(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int total = 0;
	int i;
	int j;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		for (j = 0; j < 1000; j++) {
			if (!ent->pending)
				break;
			msleep(50);
		}
	}
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		total += ent->pending;
	}

	if (total)
		mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total);
	else
		mlx5_ib_warn(dev, "done with all pending requests\n");
}

int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	int i;

	dev->cache.stopped = 1;
	flush_workqueue(dev->cache.wq);

	mlx5_mr_cache_debugfs_cleanup(dev);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
		clean_keys(dev, i);

	destroy_workqueue(dev->cache.wq);
	wait_for_async_commands(dev);
	del_timer_sync(&dev->delay_timer);

	return 0;
}

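/*
 * Register a DMA MR spanning the whole address space: a length64 mkey
 * in pass-through (PA) access mode, which needs no translation
 * entries.
 */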
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET(mkc, mkc, length64, 1);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, 0);

	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

static int get_octo_len(u64 addr, u64 len, int page_shift)
{
	u64 page_size = 1ULL << page_shift;
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> page_shift;
	return (npages + 1) / 2;
}

static int mr_cache_max_order(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		return MR_CACHE_LAST_STD_ENTRY + 2;
	return MLX5_MAX_UMR_SHIFT;
}

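/*
 * Pin the user buffer and derive its mapping parameters: the number of
 * system pages, the best page shift for the mapping, the page count at
 * that shift (ncont) and the corresponding cache order.
 */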
static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
		       int access_flags, struct ib_umem **umem,
		       int *npages, int *page_shift, int *ncont,
		       int *order)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int err;

	*umem = ib_umem_get(pd->uobject->context, start, length,
			    access_flags, 0);
	err = PTR_ERR_OR_ZERO(*umem);
	if (err < 0) {
		mlx5_ib_err(dev, "umem get failed (%d)\n", err);
		return err;
	}

	mlx5_ib_cont_pages(*umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
			   page_shift, ncont, order);
	if (!*npages) {
		mlx5_ib_warn(dev, "avoid zero region\n");
		ib_umem_release(*umem);
		return -EINVAL;
	}

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    *npages, *ncont, *order, *page_shift);

	return 0;
}

static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_umr_context *context =
		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);

	context->status = wc->status;
	complete(&context->done);
}

static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	context->cqe.done = mlx5_ib_umr_done;
	context->status = -1;
	init_completion(&context->done);
}

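/*
 * Post a UMR work request on the dedicated UMR QP and sleep until its
 * completion arrives; the semaphore bounds the number of UMR WQEs
 * outstanding at once.
 */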
static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
				  struct mlx5_umr_wr *umrwr)
{
	struct umr_common *umrc = &dev->umrc;
	struct ib_send_wr *bad;
	int err;
	struct mlx5_ib_umr_context umr_context;

	mlx5_ib_init_umr_context(&umr_context);
	umrwr->wr.wr_cqe = &umr_context.cqe;

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr->wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed (%u)\n",
				     umr_context.status);
			err = -EFAULT;
		}
	}
	up(&umrc->sem);
	return err;
}

static struct mlx5_ib_mr *alloc_mr_from_cache(
				  struct ib_pd *pd, struct ib_umem *umem,
				  u64 virt_addr, u64 len, int npages,
				  int page_shift, int order, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	int err = 0;
	int i;

	for (i = 0; i < 1; i++) {
		mr = alloc_cached_mr(dev, order);
		if (mr)
			break;

		err = add_keys(dev, order2idx(dev, order), 1);
		if (err && err != -EAGAIN) {
			mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
			break;
		}
	}

	if (!mr)
		return ERR_PTR(-EAGAIN);

	mr->ibmr.pd = pd;
	mr->umem = umem;
	mr->access_flags = access_flags;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->mmkey.iova = virt_addr;
	mr->mmkey.size = len;
	mr->mmkey.pd = to_mpd(pd)->pdn;

	return mr;
}

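/*
 * Fill one chunk of an XLT buffer: KLMs for indirect (ODP) mkeys,
 * otherwise MTTs taken from the umem, zero-padding whatever is left of
 * the chunk.
 */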
static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
			       void *xlt, int page_shift, size_t size,
			       int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct ib_umem *umem = mr->umem;

	if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
		mlx5_odp_populate_klm(xlt, idx, npages, mr, flags);
		return npages;
	}

	npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx);

	if (!(flags & MLX5_IB_UPD_XLT_ZAP)) {
		__mlx5_ib_populate_pas(dev, umem, page_shift,
				       idx, npages, xlt,
				       MLX5_IB_MTT_PRESENT);
		/*
		 * Clear padding after the pages brought from the umem.
		 */
		memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0,
		       size - npages * sizeof(struct mlx5_mtt));
	}

	return npages;
}

#define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
			    MLX5_UMR_MTT_ALIGNMENT)
#define MLX5_SPARE_UMR_CHUNK 0x10000

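/*
 * Push an updated translation table to the device, one
 * MLX5_UMR_MTT_ALIGNMENT-aligned chunk per UMR WQE. The chunk buffer
 * is taken from free pages when possible, then from the smaller spare
 * chunk size, and as a last resort from the context's emergency page,
 * which is serialized by a mutex.
 */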
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct device *ddev = dev->ib_dev.dev.parent;
	struct mlx5_ib_ucontext *uctx = NULL;
	int size;
	void *xlt;
	dma_addr_t dma;
	struct mlx5_umr_wr wr;
	struct ib_sge sg;
	int err = 0;
	int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
			       ? sizeof(struct mlx5_klm)
			       : sizeof(struct mlx5_mtt);
	const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
	const int page_mask = page_align - 1;
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t pages_iter = 0;
	gfp_t gfp;

	/*
	 * UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly.
	 */
	if (idx & page_mask) {
		npages += idx & page_mask;
		idx &= ~page_mask;
	}

	gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL;
	gfp |= __GFP_ZERO | __GFP_NOWARN;

	pages_to_map = ALIGN(npages, page_align);
	size = desc_size * pages_to_map;
	size = min_t(int, size, MLX5_MAX_UMR_CHUNK);

	xlt = (void *)__get_free_pages(gfp, get_order(size));
	if (!xlt && size > MLX5_SPARE_UMR_CHUNK) {
		mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d. fallback to spare UMR allocation of %d bytes\n",
			    size, get_order(size), MLX5_SPARE_UMR_CHUNK);

		size = MLX5_SPARE_UMR_CHUNK;
		xlt = (void *)__get_free_pages(gfp, get_order(size));
	}

	if (!xlt) {
		uctx = to_mucontext(mr->ibmr.pd->uobject->context);
		mlx5_ib_warn(dev, "Using XLT emergency buffer\n");
		size = PAGE_SIZE;
		xlt = (void *)uctx->upd_xlt_page;
		mutex_lock(&uctx->upd_xlt_page_mutex);
		memset(xlt, 0, size);
	}
	pages_iter = size / desc_size;
	dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
		err = -ENOMEM;
		goto free_xlt;
	}

	sg.addr = dma;
	sg.lkey = dev->umrc.pd->local_dma_lkey;

	memset(&wr, 0, sizeof(wr));
	wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
	if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
		wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr.wr.sg_list = &sg;
	wr.wr.num_sge = 1;
	wr.wr.opcode = MLX5_IB_WR_UMR;

	wr.pd = mr->ibmr.pd;
	wr.mkey = mr->mmkey.key;
	wr.length = mr->mmkey.size;
	wr.virt_addr = mr->mmkey.iova;
	wr.access_flags = mr->access_flags;
	wr.page_shift = page_shift;

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, idx += pages_iter) {
		npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
		npages = populate_xlt(mr, idx, npages, xlt,
				      page_shift, size, flags);

		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

		sg.length = ALIGN(npages * desc_size,
				  MLX5_UMR_MTT_ALIGNMENT);

		if (pages_mapped + pages_iter >= pages_to_map) {
			if (flags & MLX5_IB_UPD_XLT_ENABLE)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_ENABLE_MR |
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
			if (flags & MLX5_IB_UPD_XLT_PD ||
			    flags & MLX5_IB_UPD_XLT_ACCESS)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
			if (flags & MLX5_IB_UPD_XLT_ADDR)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
		}

		wr.offset = idx * desc_size;
		wr.xlt_size = sg.length;

		err = mlx5_ib_post_send_wait(dev, &wr);
	}
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_xlt:
	if (uctx)
		mutex_unlock(&uctx->upd_xlt_page_mutex);
	else
		free_pages((unsigned long)xlt, get_order(size));

	return err;
}

/*
 * If ibmr is NULL it will be allocated by reg_create.
 * Else, the given ibmr will be used.
 */
static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
				     u64 virt_addr, u64 length,
				     struct ib_umem *umem, int npages,
				     int page_shift, int access_flags,
				     bool populate)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	__be64 *pas;
	void *mkc;
	int inlen;
	u32 *in;
	int err;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->ibmr.pd = pd;
	mr->access_flags = access_flags;

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	if (populate)
		inlen += sizeof(*pas) * roundup(npages, 2);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	if (populate && !(access_flags & IB_ACCESS_ON_DEMAND))
		mlx5_ib_populate_pas(dev, umem, page_shift, pas,
				     pg_cap ? MLX5_IB_MTT_PRESENT : 0);

	/*
	 * The pg_access bit allows setting the access flags
	 * in the page list submitted with the command.
	 */
	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, free, !populate);
	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);

	MLX5_SET64(mkc, mkc, start_addr, virt_addr);
	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(virt_addr, length, page_shift));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	if (populate) {
		MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
			 get_octo_len(virt_addr, length, page_shift));
	}

	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->dev = dev;
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);

	return mr;

err_2:
	kvfree(in);

err_1:
	if (!ibmr)
		kfree(mr);

	return ERR_PTR(err);
}

static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			  int npages, u64 length, int access_flags)
{
	mr->npages = npages;
	atomic_add(npages, &dev->mdev->priv.reg_pages);
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.length = length;
	mr->access_flags = access_flags;
}

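/*
 * Register a user MR. Regions small enough for the cache get an mkey
 * from there and are pointed at the new umem with a UMR; anything else
 * falls back to a full synchronous mkey creation under
 * slow_path_mutex.
 */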
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	struct ib_umem *umem;
	int page_shift;
	int npages;
	int ncont;
	int order;
	int err;
	bool use_umr = true;

	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
		return ERR_PTR(-EINVAL);

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (!start && length == U64_MAX) {
		if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
		    !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
			return ERR_PTR(-EINVAL);

		mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
		if (IS_ERR(mr))
			return ERR_CAST(mr);
		return &mr->ibmr;
	}
#endif

	err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
			  &page_shift, &ncont, &order);

	if (err < 0)
		return ERR_PTR(err);

	if (order <= mr_cache_max_order(dev)) {
		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
					 page_shift, order, access_flags);
		if (PTR_ERR(mr) == -EAGAIN) {
			mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
			mr = NULL;
		}
	} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
		if (access_flags & IB_ACCESS_ON_DEMAND) {
			err = -EINVAL;
			pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
			goto error;
		}
		use_umr = false;
	}

	if (!mr) {
		mutex_lock(&dev->slow_path_mutex);
		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
				page_shift, access_flags, !use_umr);
		mutex_unlock(&dev->slow_path_mutex);
	}

	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto error;
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	mr->umem = umem;
	set_mr_fields(dev, mr, npages, length, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif

	if (use_umr) {
		int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;

		if (access_flags & IB_ACCESS_ON_DEMAND)
			update_xlt_flags |= MLX5_IB_UPD_XLT_ZAP;

		err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
					 update_xlt_flags);

		if (err) {
			dereg_mr(dev, mr);
			return ERR_PTR(err);
		}
	}

	mr->live = 1;
	return &mr->ibmr;
error:
	ib_umem_release(umem);
	return ERR_PTR(err);
}

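/*
 * Move the mkey back to free state with a UMR WQE so it can be reused;
 * skipped when the device is in the internal error state.
 */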
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_umr_wr umrwr = {};

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return 0;

	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
			      MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.mkey = mr->mmkey.key;

	return mlx5_ib_post_send_wait(dev, &umrwr);
}

static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
		     int access_flags, int flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_umr_wr umrwr = {};
	int err;

	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;

	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.mkey = mr->mmkey.key;

	if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) {
		umrwr.pd = pd;
		umrwr.access_flags = access_flags;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
	}

	err = mlx5_ib_post_send_wait(dev, &umrwr);

	return err;
}

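/*
 * Re-register an MR in place: replace the umem and the PD/access flags
 * as requested, updating the existing mkey with UMR when its
 * translation space is large enough and recreating it otherwise.
 */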
1330int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
1331 u64 length, u64 virt_addr, int new_access_flags,
1332 struct ib_pd *new_pd, struct ib_udata *udata)
1333{
1334 struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
1335 struct mlx5_ib_mr *mr = to_mmr(ib_mr);
1336 struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
1337 int access_flags = flags & IB_MR_REREG_ACCESS ?
1338 new_access_flags :
1339 mr->access_flags;
1340 u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address;
1341 u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length;
1342 int page_shift = 0;
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +02001343 int upd_flags = 0;
Noa Osherovich56e11d62016-02-29 16:46:51 +02001344 int npages = 0;
1345 int ncont = 0;
1346 int order = 0;
1347 int err;
1348
1349 mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
1350 start, virt_addr, length, access_flags);
1351
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +02001352 atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
1353
Noa Osherovich56e11d62016-02-29 16:46:51 +02001354 if (flags != IB_MR_REREG_PD) {
1355 /*
1356 * Replace umem. This needs to be done whether or not UMR is
1357 * used.
1358 */
1359 flags |= IB_MR_REREG_TRANS;
1360 ib_umem_release(mr->umem);
Arnd Bergmann14ab8892016-10-24 22:48:21 +02001361 err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
1362 &npages, &page_shift, &ncont, &order);
1363 if (err < 0) {
Ilya Lesokhinfbcd4982017-09-24 21:46:35 +03001364 clean_mr(dev, mr);
Noa Osherovich56e11d62016-02-29 16:46:51 +02001365 return err;
1366 }
1367 }
1368
1369 if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
1370 /*
1371 * UMR can't be used - MKey needs to be replaced.
1372 */
Ilya Lesokhin8b7ff7f2017-08-17 15:52:29 +03001373 if (mr->allocated_from_cache) {
Noa Osherovich56e11d62016-02-29 16:46:51 +02001374 err = unreg_umr(dev, mr);
1375 if (err)
1376 mlx5_ib_warn(dev, "Failed to unregister MR\n");
1377 } else {
1378 err = destroy_mkey(dev, mr);
1379 if (err)
1380 mlx5_ib_warn(dev, "Failed to destroy MKey\n");
1381 }
1382 if (err)
1383 return err;
1384
1385 mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
Ilya Lesokhinff740ae2017-08-17 15:52:30 +03001386 page_shift, access_flags, true);
Noa Osherovich56e11d62016-02-29 16:46:51 +02001387
1388 if (IS_ERR(mr))
1389 return PTR_ERR(mr);
1390
Ilya Lesokhin8b7ff7f2017-08-17 15:52:29 +03001391 mr->allocated_from_cache = 0;
Ilya Lesokhinff740ae2017-08-17 15:52:30 +03001392 mr->live = 1;
Noa Osherovich56e11d62016-02-29 16:46:51 +02001393 } else {
1394 /*
1395 * UMR usable - update the existing mkey in place
1396 */
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +02001397 mr->ibmr.pd = pd;
1398 mr->access_flags = access_flags;
1399 mr->mmkey.iova = addr;
1400 mr->mmkey.size = len;
1401 mr->mmkey.pd = to_mpd(pd)->pdn;
1402
1403 if (flags & IB_MR_REREG_TRANS) {
1404 upd_flags = MLX5_IB_UPD_XLT_ADDR;
1405 if (flags & IB_MR_REREG_PD)
1406 upd_flags |= MLX5_IB_UPD_XLT_PD;
1407 if (flags & IB_MR_REREG_ACCESS)
1408 upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
1409 err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
1410 upd_flags);
1411 } else {
1412 err = rereg_umr(pd, mr, access_flags, flags);
1413 }
1414
Noa Osherovich56e11d62016-02-29 16:46:51 +02001415 if (err) {
1416 mlx5_ib_warn(dev, "Failed to rereg UMR\n");
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +02001417 ib_umem_release(mr->umem);
Ilya Lesokhinfbcd4982017-09-24 21:46:35 +03001418 clean_mr(dev, mr);
Noa Osherovich56e11d62016-02-29 16:46:51 +02001419 return err;
1420 }
1421 }
1422
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +02001423 set_mr_fileds(dev, mr, npages, len, access_flags);
Noa Osherovich56e11d62016-02-29 16:46:51 +02001424
Noa Osherovich56e11d62016-02-29 16:46:51 +02001425#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1426 update_odp_mr(mr);
1427#endif
Noa Osherovich56e11d62016-02-29 16:46:51 +02001428 return 0;
1429}
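/*
 * Flow summary for mlx5_ib_rereg_user_mr() (editor's note): a
 * hypothetical rereg with flags = IB_MR_REREG_TRANS | IB_MR_REREG_PD
 * first re-pins the new range with mr_umem_get(), then either rewrites
 * the mkey in place (use_umr_mtt_update() true: mlx5_ib_update_xlt()
 * with MLX5_IB_UPD_XLT_ADDR | MLX5_IB_UPD_XLT_PD), or retires the old
 * mkey (unreg_umr() for cache mkeys, destroy_mkey() otherwise) and
 * builds a fresh one over the same umem with reg_create().
 */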
1430
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001431static int
1432mlx5_alloc_priv_descs(struct ib_device *device,
1433 struct mlx5_ib_mr *mr,
1434 int ndescs,
1435 int desc_size)
1436{
1437 int size = ndescs * desc_size;
1438 int add_size;
1439 int ret;
1440
1441 add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
1442
1443 mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
1444 if (!mr->descs_alloc)
1445 return -ENOMEM;
1446
1447 mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);
1448
Bart Van Assche9b0c2892017-01-20 13:04:21 -08001449 mr->desc_map = dma_map_single(device->dev.parent, mr->descs,
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001450 size, DMA_TO_DEVICE);
Bart Van Assche9b0c2892017-01-20 13:04:21 -08001451 if (dma_mapping_error(device->dev.parent, mr->desc_map)) {
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001452 ret = -ENOMEM;
1453 goto err;
1454 }
1455
1456 return 0;
1457err:
1458 kfree(mr->descs_alloc);
1459
1460 return ret;
1461}
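/*
 * Worked sizing example (illustrative numbers): ndescs = 32 MTT
 * descriptors of 8 bytes give size = 256. With ARCH_KMALLOC_MINALIGN
 * of 8, add_size = 2040, so the 2296-byte allocation always contains a
 * MLX5_UMR_ALIGN (2048)-aligned window of at least 256 bytes for
 * PTR_ALIGN() to land in, even in the worst case where kzalloc()
 * returns a pointer 8 bytes past a 2048-byte boundary.
 */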
1462
1463static void
1464mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
1465{
1466 if (mr->descs) {
1467 struct ib_device *device = mr->ibmr.device;
1468 int size = mr->max_descs * mr->desc_size;
1469
Bart Van Assche9b0c2892017-01-20 13:04:21 -08001470 dma_unmap_single(device->dev.parent, mr->desc_map,
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001471 size, DMA_TO_DEVICE);
1472 kfree(mr->descs_alloc);
1473 mr->descs = NULL;
1474 }
1475}
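/*
 * Note that kfree() gets descs_alloc, the original kzalloc() pointer,
 * not the PTR_ALIGN()ed descs address the hardware was given --
 * passing the aligned pointer to kfree() would be invalid.
 */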
1476
Ilya Lesokhinfbcd4982017-09-24 21:46:35 +03001477static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
Eli Cohene126ba92013-07-07 17:25:49 +03001478{
Ilya Lesokhin8b7ff7f2017-08-17 15:52:29 +03001479 int allocated_from_cache = mr->allocated_from_cache;
Eli Cohene126ba92013-07-07 17:25:49 +03001480 int err;
1481
Sagi Grimberg8b91ffc2015-07-30 10:32:34 +03001482 if (mr->sig) {
1483 if (mlx5_core_destroy_psv(dev->mdev,
1484 mr->sig->psv_memory.psv_idx))
1485 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
1486 mr->sig->psv_memory.psv_idx);
1487 if (mlx5_core_destroy_psv(dev->mdev,
1488 mr->sig->psv_wire.psv_idx))
1489 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
1490 mr->sig->psv_wire.psv_idx);
1491 kfree(mr->sig);
1492 mr->sig = NULL;
1493 }
1494
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001495 mlx5_free_priv_descs(mr);
1496
Ilya Lesokhin8b7ff7f2017-08-17 15:52:29 +03001497 if (!allocated_from_cache) {
Kamal Heib5942d8a2017-08-17 15:52:31 +03001498 u32 key = mr->mmkey.key;
1499
Haggai Eranb4cfe442014-12-11 17:04:26 +02001500 err = destroy_mkey(dev, mr);
Kamal Heib5942d8a2017-08-17 15:52:31 +03001501 kfree(mr);
Eli Cohene126ba92013-07-07 17:25:49 +03001502 if (err) {
1503 mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
Kamal Heib5942d8a2017-08-17 15:52:31 +03001504 key, err);
Eli Cohene126ba92013-07-07 17:25:49 +03001505 return err;
1506 }
1507 } else {
Artemy Kovalyov49780d42017-01-18 16:58:10 +02001508 mlx5_mr_cache_free(dev, mr);
Eli Cohene126ba92013-07-07 17:25:49 +03001509 }
1510
Eli Cohene126ba92013-07-07 17:25:49 +03001511 return 0;
1512}
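/*
 * Editor's note on the split above: cache mkeys are returned to the MR
 * cache via mlx5_mr_cache_free(), amortizing the firmware
 * create/destroy cost, and keep their struct mlx5_ib_mr alive; only
 * non-cache mkeys are destroyed here, and only that branch kfree()s
 * the mr (its key is saved first so the warning can still print it).
 */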
1513
Ilya Lesokhinfbcd4982017-09-24 21:46:35 +03001514static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
Haggai Eran6aec21f2014-12-11 17:04:23 +02001515{
Haggai Eran6aec21f2014-12-11 17:04:23 +02001516 int npages = mr->npages;
1517 struct ib_umem *umem = mr->umem;
1518
1519#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
Haggai Eranb4cfe442014-12-11 17:04:26 +02001520 if (umem && umem->odp_data) {
1521 /* Prevent new page faults from succeeding */
1522 mr->live = 0;
Haggai Eran6aec21f2014-12-11 17:04:23 +02001523 /* Wait for all running page-fault handlers to finish. */
1524 synchronize_srcu(&dev->mr_srcu);
Haggai Eranb4cfe442014-12-11 17:04:26 +02001525 /* Destroy all page mappings */
Artemy Kovalyov81713d32017-01-18 16:58:11 +02001526 if (umem->odp_data->page_list)
1527 mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
1528 ib_umem_end(umem));
1529 else
1530 mlx5_ib_free_implicit_mr(mr);
Haggai Eranb4cfe442014-12-11 17:04:26 +02001531 /*
1532 * For ODP, release the umem before destroying the MR so
1533 * that no invalidation still in flight can be looking at
1534 * the *mr struct.
1535 */
1536 ib_umem_release(umem);
1537 atomic_sub(npages, &dev->mdev->priv.reg_pages);
1538
1539 /* Avoid double-freeing the umem. */
1540 umem = NULL;
1541 }
Haggai Eran6aec21f2014-12-11 17:04:23 +02001542#endif
1543
Ilya Lesokhinfbcd4982017-09-24 21:46:35 +03001544 clean_mr(dev, mr);
Haggai Eran6aec21f2014-12-11 17:04:23 +02001545
1546 if (umem) {
1547 ib_umem_release(umem);
1548 atomic_sub(npages, &dev->mdev->priv.reg_pages);
1549 }
1550
1551 return 0;
1552}
1553
Ilya Lesokhinfbcd4982017-09-24 21:46:35 +03001554int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
1555{
1556 struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
1557 struct mlx5_ib_mr *mr = to_mmr(ibmr);
1558
1559 return dereg_mr(dev, mr);
1560}
1561
Sagi Grimberg9bee1782015-07-30 10:32:35 +03001562struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
1563 enum ib_mr_type mr_type,
1564 u32 max_num_sg)
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001565{
1566 struct mlx5_ib_dev *dev = to_mdev(pd->device);
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001567 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
Sagi Grimbergb005d312016-02-29 19:07:33 +02001568 int ndescs = ALIGN(max_num_sg, 4);
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001569 struct mlx5_ib_mr *mr;
1570 void *mkc;
1571 u32 *in;
Sagi Grimbergb005d312016-02-29 19:07:33 +02001572 int err;
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001573
1574 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1575 if (!mr)
1576 return ERR_PTR(-ENOMEM);
1577
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001578 in = kzalloc(inlen, GFP_KERNEL);
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001579 if (!in) {
1580 err = -ENOMEM;
1581 goto err_free;
1582 }
1583
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001584 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1585 MLX5_SET(mkc, mkc, free, 1);
1586 MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
1587 MLX5_SET(mkc, mkc, qpn, 0xffffff);
1588 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001589
Sagi Grimberg9bee1782015-07-30 10:32:35 +03001590 if (mr_type == IB_MR_TYPE_MEM_REG) {
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001591 mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
1592 MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001593 err = mlx5_alloc_priv_descs(pd->device, mr,
Artemy Kovalyov31616252017-01-02 11:37:42 +02001594 ndescs, sizeof(struct mlx5_mtt));
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001595 if (err)
1596 goto err_free_in;
1597
Artemy Kovalyov31616252017-01-02 11:37:42 +02001598 mr->desc_size = sizeof(struct mlx5_mtt);
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001599 mr->max_descs = ndescs;
Sagi Grimbergb005d312016-02-29 19:07:33 +02001600 } else if (mr_type == IB_MR_TYPE_SG_GAPS) {
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001601 mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
Sagi Grimbergb005d312016-02-29 19:07:33 +02001602
1603 err = mlx5_alloc_priv_descs(pd->device, mr,
1604 ndescs, sizeof(struct mlx5_klm));
1605 if (err)
1606 goto err_free_in;
1607 mr->desc_size = sizeof(struct mlx5_klm);
1608 mr->max_descs = ndescs;
Sagi Grimberg9bee1782015-07-30 10:32:35 +03001609 } else if (mr_type == IB_MR_TYPE_SIGNATURE) {
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001610 u32 psv_index[2];
1611
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001612 MLX5_SET(mkc, mkc, bsf_en, 1);
1613 MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001614 mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
1615 if (!mr->sig) {
1616 err = -ENOMEM;
1617 goto err_free_in;
1618 }
1619
1620 /* create mem & wire PSVs */
Jack Morgenstein9603b612014-07-28 23:30:22 +03001621 err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001622 2, psv_index);
1623 if (err)
1624 goto err_free_sig;
1625
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001626 mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001627 mr->sig->psv_memory.psv_idx = psv_index[0];
1628 mr->sig->psv_wire.psv_idx = psv_index[1];
Sagi Grimbergd5436ba2014-02-23 14:19:12 +02001629
1630 mr->sig->sig_status_checked = true;
1631 mr->sig->sig_err_exists = false;
1632 /* Arm SIGERR on the next UMR */
1633 ++mr->sig->sigerr_count;
Sagi Grimberg9bee1782015-07-30 10:32:35 +03001634 } else {
1635 mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
1636 err = -EINVAL;
1637 goto err_free_in;
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001638 }
1639
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001640 MLX5_SET(mkc, mkc, access_mode, mr->access_mode);
1641 MLX5_SET(mkc, mkc, umr_en, 1);
1642
Nitzan Carmi45e6ae72017-12-26 11:20:20 +02001643 mr->ibmr.device = pd->device;
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001644 err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001645 if (err)
1646 goto err_destroy_psv;
1647
Artemy Kovalyovaa8e08d2017-01-02 11:37:48 +02001648 mr->mmkey.type = MLX5_MKEY_MR;
Matan Baraka606b0f2016-02-29 18:05:28 +02001649 mr->ibmr.lkey = mr->mmkey.key;
1650 mr->ibmr.rkey = mr->mmkey.key;
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001651 mr->umem = NULL;
1652 kfree(in);
1653
1654 return &mr->ibmr;
1655
1656err_destroy_psv:
1657 if (mr->sig) {
Jack Morgenstein9603b612014-07-28 23:30:22 +03001658 if (mlx5_core_destroy_psv(dev->mdev,
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001659 mr->sig->psv_memory.psv_idx))
1660 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
1661 mr->sig->psv_memory.psv_idx);
Jack Morgenstein9603b612014-07-28 23:30:22 +03001662 if (mlx5_core_destroy_psv(dev->mdev,
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001663 mr->sig->psv_wire.psv_idx))
1664 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
1665 mr->sig->psv_wire.psv_idx);
1666 }
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001667 mlx5_free_priv_descs(mr);
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001668err_free_sig:
1669 kfree(mr->sig);
1670err_free_in:
1671 kfree(in);
1672err_free:
1673 kfree(mr);
1674 return ERR_PTR(err);
1675}
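/*
 * Consumer-side sketch (hypothetical ULP code, not part of this
 * driver): allocate a fast-registration MR and load a DMA-mapped
 * scatterlist into it through the generic verbs wrappers that land in
 * mlx5_ib_alloc_mr()/mlx5_ib_map_mr_sg() above.
 */
#if 0	/* illustrative only */
static int example_fast_reg(struct ib_pd *pd, struct scatterlist *sgl,
			    int sg_nents)
{
	struct ib_mr *mr;
	int n;

	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	/* sgl must already be DMA-mapped by the caller */
	n = ib_map_mr_sg(mr, sgl, sg_nents, NULL, PAGE_SIZE);
	if (n < sg_nents) {
		ib_dereg_mr(mr);
		return n < 0 ? n : -EINVAL;
	}

	/* ... post an IB_WR_REG_MR WR, then hand out mr->rkey ... */
	return 0;
}
#endif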
1676
Matan Barakd2370e02016-02-29 18:05:30 +02001677struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
1678 struct ib_udata *udata)
1679{
1680 struct mlx5_ib_dev *dev = to_mdev(pd->device);
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001681 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
Matan Barakd2370e02016-02-29 18:05:30 +02001682 struct mlx5_ib_mw *mw = NULL;
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001683 u32 *in = NULL;
1684 void *mkc;
Matan Barakd2370e02016-02-29 18:05:30 +02001685 int ndescs;
1686 int err;
1687 struct mlx5_ib_alloc_mw req = {};
1688 struct {
1689 __u32 comp_mask;
1690 __u32 response_length;
1691 } resp = {};
1692
1693 err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
1694 if (err)
1695 return ERR_PTR(err);
1696
1697 if (req.comp_mask || req.reserved1 || req.reserved2)
1698 return ERR_PTR(-EOPNOTSUPP);
1699
1700 if (udata->inlen > sizeof(req) &&
1701 !ib_is_udata_cleared(udata, sizeof(req),
1702 udata->inlen - sizeof(req)))
1703 return ERR_PTR(-EOPNOTSUPP);
1704
1705 ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);
1706
1707 mw = kzalloc(sizeof(*mw), GFP_KERNEL);
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001708 in = kzalloc(inlen, GFP_KERNEL);
Matan Barakd2370e02016-02-29 18:05:30 +02001709 if (!mw || !in) {
1710 err = -ENOMEM;
1711 goto free;
1712 }
1713
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001714 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
Matan Barakd2370e02016-02-29 18:05:30 +02001715
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001716 MLX5_SET(mkc, mkc, free, 1);
1717 MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
1718 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
1719 MLX5_SET(mkc, mkc, umr_en, 1);
1720 MLX5_SET(mkc, mkc, lr, 1);
1721 MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_KLMS);
1722 MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
1723 MLX5_SET(mkc, mkc, qpn, 0xffffff);
1724
1725 err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
Matan Barakd2370e02016-02-29 18:05:30 +02001726 if (err)
1727 goto free;
1728
Artemy Kovalyovaa8e08d2017-01-02 11:37:48 +02001729 mw->mmkey.type = MLX5_MKEY_MW;
Matan Barakd2370e02016-02-29 18:05:30 +02001730 mw->ibmw.rkey = mw->mmkey.key;
Artemy Kovalyovdb570d72017-04-05 09:23:59 +03001731 mw->ndescs = ndescs;
Matan Barakd2370e02016-02-29 18:05:30 +02001732
1733 resp.response_length = min(offsetof(typeof(resp), response_length) +
1734 sizeof(resp.response_length), udata->outlen);
1735 if (resp.response_length) {
1736 err = ib_copy_to_udata(udata, &resp, resp.response_length);
1737 if (err) {
1738 mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
1739 goto free;
1740 }
1741 }
1742
1743 kfree(in);
1744 return &mw->ibmw;
1745
1746free:
1747 kfree(mw);
1748 kfree(in);
1749 return ERR_PTR(err);
1750}
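/*
 * Editor's note: in-kernel memory-window consumers were removed from
 * the RDMA core before this code, so this entry point is reached only
 * through uverbs. For a type-2 MW the en_rinval bit set above enables
 * remote invalidation, letting the peer retire the window's rkey with
 * a send-with-invalidate.
 */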
1751
1752int mlx5_ib_dealloc_mw(struct ib_mw *mw)
1753{
1754 struct mlx5_ib_mw *mmw = to_mmw(mw);
1755 int err;
1756
1757 err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
1758 &mmw->mmkey);
1759 if (!err)
1760 kfree(mmw);
1761 return err;
1762}
1763
Sagi Grimbergd5436ba2014-02-23 14:19:12 +02001764int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
1765 struct ib_mr_status *mr_status)
1766{
1767 struct mlx5_ib_mr *mmr = to_mmr(ibmr);
1768 int ret = 0;
1769
1770 if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
1771 pr_err("Invalid status check mask\n");
1772 ret = -EINVAL;
1773 goto done;
1774 }
1775
1776 mr_status->fail_status = 0;
1777 if (check_mask & IB_MR_CHECK_SIG_STATUS) {
1778 if (!mmr->sig) {
1779 ret = -EINVAL;
1780 pr_err("signature status check requested on a non-signature enabled MR\n");
1781 goto done;
1782 }
1783
1784 mmr->sig->sig_status_checked = true;
1785 if (!mmr->sig->sig_err_exists)
1786 goto done;
1787
1788 if (ibmr->lkey == mmr->sig->err_item.key)
1789 memcpy(&mr_status->sig_err, &mmr->sig->err_item,
1790 sizeof(mr_status->sig_err));
1791 else {
1792 mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
1793 mr_status->sig_err.sig_err_offset = 0;
1794 mr_status->sig_err.key = mmr->sig->err_item.key;
1795 }
1796
1797 mmr->sig->sig_err_exists = false;
1798 mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
1799 }
1800
1801done:
1802 return ret;
1803}
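/*
 * Usage sketch (hypothetical ULP, modelled loosely on signature
 * offload consumers such as iSER): after a transfer over a
 * signature-enabled MR completes, poll it for signature errors.
 */
#if 0	/* illustrative only */
static void example_check_sig(struct ib_mr *sig_mr)
{
	struct ib_mr_status st;

	if (ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &st))
		return;

	if (st.fail_status & IB_MR_CHECK_SIG_STATUS)
		pr_warn("sig error type %d at offset %llu, key 0x%x\n",
			st.sig_err.err_type, st.sig_err.sig_err_offset,
			st.sig_err.key);
}
#endif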
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001804
Sagi Grimbergb005d312016-02-29 19:07:33 +02001805static int
1806mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
1807 struct scatterlist *sgl,
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001808 unsigned short sg_nents,
Bart Van Assche9aa8b322016-05-12 10:49:15 -07001809 unsigned int *sg_offset_p)
Sagi Grimbergb005d312016-02-29 19:07:33 +02001810{
1811 struct scatterlist *sg = sgl;
1812 struct mlx5_klm *klms = mr->descs;
Bart Van Assche9aa8b322016-05-12 10:49:15 -07001813 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
Sagi Grimbergb005d312016-02-29 19:07:33 +02001814 u32 lkey = mr->ibmr.pd->local_dma_lkey;
1815 int i;
1816
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001817 mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
Sagi Grimbergb005d312016-02-29 19:07:33 +02001818 mr->ibmr.length = 0;
1819 mr->ndescs = sg_nents;
1820
1821 for_each_sg(sgl, sg, sg_nents, i) {
Bart Van Assche99975cd2017-04-24 15:15:28 -07001822 if (unlikely(i >= mr->max_descs))
Sagi Grimbergb005d312016-02-29 19:07:33 +02001823 break;
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001824 klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
1825 klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
Sagi Grimbergb005d312016-02-29 19:07:33 +02001826 klms[i].key = cpu_to_be32(lkey);
Sagi Grimberg0a49f2c2017-04-23 14:31:42 +03001827 mr->ibmr.length += sg_dma_len(sg) - sg_offset;
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001828
1829 sg_offset = 0;
Sagi Grimbergb005d312016-02-29 19:07:33 +02001830 }
1831
Bart Van Assche9aa8b322016-05-12 10:49:15 -07001832 if (sg_offset_p)
1833 *sg_offset_p = sg_offset;
1834
Sagi Grimbergb005d312016-02-29 19:07:33 +02001835 return i;
1836}
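/*
 * Worked example for the KLM packing above (illustrative numbers):
 * two SG entries of 0x1000 bytes at DMA addresses A and B with
 * *sg_offset_p = 0x10 become
 *	klms[0] = { .va = A + 0x10, .bcount = 0xff0,  .key = lkey }
 *	klms[1] = { .va = B,        .bcount = 0x1000, .key = lkey }
 * giving mr->ibmr.iova = A + 0x10 and mr->ibmr.length = 0x1ff0; the
 * offset is consumed by the first entry only, and *sg_offset_p is
 * written back as 0.
 */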
1837
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001838static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
1839{
1840 struct mlx5_ib_mr *mr = to_mmr(ibmr);
1841 __be64 *descs;
1842
1843 if (unlikely(mr->ndescs == mr->max_descs))
1844 return -ENOMEM;
1845
1846 descs = mr->descs;
1847 descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
1848
1849 return 0;
1850}
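/*
 * mlx5_set_page() is the per-page callback handed to ib_sg_to_pages():
 * the core splits the scatterlist into page-sized chunks and the
 * driver records each address as an MTT entry with the local
 * read/write enable bits (MLX5_EN_RD | MLX5_EN_WR) or'ed in.
 */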
1851
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001852int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
Bart Van Assche9aa8b322016-05-12 10:49:15 -07001853 unsigned int *sg_offset)
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001854{
1855 struct mlx5_ib_mr *mr = to_mmr(ibmr);
1856 int n;
1857
1858 mr->ndescs = 0;
1859
1860 ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
1861 mr->desc_size * mr->max_descs,
1862 DMA_TO_DEVICE);
1863
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001864 if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001865 n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset);
Sagi Grimbergb005d312016-02-29 19:07:33 +02001866 else
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001867 n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
1868 mlx5_set_page);
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001869
1870 ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
1871 mr->desc_size * mr->max_descs,
1872 DMA_TO_DEVICE);
1873
1874 return n;
1875}
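/*
 * Editor's sketch of when the two branches above matter: an
 * IB_MR_TYPE_SG_GAPS MR takes the KLM path and can map a scatterlist
 * with arbitrary element offsets and lengths in one call, while an
 * IB_MR_TYPE_MEM_REG MR takes the page-list path and needs
 * page-aligned interior elements:
 *
 *	mr = ib_alloc_mr(pd, IB_MR_TYPE_SG_GAPS, nents);
 *	n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
 */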