/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */


#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static __be64 mlx5_ib_update_mtt_emergency_buffer[
		MLX5_UMR_MTT_MIN_CHUNK_SIZE/sizeof(__be64)]
	__aligned(MLX5_UMR_ALIGN);
static DEFINE_MUTEX(mlx5_ib_update_mtt_emergency_buffer_mutex);
#endif

static int clean_mr(struct mlx5_ib_mr *mr);

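/*
 * Destroy the hardware mkey backing an MR. With on-demand paging enabled,
 * also wait (via SRCU) for any page-fault handlers still using the MR to
 * complete before the caller frees it.
 */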
static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/* Wait until all page fault handlers using the mr complete. */
	synchronize_srcu(&dev->mr_srcu);
#endif

	return err;
}

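/* Map an MR order onto its index in the MR cache entry array. */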
static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	else
		return order - cache->ent[0].order;
}

static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
{
	return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
		length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void update_odp_mr(struct mlx5_ib_mr *mr)
{
	if (mr->umem->odp_data) {
		/*
		 * This barrier prevents the compiler from moving the
		 * setting of umem->odp_data->private to point to our
		 * MR, before reg_umr finished, to ensure that the MR
		 * initialization has finished before starting to
		 * handle invalidations.
		 */
		smp_wmb();
		mr->umem->odp_data->private = mr;
		/*
		 * Make sure we will see the new
		 * umem->odp_data->private value in the invalidation
		 * routines, before we can get page faults on the
		 * MR. Page faults can happen once we put the MR in
		 * the tree, below this line. Without the barrier,
		 * there can be a fault handling and an invalidation
		 * before umem->odp_data->private == mr is visible to
		 * the invalidation handler.
		 */
		smp_wmb();
	}
}
#endif

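/*
 * Completion callback for asynchronous (cached) mkey creation. On success the
 * new MR is added to its cache entry and inserted into the mkey radix tree;
 * on failure the MR is freed and further cache filling is briefly delayed.
 */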
static void reg_mr_callback(int status, void *context)
{
	struct mlx5_ib_mr *mr = context;
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	u8 key;
	unsigned long flags;
	struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
	int err;

	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
	mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	ent->size++;
	spin_unlock_irqrestore(&ent->lock, flags);

	write_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key),
				&mr->mmkey);
	if (err)
		pr_err("Error inserting to mkey tree. 0x%x\n", -err);
	write_unlock_irqrestore(&table->lock, flags);
}

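/*
 * Asynchronously create up to @num cached MRs for cache entry @c. Creation is
 * throttled to MAX_PENDING_REG_MR outstanding requests per entry; -EAGAIN is
 * returned when that limit is hit.
 */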
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	int npages = 1 << ent->order;
	void *mkc;
	u32 *in;
	int err = 0;
	int i;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	for (i = 0; i < num; i++) {
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			break;
		}

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		mr->order = ent->order;
		mr->umred = 1;
		mr->dev = dev;

		MLX5_SET(mkc, mkc, free, 1);
		MLX5_SET(mkc, mkc, umr_en, 1);
		MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);

		MLX5_SET(mkc, mkc, qpn, 0xffffff);
		MLX5_SET(mkc, mkc, translations_octword_size, (npages + 1) / 2);
		MLX5_SET(mkc, mkc, log_page_size, 12);

		spin_lock_irq(&ent->lock);
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
					       in, inlen,
					       mr->out, sizeof(mr->out),
					       reg_mr_callback, mr);
		if (err) {
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}

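/* Tear down and free up to @num cached MRs from cache entry @c. */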
static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;
	int i;

	for (i = 0; i < num; i++) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = destroy_mkey(dev, mr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}

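/*
 * debugfs handlers for the per-entry "size" and "limit" files: writing "size"
 * grows or shrinks the entry to the requested number of MRs, writing "limit"
 * sets the watermark that the background worker tries to maintain.
 */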
static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	u32 var;
	int err;
	int c;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var < ent->limit)
		return -EINVAL;

	if (var > ent->size) {
		do {
			err = add_keys(dev, c, var - ent->size);
			if (err && err != -EAGAIN)
				return err;

			usleep_range(3000, 5000);
		} while (err);
	} else if (var < ent->size) {
		remove_keys(dev, c, ent->size - var);
	}

	return count;
}

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};

static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	u32 var;
	int err;
	int c;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var > ent->size)
		return -EINVAL;

	ent->limit = var;

	if (ent->cur < ent->limit) {
		err = add_keys(dev, c, 2 * ent->limit - ent->cur);
		if (err)
			return err;
	}

	return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};

static int someone_adding(struct mlx5_mr_cache *cache)
{
	int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		if (cache->ent[i].cur < cache->ent[i].limit)
			return 1;
	}

	return 0;
}

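/*
 * Background worker for one cache entry: refills the entry while it holds
 * fewer than 2 * limit MRs and, when it holds more than that, frees the
 * excess once the system has been idle for a while.
 */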
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);
	int err;

	if (cache->stopped)
		return;

	ent = &dev->cache.ent[i];
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err == -EAGAIN) {
				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
					    i + 2);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(3));
			} else if (err) {
				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
					     i + 2, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			} else {
				queue_work(cache->wq, &ent->work);
			}
		}
	} else if (ent->cur > 2 * ent->limit) {
		/*
		 * The remove_keys() logic is performed as a garbage
		 * collection task. Such a task is intended to run when no
		 * other active processes are running.
		 *
		 * The need_resched() will return TRUE if there are user tasks
		 * to be activated in the near future.
		 *
		 * In such a case, we don't execute remove_keys() and postpone
		 * the garbage collection work to try to run in the next
		 * cycle, in order to free CPU resources to other tasks.
		 */
		if (!need_resched() && !someone_adding(cache) &&
		    time_after(jiffies, cache->last_add + 300 * HZ)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
		} else {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		}
	}
}

static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}

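/*
 * Take an MR of at least @order from the cache, searching larger entries when
 * the exact one is empty, and kick the worker to replenish. Returns NULL if
 * nothing suitable is currently cached.
 */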
static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;
	int c;
	int i;

	c = order2idx(dev, order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
		return NULL;
	}

	for (i = c; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];

		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			break;
		}
		spin_unlock_irq(&ent->lock);

		queue_work(cache->wq, &ent->work);
	}

	if (!mr)
		cache->ent[c].miss++;

	return mr;
}

static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int shrink = 0;
	int c;

	c = order2idx(dev, mr->order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
		return;
	}
	ent = &cache->ent[c];
	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	if (ent->cur > 2 * ent->limit)
		shrink = 1;
	spin_unlock_irq(&ent->lock);

	if (shrink)
		queue_work(cache->wq, &ent->work);
}

static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;

	cancel_delayed_work(&ent->dwork);
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = destroy_mkey(dev, mr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}

static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int i;

	if (!mlx5_debugfs_root)
		return 0;

	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
	if (!cache->root)
		return -ENOMEM;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		sprintf(ent->name, "%d", ent->order);
		ent->dir = debugfs_create_dir(ent->name, cache->root);
		if (!ent->dir)
			return -ENOMEM;

		ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
						 &size_fops);
		if (!ent->fsize)
			return -ENOMEM;

		ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
						  &limit_fops);
		if (!ent->flimit)
			return -ENOMEM;

		ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
					       &ent->cur);
		if (!ent->fcur)
			return -ENOMEM;

		ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
						&ent->miss);
		if (!ent->fmiss)
			return -ENOMEM;
	}

	return 0;
}

static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root)
		return;

	debugfs_remove_recursive(dev->cache.root);
}

static void delay_time_func(unsigned long ctx)
{
	struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;

	dev->fill_delay = 0;
}

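/*
 * Set up the MR cache: one ordered workqueue plus one entry per supported
 * order (starting at order 2), each with its own worker that fills the entry
 * up to the limit taken from the device profile (PF only).
 */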
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int limit;
	int err;
	int i;

	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		INIT_LIST_HEAD(&cache->ent[i].head);
		spin_lock_init(&cache->ent[i].lock);

		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);
		ent->order = i + 2;
		ent->dev = dev;

		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
		    (mlx5_core_is_pf(dev->mdev)))
			limit = dev->mdev->profile->mr_cache[i].limit;
		else
			limit = 0;

		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
		ent->limit = limit;
		queue_work(cache->wq, &ent->work);
	}

	err = mlx5_mr_cache_debugfs_init(dev);
	if (err)
		mlx5_ib_warn(dev, "cache debugfs failure\n");

	return 0;
}

static void wait_for_async_commands(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int total = 0;
	int i;
	int j;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		for (j = 0; j < 1000; j++) {
			if (!ent->pending)
				break;
			msleep(50);
		}
	}
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		total += ent->pending;
	}

	if (total)
		mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total);
	else
		mlx5_ib_warn(dev, "done with all pending requests\n");
}

int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	int i;

	dev->cache.stopped = 1;
	flush_workqueue(dev->cache.wq);

	mlx5_mr_cache_debugfs_cleanup(dev);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
		clean_keys(dev, i);

	destroy_workqueue(dev->cache.wq);
	wait_for_async_commands(dev);
	del_timer_sync(&dev->delay_timer);

	return 0;
}

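/*
 * Allocate a DMA MR: a physical-address mkey with length64 set, so it covers
 * the whole address space with the requested access rights.
 */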
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET(mkc, mkc, length64, 1);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, 0);

	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

static int get_octo_len(u64 addr, u64 len, int page_size)
{
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
	return (npages + 1) / 2;
}

static int use_umr(int order)
{
	return order <= MLX5_MAX_UMR_SHIFT;
}

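/*
 * Build the page-address (PAS) array for a umem and DMA-map it for a UMR
 * post. The buffer is over-allocated so it can be aligned to MLX5_UMR_ALIGN
 * and padded to a multiple of MLX5_UMR_MTT_ALIGNMENT; the caller unmaps and
 * frees *mr_pas.
 */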
static int dma_map_mr_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int npages, int page_shift, int *size,
			  __be64 **mr_pas, dma_addr_t *dma)
{
	__be64 *pas;
	struct device *ddev = dev->ib_dev.dma_device;

	/*
	 * UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
	 * To avoid copying garbage after the pas array, we allocate
	 * a little more.
	 */
	*size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT);
	*mr_pas = kmalloc(*size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
	if (!(*mr_pas))
		return -ENOMEM;

	pas = PTR_ALIGN(*mr_pas, MLX5_UMR_ALIGN);
	mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT);
	/* Clear padding after the actual pages. */
	memset(pas + npages, 0, *size - npages * sizeof(u64));

	*dma = dma_map_single(ddev, pas, *size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, *dma)) {
		kfree(*mr_pas);
		return -ENOMEM;
	}

	return 0;
}

static void prep_umr_wqe_common(struct ib_pd *pd, struct ib_send_wr *wr,
				struct ib_sge *sg, u64 dma, int n, u32 key,
				int page_shift)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	sg->addr = dma;
	sg->length = ALIGN(sizeof(u64) * n, 64);
	sg->lkey = dev->umrc.pd->local_dma_lkey;

	wr->next = NULL;
	wr->sg_list = sg;
	if (n)
		wr->num_sge = 1;
	else
		wr->num_sge = 0;

	wr->opcode = MLX5_IB_WR_UMR;

	umrwr->npages = n;
	umrwr->page_shift = page_shift;
	umrwr->mkey = key;
}

static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
			     struct ib_sge *sg, u64 dma, int n, u32 key,
			     int page_shift, u64 virt_addr, u64 len,
			     int access_flags)
{
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	prep_umr_wqe_common(pd, wr, sg, dma, n, key, page_shift);

	wr->send_flags = 0;

	umrwr->target.virt_addr = virt_addr;
	umrwr->length = len;
	umrwr->access_flags = access_flags;
	umrwr->pd = pd;
}

static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
			       struct ib_send_wr *wr, u32 key)
{
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	wr->send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr->opcode = MLX5_IB_WR_UMR;
	umrwr->mkey = key;
}

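/*
 * Pin the user memory for [start, start + length) and work out how it can be
 * mapped: total pages, the largest usable page shift, the number of compound
 * pages at that shift (ncont) and the corresponding cache order.
 */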
static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
		       int access_flags, struct ib_umem **umem,
		       int *npages, int *page_shift, int *ncont,
		       int *order)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int err;

	*umem = ib_umem_get(pd->uobject->context, start, length,
			    access_flags, 0);
	err = PTR_ERR_OR_ZERO(*umem);
	if (err < 0) {
		mlx5_ib_err(dev, "umem get failed (%d)\n", err);
		return err;
	}

	mlx5_ib_cont_pages(*umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
			   page_shift, ncont, order);
	if (!*npages) {
		mlx5_ib_warn(dev, "avoid zero region\n");
		ib_umem_release(*umem);
		return -EINVAL;
	}

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    *npages, *ncont, *order, *page_shift);

	return 0;
}

static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_umr_context *context =
		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);

	context->status = wc->status;
	complete(&context->done);
}

static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	context->cqe.done = mlx5_ib_umr_done;
	context->status = -1;
	init_completion(&context->done);
}

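/*
 * Register a user MR through the UMR QP: take a pre-created mkey from the
 * cache (topping the cache up if needed), DMA-map the page list and post a
 * UMR WQE that points the mkey at it. Returns ERR_PTR(-EAGAIN) when the
 * cache cannot supply an MR of this order.
 */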
static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
				  u64 virt_addr, u64 len, int npages,
				  int page_shift, int order, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct device *ddev = dev->ib_dev.dma_device;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct mlx5_umr_wr umrwr = {};
	struct ib_send_wr *bad;
	struct mlx5_ib_mr *mr;
	struct ib_sge sg;
	int size;
	__be64 *mr_pas;
	dma_addr_t dma;
	int err = 0;
	int i;

	for (i = 0; i < 1; i++) {
		mr = alloc_cached_mr(dev, order);
		if (mr)
			break;

		err = add_keys(dev, order2idx(dev, order), 1);
		if (err && err != -EAGAIN) {
			mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
			break;
		}
	}

	if (!mr)
		return ERR_PTR(-EAGAIN);

	err = dma_map_mr_pas(dev, umem, npages, page_shift, &size, &mr_pas,
			     &dma);
	if (err)
		goto free_mr;

	mlx5_ib_init_umr_context(&umr_context);

	umrwr.wr.wr_cqe = &umr_context.cqe;
	prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
			 page_shift, virt_addr, len, access_flags);

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "post send failed, err %d\n", err);
		goto unmap_dma;
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed\n");
			err = -EFAULT;
		}
	}

	mr->mmkey.iova = virt_addr;
	mr->mmkey.size = len;
	mr->mmkey.pd = to_mpd(pd)->pdn;

	mr->live = 1;

unmap_dma:
	up(&umrc->sem);
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

	kfree(mr_pas);

free_mr:
	if (err) {
		free_cached_mr(dev, mr);
		return ERR_PTR(err);
	}

	return mr;
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
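/*
 * Update a range of MTT entries of an ODP MR through UMR, either pointing
 * them at the current umem pages or zapping them. Falls back to a small
 * static, mutex-protected buffer when the atomic page allocation fails,
 * since this can be called from the invalidation path.
 */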
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
		       int zap)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct device *ddev = dev->ib_dev.dma_device;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct ib_umem *umem = mr->umem;
	int size;
	__be64 *pas;
	dma_addr_t dma;
	struct ib_send_wr *bad;
	struct mlx5_umr_wr wr;
	struct ib_sge sg;
	int err = 0;
	const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / sizeof(u64);
	const int page_index_mask = page_index_alignment - 1;
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t pages_iter = 0;
	int use_emergency_buf = 0;

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly */
	if (start_page_index & page_index_mask) {
		npages += start_page_index & page_index_mask;
		start_page_index &= ~page_index_mask;
	}

	pages_to_map = ALIGN(npages, page_index_alignment);

	if (start_page_index + pages_to_map > MLX5_MAX_UMR_PAGES)
		return -EINVAL;

	size = sizeof(u64) * pages_to_map;
	size = min_t(int, PAGE_SIZE, size);
	/* We allocate with GFP_ATOMIC to avoid recursion into page-reclaim
	 * code, when we are called from an invalidation. The pas buffer must
	 * be 2k-aligned for Connect-IB. */
	pas = (__be64 *)get_zeroed_page(GFP_ATOMIC);
	if (!pas) {
		mlx5_ib_warn(dev, "unable to allocate memory during MTT update, falling back to slower chunked mechanism.\n");
		pas = mlx5_ib_update_mtt_emergency_buffer;
		size = MLX5_UMR_MTT_MIN_CHUNK_SIZE;
		use_emergency_buf = 1;
		mutex_lock(&mlx5_ib_update_mtt_emergency_buffer_mutex);
		memset(pas, 0, size);
	}
	pages_iter = size / sizeof(u64);
	dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during MTT update.\n");
		err = -ENOMEM;
		goto free_pas;
	}

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, start_page_index += pages_iter) {
		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);

		npages = min_t(size_t,
			       pages_iter,
			       ib_umem_num_pages(umem) - start_page_index);

		if (!zap) {
			__mlx5_ib_populate_pas(dev, umem, PAGE_SHIFT,
					       start_page_index, npages, pas,
					       MLX5_IB_MTT_PRESENT);
			/* Clear padding after the pages brought from the
			 * umem. */
			memset(pas + npages, 0, size - npages * sizeof(u64));
		}

		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

		mlx5_ib_init_umr_context(&umr_context);

		memset(&wr, 0, sizeof(wr));
		wr.wr.wr_cqe = &umr_context.cqe;

		sg.addr = dma;
		sg.length = ALIGN(npages * sizeof(u64),
				  MLX5_UMR_MTT_ALIGNMENT);
		sg.lkey = dev->umrc.pd->local_dma_lkey;

		wr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
				   MLX5_IB_SEND_UMR_UPDATE_MTT;
		wr.wr.sg_list = &sg;
		wr.wr.num_sge = 1;
		wr.wr.opcode = MLX5_IB_WR_UMR;
		wr.npages = sg.length / sizeof(u64);
		wr.page_shift = PAGE_SHIFT;
		wr.mkey = mr->mmkey.key;
		wr.target.offset = start_page_index;

		down(&umrc->sem);
		err = ib_post_send(umrc->qp, &wr.wr, &bad);
		if (err) {
			mlx5_ib_err(dev, "UMR post send failed, err %d\n", err);
		} else {
			wait_for_completion(&umr_context.done);
			if (umr_context.status != IB_WC_SUCCESS) {
				mlx5_ib_err(dev, "UMR completion failed, code %d\n",
					    umr_context.status);
				err = -EFAULT;
			}
		}
		up(&umrc->sem);
	}
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_pas:
	if (!use_emergency_buf)
		free_page((unsigned long)pas);
	else
		mutex_unlock(&mlx5_ib_update_mtt_emergency_buffer_mutex);

	return err;
}
#endif

/*
 * If ibmr is NULL it will be allocated by reg_create.
 * Else, the given ibmr will be used.
 */
static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
				     u64 virt_addr, u64 length,
				     struct ib_umem *umem, int npages,
				     int page_shift, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	__be64 *pas;
	void *mkc;
	int inlen;
	u32 *in;
	int err;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
		sizeof(*pas) * ((npages + 1) / 2) * 2;
	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	mlx5_ib_populate_pas(dev, umem, page_shift, pas,
			     pg_cap ? MLX5_IB_MTT_PRESENT : 0);

	/* The pg_access bit allows setting the access flags
	 * in the page list submitted with the command. */
	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET64(mkc, mkc, start_addr, virt_addr);
	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(virt_addr, length, 1 << page_shift));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
		 get_octo_len(virt_addr, length, 1 << page_shift));

	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->umem = umem;
	mr->dev = dev;
	mr->live = 1;
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);

	return mr;

err_2:
	kvfree(in);

err_1:
	if (!ibmr)
		kfree(mr);

	return ERR_PTR(err);
}

static void set_mr_fileds(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			  int npages, u64 length, int access_flags)
{
	mr->npages = npages;
	atomic_add(npages, &dev->mdev->priv.reg_pages);
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.length = length;
	mr->access_flags = access_flags;
}

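/*
 * ib_reg_user_mr() entry point: pin the user buffer and register it, through
 * the UMR QP and the MR cache when the order allows it, otherwise with a
 * regular CREATE_MKEY command via reg_create().
 */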
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	struct ib_umem *umem;
	int page_shift;
	int npages;
	int ncont;
	int order;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);
	err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
			  &page_shift, &ncont, &order);

	if (err < 0)
		return ERR_PTR(err);

	if (use_umr(order)) {
		mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
			     order, access_flags);
		if (PTR_ERR(mr) == -EAGAIN) {
			mlx5_ib_dbg(dev, "cache empty for order %d", order);
			mr = NULL;
		}
	} else if (access_flags & IB_ACCESS_ON_DEMAND) {
		err = -EINVAL;
		pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
		goto error;
	}

	if (!mr)
		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
				page_shift, access_flags);

	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto error;
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	mr->umem = umem;
	set_mr_fileds(dev, mr, npages, length, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif

	return &mr->ibmr;

error:
	ib_umem_release(umem);
	return ERR_PTR(err);
}

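/*
 * Release a cached mkey by posting a UMR "unreg" WQE that marks it free
 * again. Skipped when the device is in internal-error state, since the UMR
 * QP can no longer process work requests.
 */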
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct mlx5_umr_wr umrwr = {};
	struct ib_send_wr *bad;
	int err;

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return 0;

	mlx5_ib_init_umr_context(&umr_context);

	umrwr.wr.wr_cqe = &umr_context.cqe;
	prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmkey.key);

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
	if (err) {
		up(&umrc->sem);
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto error;
	} else {
		wait_for_completion(&umr_context.done);
		up(&umrc->sem);
	}
	if (umr_context.status != IB_WC_SUCCESS) {
		mlx5_ib_warn(dev, "unreg umr failed\n");
		err = -EFAULT;
		goto error;
	}
	return 0;

error:
	return err;
}

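/*
 * Re-register an existing UMR-backed MR in place: post a single UMR WQE that
 * updates the translation, PD and/or access flags according to @flags.
 */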
static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr,
		     u64 length, int npages, int page_shift, int order,
		     int access_flags, int flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct device *ddev = dev->ib_dev.dma_device;
	struct mlx5_ib_umr_context umr_context;
	struct ib_send_wr *bad;
	struct mlx5_umr_wr umrwr = {};
	struct ib_sge sg;
	struct umr_common *umrc = &dev->umrc;
	dma_addr_t dma = 0;
	__be64 *mr_pas = NULL;
	int size;
	int err;

	mlx5_ib_init_umr_context(&umr_context);

	umrwr.wr.wr_cqe = &umr_context.cqe;
	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;

	if (flags & IB_MR_REREG_TRANS) {
		err = dma_map_mr_pas(dev, mr->umem, npages, page_shift, &size,
				     &mr_pas, &dma);
		if (err)
			return err;

		umrwr.target.virt_addr = virt_addr;
		umrwr.length = length;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
	}

	prep_umr_wqe_common(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
			    page_shift);

	if (flags & IB_MR_REREG_PD) {
		umrwr.pd = pd;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD;
	}

	if (flags & IB_MR_REREG_ACCESS) {
		umrwr.access_flags = access_flags;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_ACCESS;
	}

	/* post send request to UMR QP */
	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);

	if (err) {
		mlx5_ib_warn(dev, "post send failed, err %d\n", err);
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed (%u)\n",
				     umr_context.status);
			err = -EFAULT;
		}
	}

	up(&umrc->sem);
	if (flags & IB_MR_REREG_TRANS) {
		dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
		kfree(mr_pas);
	}
	return err;
}

int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int new_access_flags,
			  struct ib_pd *new_pd, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
	struct mlx5_ib_mr *mr = to_mmr(ib_mr);
	struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
	int access_flags = flags & IB_MR_REREG_ACCESS ?
			    new_access_flags :
			    mr->access_flags;
	u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address;
	u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length;
	int page_shift = 0;
	int npages = 0;
	int ncont = 0;
	int order = 0;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

	if (flags != IB_MR_REREG_PD) {
		/*
		 * Replace umem. This needs to be done whether or not UMR is
		 * used.
		 */
		flags |= IB_MR_REREG_TRANS;
		ib_umem_release(mr->umem);
		err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
				  &npages, &page_shift, &ncont, &order);
		if (err < 0) {
			mr->umem = NULL;
			return err;
		}
	}

	if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
		/*
		 * UMR can't be used - MKey needs to be replaced.
		 */
		if (mr->umred) {
			err = unreg_umr(dev, mr);
			if (err)
				mlx5_ib_warn(dev, "Failed to unregister MR\n");
		} else {
			err = destroy_mkey(dev, mr);
			if (err)
				mlx5_ib_warn(dev, "Failed to destroy MKey\n");
		}
		if (err)
			return err;

		mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
				page_shift, access_flags);

		if (IS_ERR(mr))
			return PTR_ERR(mr);

		mr->umred = 0;
	} else {
		/*
		 * Send a UMR WQE
		 */
		err = rereg_umr(pd, mr, addr, len, npages, page_shift,
				order, access_flags, flags);
		if (err) {
			mlx5_ib_warn(dev, "Failed to rereg UMR\n");
			return err;
		}
	}

	if (flags & IB_MR_REREG_PD) {
		ib_mr->pd = pd;
		mr->mmkey.pd = to_mpd(pd)->pdn;
	}

	if (flags & IB_MR_REREG_ACCESS)
		mr->access_flags = access_flags;

	if (flags & IB_MR_REREG_TRANS) {
		atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
		set_mr_fileds(dev, mr, npages, len, access_flags);
		mr->mmkey.iova = addr;
		mr->mmkey.size = len;
	}
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif

	return 0;
}

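/*
 * Allocate and DMA-map the private descriptor array (MTTs or KLMs) used by
 * fast-registration work requests; over-allocate so the descriptors can be
 * aligned to MLX5_UMR_ALIGN.
 */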
static int
mlx5_alloc_priv_descs(struct ib_device *device,
		      struct mlx5_ib_mr *mr,
		      int ndescs,
		      int desc_size)
{
	int size = ndescs * desc_size;
	int add_size;
	int ret;

	add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);

	mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
	if (!mr->descs_alloc)
		return -ENOMEM;

	mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);

	mr->desc_map = dma_map_single(device->dma_device, mr->descs,
				      size, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dma_device, mr->desc_map)) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;
err:
	kfree(mr->descs_alloc);

	return ret;
}

static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
	if (mr->descs) {
		struct ib_device *device = mr->ibmr.device;
		int size = mr->max_descs * mr->desc_size;

		dma_unmap_single(device->dma_device, mr->desc_map,
				 size, DMA_TO_DEVICE);
		kfree(mr->descs_alloc);
		mr->descs = NULL;
	}
}

static int clean_mr(struct mlx5_ib_mr *mr)
{
	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
	int umred = mr->umred;
	int err;

	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
		kfree(mr->sig);
		mr->sig = NULL;
	}

	mlx5_free_priv_descs(mr);

	if (!umred) {
		err = destroy_mkey(dev, mr);
		if (err) {
			mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
				     mr->mmkey.key, err);
			return err;
		}
	} else {
		err = unreg_umr(dev, mr);
		if (err) {
			mlx5_ib_warn(dev, "failed unregister\n");
			return err;
		}
		free_cached_mr(dev, mr);
	}

	if (!umred)
		kfree(mr);

	return 0;
}

Haggai Eran6aec21f2014-12-11 17:04:23 +02001531int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
1532{
1533 struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
1534 struct mlx5_ib_mr *mr = to_mmr(ibmr);
1535 int npages = mr->npages;
1536 struct ib_umem *umem = mr->umem;
1537
1538#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
Haggai Eranb4cfe442014-12-11 17:04:26 +02001539 if (umem && umem->odp_data) {
1540 /* Prevent new page faults from succeeding */
1541 mr->live = 0;
Haggai Eran6aec21f2014-12-11 17:04:23 +02001542 /* Wait for all running page-fault handlers to finish. */
1543 synchronize_srcu(&dev->mr_srcu);
Haggai Eranb4cfe442014-12-11 17:04:26 +02001544 /* Destroy all page mappings */
1545 mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
1546 ib_umem_end(umem));
 1547 /*
 1548 * We kill the umem before the MR for ODP, so that
 1549 * there will not be any invalidations in flight that
 1550 * still look at the *mr struct.
 1551 */
1552 ib_umem_release(umem);
1553 atomic_sub(npages, &dev->mdev->priv.reg_pages);
1554
1555 /* Avoid double-freeing the umem. */
1556 umem = NULL;
1557 }
Haggai Eran6aec21f2014-12-11 17:04:23 +02001558#endif
1559
1560 clean_mr(mr);
1561
1562 if (umem) {
1563 ib_umem_release(umem);
1564 atomic_sub(npages, &dev->mdev->priv.reg_pages);
1565 }
1566
1567 return 0;
1568}
1569
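/*
 * Allocate a fast-registration MR.  The mkey is created in the free
 * state and is only populated later through UMR (umr_en is set).
 * IB_MR_TYPE_MEM_REG uses MTT descriptors (one u64 per page),
 * IB_MR_TYPE_SG_GAPS uses KLM descriptors so gaps between SG entries
 * can be expressed, and IB_MR_TYPE_SIGNATURE additionally creates the
 * memory and wire PSVs used for signature offload.
 */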
Sagi Grimberg9bee1782015-07-30 10:32:35 +03001570struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
1571 enum ib_mr_type mr_type,
1572 u32 max_num_sg)
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001573{
1574 struct mlx5_ib_dev *dev = to_mdev(pd->device);
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001575 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
Sagi Grimbergb005d312016-02-29 19:07:33 +02001576 int ndescs = ALIGN(max_num_sg, 4);
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001577 struct mlx5_ib_mr *mr;
1578 void *mkc;
1579 u32 *in;
Sagi Grimbergb005d312016-02-29 19:07:33 +02001580 int err;
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001581
1582 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1583 if (!mr)
1584 return ERR_PTR(-ENOMEM);
1585
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001586 in = kzalloc(inlen, GFP_KERNEL);
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001587 if (!in) {
1588 err = -ENOMEM;
1589 goto err_free;
1590 }
1591
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001592 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1593 MLX5_SET(mkc, mkc, free, 1);
1594 MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
1595 MLX5_SET(mkc, mkc, qpn, 0xffffff);
1596 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001597
Sagi Grimberg9bee1782015-07-30 10:32:35 +03001598 if (mr_type == IB_MR_TYPE_MEM_REG) {
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001599 mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
1600 MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001601 err = mlx5_alloc_priv_descs(pd->device, mr,
1602 ndescs, sizeof(u64));
1603 if (err)
1604 goto err_free_in;
1605
1606 mr->desc_size = sizeof(u64);
1607 mr->max_descs = ndescs;
Sagi Grimbergb005d312016-02-29 19:07:33 +02001608 } else if (mr_type == IB_MR_TYPE_SG_GAPS) {
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001609 mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
Sagi Grimbergb005d312016-02-29 19:07:33 +02001610
1611 err = mlx5_alloc_priv_descs(pd->device, mr,
1612 ndescs, sizeof(struct mlx5_klm));
1613 if (err)
1614 goto err_free_in;
1615 mr->desc_size = sizeof(struct mlx5_klm);
1616 mr->max_descs = ndescs;
Sagi Grimberg9bee1782015-07-30 10:32:35 +03001617 } else if (mr_type == IB_MR_TYPE_SIGNATURE) {
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001618 u32 psv_index[2];
1619
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001620 MLX5_SET(mkc, mkc, bsf_en, 1);
1621 MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001622 mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
1623 if (!mr->sig) {
1624 err = -ENOMEM;
1625 goto err_free_in;
1626 }
1627
1628 /* create mem & wire PSVs */
Jack Morgenstein9603b612014-07-28 23:30:22 +03001629 err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001630 2, psv_index);
1631 if (err)
1632 goto err_free_sig;
1633
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001634 mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001635 mr->sig->psv_memory.psv_idx = psv_index[0];
1636 mr->sig->psv_wire.psv_idx = psv_index[1];
Sagi Grimbergd5436ba2014-02-23 14:19:12 +02001637
1638 mr->sig->sig_status_checked = true;
1639 mr->sig->sig_err_exists = false;
 1640 /* Arm SIGERR on the next UMR */
1641 ++mr->sig->sigerr_count;
Sagi Grimberg9bee1782015-07-30 10:32:35 +03001642 } else {
1643 mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
1644 err = -EINVAL;
1645 goto err_free_in;
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001646 }
1647
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001648 MLX5_SET(mkc, mkc, access_mode, mr->access_mode);
1649 MLX5_SET(mkc, mkc, umr_en, 1);
1650
1651 err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001652 if (err)
1653 goto err_destroy_psv;
1654
Matan Baraka606b0f2016-02-29 18:05:28 +02001655 mr->ibmr.lkey = mr->mmkey.key;
1656 mr->ibmr.rkey = mr->mmkey.key;
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001657 mr->umem = NULL;
1658 kfree(in);
1659
1660 return &mr->ibmr;
1661
1662err_destroy_psv:
1663 if (mr->sig) {
Jack Morgenstein9603b612014-07-28 23:30:22 +03001664 if (mlx5_core_destroy_psv(dev->mdev,
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001665 mr->sig->psv_memory.psv_idx))
1666 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
1667 mr->sig->psv_memory.psv_idx);
Jack Morgenstein9603b612014-07-28 23:30:22 +03001668 if (mlx5_core_destroy_psv(dev->mdev,
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001669 mr->sig->psv_wire.psv_idx))
1670 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
1671 mr->sig->psv_wire.psv_idx);
1672 }
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001673 mlx5_free_priv_descs(mr);
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001674err_free_sig:
1675 kfree(mr->sig);
1676err_free_in:
1677 kfree(in);
1678err_free:
1679 kfree(mr);
1680 return ERR_PTR(err);
1681}
1682
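/*
 * Allocate a memory window.  In mlx5 a MW is simply another mkey in
 * KLM access mode with UMR enabled; remote invalidation (en_rinval) is
 * only turned on for type-2 windows.  resp.response_length tells user
 * space how much of the response structure was actually filled in.
 */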
Matan Barakd2370e02016-02-29 18:05:30 +02001683struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
1684 struct ib_udata *udata)
1685{
1686 struct mlx5_ib_dev *dev = to_mdev(pd->device);
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001687 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
Matan Barakd2370e02016-02-29 18:05:30 +02001688 struct mlx5_ib_mw *mw = NULL;
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001689 u32 *in = NULL;
1690 void *mkc;
Matan Barakd2370e02016-02-29 18:05:30 +02001691 int ndescs;
1692 int err;
1693 struct mlx5_ib_alloc_mw req = {};
1694 struct {
1695 __u32 comp_mask;
1696 __u32 response_length;
1697 } resp = {};
1698
1699 err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
1700 if (err)
1701 return ERR_PTR(err);
1702
1703 if (req.comp_mask || req.reserved1 || req.reserved2)
1704 return ERR_PTR(-EOPNOTSUPP);
1705
1706 if (udata->inlen > sizeof(req) &&
1707 !ib_is_udata_cleared(udata, sizeof(req),
1708 udata->inlen - sizeof(req)))
1709 return ERR_PTR(-EOPNOTSUPP);
1710
1711 ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);
1712
1713 mw = kzalloc(sizeof(*mw), GFP_KERNEL);
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001714 in = kzalloc(inlen, GFP_KERNEL);
Matan Barakd2370e02016-02-29 18:05:30 +02001715 if (!mw || !in) {
1716 err = -ENOMEM;
1717 goto free;
1718 }
1719
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001720 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
Matan Barakd2370e02016-02-29 18:05:30 +02001721
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001722 MLX5_SET(mkc, mkc, free, 1);
1723 MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
1724 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
1725 MLX5_SET(mkc, mkc, umr_en, 1);
1726 MLX5_SET(mkc, mkc, lr, 1);
1727 MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_KLMS);
1728 MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
1729 MLX5_SET(mkc, mkc, qpn, 0xffffff);
1730
1731 err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
Matan Barakd2370e02016-02-29 18:05:30 +02001732 if (err)
1733 goto free;
1734
1735 mw->ibmw.rkey = mw->mmkey.key;
1736
1737 resp.response_length = min(offsetof(typeof(resp), response_length) +
1738 sizeof(resp.response_length), udata->outlen);
1739 if (resp.response_length) {
1740 err = ib_copy_to_udata(udata, &resp, resp.response_length);
1741 if (err) {
1742 mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
1743 goto free;
1744 }
1745 }
1746
1747 kfree(in);
1748 return &mw->ibmw;
1749
1750free:
1751 kfree(mw);
1752 kfree(in);
1753 return ERR_PTR(err);
1754}
1755
1756int mlx5_ib_dealloc_mw(struct ib_mw *mw)
1757{
1758 struct mlx5_ib_mw *mmw = to_mmw(mw);
1759 int err;
1760
1761 err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
1762 &mmw->mmkey);
1763 if (!err)
1764 kfree(mmw);
1765 return err;
1766}
1767
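/*
 * Report and clear the signature error status of a signature-enabled
 * MR.  If the latched error belongs to this mkey the saved error item
 * is returned as-is; otherwise a generic bad-guard error is reported
 * together with the key the error was latched on.
 */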
Sagi Grimbergd5436ba2014-02-23 14:19:12 +02001768int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
1769 struct ib_mr_status *mr_status)
1770{
1771 struct mlx5_ib_mr *mmr = to_mmr(ibmr);
1772 int ret = 0;
1773
1774 if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
1775 pr_err("Invalid status check mask\n");
1776 ret = -EINVAL;
1777 goto done;
1778 }
1779
1780 mr_status->fail_status = 0;
1781 if (check_mask & IB_MR_CHECK_SIG_STATUS) {
1782 if (!mmr->sig) {
1783 ret = -EINVAL;
1784 pr_err("signature status check requested on a non-signature enabled MR\n");
1785 goto done;
1786 }
1787
1788 mmr->sig->sig_status_checked = true;
1789 if (!mmr->sig->sig_err_exists)
1790 goto done;
1791
1792 if (ibmr->lkey == mmr->sig->err_item.key)
1793 memcpy(&mr_status->sig_err, &mmr->sig->err_item,
1794 sizeof(mr_status->sig_err));
1795 else {
1796 mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
1797 mr_status->sig_err.sig_err_offset = 0;
1798 mr_status->sig_err.key = mmr->sig->err_item.key;
1799 }
1800
1801 mmr->sig->sig_err_exists = false;
1802 mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
1803 }
1804
1805done:
1806 return ret;
1807}
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001808
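/*
 * Translate a DMA-mapped scatterlist into KLM descriptors.  Each KLM
 * carries its own address, byte count and lkey, so entries need not be
 * page aligned or contiguous; only the first entry honours the initial
 * sg_offset.  Returns the number of entries consumed.
 */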
Sagi Grimbergb005d312016-02-29 19:07:33 +02001809static int
1810mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
1811 struct scatterlist *sgl,
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001812 unsigned short sg_nents,
Bart Van Assche9aa8b322016-05-12 10:49:15 -07001813 unsigned int *sg_offset_p)
Sagi Grimbergb005d312016-02-29 19:07:33 +02001814{
1815 struct scatterlist *sg = sgl;
1816 struct mlx5_klm *klms = mr->descs;
Bart Van Assche9aa8b322016-05-12 10:49:15 -07001817 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
Sagi Grimbergb005d312016-02-29 19:07:33 +02001818 u32 lkey = mr->ibmr.pd->local_dma_lkey;
1819 int i;
1820
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001821 mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
Sagi Grimbergb005d312016-02-29 19:07:33 +02001822 mr->ibmr.length = 0;
1823 mr->ndescs = sg_nents;
1824
1825 for_each_sg(sgl, sg, sg_nents, i) {
 1826 if (unlikely(i >= mr->max_descs))
1827 break;
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001828 klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
1829 klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
Sagi Grimbergb005d312016-02-29 19:07:33 +02001830 klms[i].key = cpu_to_be32(lkey);
 1831 mr->ibmr.length += sg_dma_len(sg) - sg_offset;
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001832
1833 sg_offset = 0;
Sagi Grimbergb005d312016-02-29 19:07:33 +02001834 }
1835
Bart Van Assche9aa8b322016-05-12 10:49:15 -07001836 if (sg_offset_p)
1837 *sg_offset_p = sg_offset;
1838
Sagi Grimbergb005d312016-02-29 19:07:33 +02001839 return i;
1840}
1841
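/*
 * Callback for ib_sg_to_pages(): append one page address to the MTT
 * descriptor list, tagged with the local read/write permission bits.
 */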
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001842static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
1843{
1844 struct mlx5_ib_mr *mr = to_mmr(ibmr);
1845 __be64 *descs;
1846
1847 if (unlikely(mr->ndescs == mr->max_descs))
1848 return -ENOMEM;
1849
1850 descs = mr->descs;
1851 descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
1852
1853 return 0;
1854}
1855
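/*
 * Map a scatterlist onto the MR's descriptors.  KLM-mode MRs (SG_GAPS
 * and signature) go through mlx5_ib_sg_to_klms(); MTT-mode MRs use the
 * generic ib_sg_to_pages() walker with mlx5_set_page().  The
 * descriptor buffer is synced for the CPU around the update since it
 * is DMA-mapped and later read by the device during the UMR.
 */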
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001856int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
Bart Van Assche9aa8b322016-05-12 10:49:15 -07001857 unsigned int *sg_offset)
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001858{
1859 struct mlx5_ib_mr *mr = to_mmr(ibmr);
1860 int n;
1861
1862 mr->ndescs = 0;
1863
1864 ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
1865 mr->desc_size * mr->max_descs,
1866 DMA_TO_DEVICE);
1867
Saeed Mahameedec22eb52016-07-16 06:28:36 +03001868 if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001869 n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset);
Sagi Grimbergb005d312016-02-29 19:07:33 +02001870 else
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001871 n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
1872 mlx5_set_page);
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001873
1874 ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
1875 mr->desc_size * mr->max_descs,
1876 DMA_TO_DEVICE);
1877
1878 return n;
1879}
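/*
 * Usage sketch (illustration only, not part of this driver): a ULP
 * would typically reach the entry points above roughly as follows,
 * assuming a DMA-mapped scatterlist 'sg' with 'nents' entries:
 *
 *	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
 *	n = ib_map_mr_sg(mr, sg, nents, NULL, PAGE_SIZE);
 *	// post an IB_WR_REG_MR work request, use mr->rkey for RDMA,
 *	// then invalidate and call ib_dereg_mr(mr) when done
 *
 * Those core verbs dispatch to mlx5_ib_alloc_mr(), mlx5_ib_map_mr_sg()
 * and mlx5_ib_dereg_mr() through the ib_device function table.
 */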