/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048

static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);

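/*
 * Destroy the mkey backing @mr. With on-demand paging enabled, wait for
 * any page-fault handlers still referencing the MR (under mr_srcu) to
 * finish before returning.
 */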
static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/* Wait until all page fault handlers using the mr complete. */
	synchronize_srcu(&dev->mr_srcu);
#endif

	return err;
}

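/* Map an allocation order to its index in the MR cache entry array. */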
static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	else
		return order - cache->ent[0].order;
}

static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
{
	return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
		length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void update_odp_mr(struct mlx5_ib_mr *mr)
{
	if (mr->umem->odp_data) {
		/*
		 * This barrier prevents the compiler from moving the
		 * setting of umem->odp_data->private to point to our
		 * MR before reg_umr has finished, ensuring that MR
		 * initialization is complete before we start handling
		 * invalidations.
		 */
		smp_wmb();
		mr->umem->odp_data->private = mr;
		/*
		 * Make sure we will see the new
		 * umem->odp_data->private value in the invalidation
		 * routines before we can get page faults on the
		 * MR. Page faults can happen once we put the MR in
		 * the tree, below this line. Without the barrier, a
		 * fault could be handled and an invalidation could
		 * run before umem->odp_data->private == mr is visible
		 * to the invalidation handler.
		 */
		smp_wmb();
	}
}
#endif

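/*
 * Completion callback for the asynchronous mkey creation issued by
 * add_keys(): on success, assign a variant key, add the MR to its cache
 * entry and insert the mkey into the device's mkey radix tree.
 */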
static void reg_mr_callback(int status, void *context)
{
	struct mlx5_ib_mr *mr = context;
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	u8 key;
	unsigned long flags;
	struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
	int err;

	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	mr->mmkey.type = MLX5_MKEY_MR;
	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
	mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	ent->size++;
	spin_unlock_irqrestore(&ent->lock, flags);

	write_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key),
				&mr->mmkey);
	if (err)
		pr_err("Error inserting into mkey tree. 0x%x\n", -err);
	write_unlock_irqrestore(&table->lock, flags);

	if (!completion_done(&ent->compl))
		complete(&ent->compl);
}

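/*
 * Asynchronously create @num mkeys for cache entry @c. At most
 * MAX_PENDING_REG_MR creations may be outstanding per entry; beyond that
 * -EAGAIN is returned and the caller is expected to retry later.
 */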
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err = 0;
	int i;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	for (i = 0; i < num; i++) {
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			break;
		}

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		mr->order = ent->order;
		mr->allocated_from_cache = 1;
		mr->dev = dev;

		MLX5_SET(mkc, mkc, free, 1);
		MLX5_SET(mkc, mkc, umr_en, 1);
		MLX5_SET(mkc, mkc, access_mode, ent->access_mode);

		MLX5_SET(mkc, mkc, qpn, 0xffffff);
		MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
		MLX5_SET(mkc, mkc, log_page_size, ent->page);

		spin_lock_irq(&ent->lock);
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
					       in, inlen,
					       mr->out, sizeof(mr->out),
					       reg_mr_callback, mr);
		if (err) {
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}

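/*
 * Remove up to @num MRs from cache entry @c. The mkeys are destroyed
 * outside the entry lock and, with ODP enabled, freeing is deferred
 * until after an SRCU grace period.
 */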
static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *tmp_mr;
	struct mlx5_ib_mr *mr;
	LIST_HEAD(del_list);
	int i;

	for (i = 0; i < num; i++) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			break;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_move(&mr->list, &del_list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	synchronize_srcu(&dev->mr_srcu);
#endif

	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
		list_del(&mr->list);
		kfree(mr);
	}
}

static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	u32 var;
	int err;
	int c;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var < ent->limit)
		return -EINVAL;

	if (var > ent->size) {
		do {
			err = add_keys(dev, c, var - ent->size);
			if (err && err != -EAGAIN)
				return err;

			usleep_range(3000, 5000);
		} while (err);
	} else if (var < ent->size) {
		remove_keys(dev, c, ent->size - var);
	}

	return count;
}

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};

static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	u32 var;
	int err;
	int c;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var > ent->size)
		return -EINVAL;

	ent->limit = var;

	if (ent->cur < ent->limit) {
		err = add_keys(dev, c, 2 * ent->limit - ent->cur);
		if (err)
			return err;
	}

	return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};

static int someone_adding(struct mlx5_mr_cache *cache)
{
	int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		if (cache->ent[i].cur < cache->ent[i].limit)
			return 1;
	}

	return 0;
}

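/*
 * Background worker for a cache entry: refill the entry while it holds
 * fewer than 2 * limit MRs and trim it back once it grows beyond that,
 * backing off when the device requests a fill delay or the CPU is busy.
 */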
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);
	int err;

	if (cache->stopped)
		return;

	ent = &dev->cache.ent[i];
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err == -EAGAIN) {
				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
					    i + 2);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(3));
			} else if (err) {
				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
					     i + 2, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			} else {
				queue_work(cache->wq, &ent->work);
			}
		}
	} else if (ent->cur > 2 * ent->limit) {
		/*
		 * The remove_keys() logic is performed as a garbage
		 * collection task. Such a task is intended to run when
		 * no other active processes are running.
		 *
		 * need_resched() returns TRUE if there are user tasks
		 * to be activated in the near future.
		 *
		 * In that case, we don't execute remove_keys() and
		 * postpone the garbage collection work to the next
		 * cycle, in order to free CPU resources for other
		 * tasks.
		 */
		if (!need_resched() && !someone_adding(cache) &&
		    time_after(jiffies, cache->last_add + 300 * HZ)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
		} else {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		}
	}
}

static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}

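/*
 * Allocate an MR from a specific cache entry, triggering an asynchronous
 * refill and sleeping on the entry's completion while the free list is
 * empty.
 */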
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	struct mlx5_ib_mr *mr;
	int err;

	if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_err(dev, "cache entry %d is out of range\n", entry);
		return NULL;
	}

	ent = &cache->ent[entry];
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);

			err = add_keys(dev, entry, 1);
			if (err && err != -EAGAIN)
				return ERR_PTR(err);

			wait_for_completion(&ent->compl);
		} else {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			return mr;
		}
	}
}

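/*
 * Allocate an MR of at least @order from the cache, falling back to
 * progressively larger entries and kicking their refill work on the way.
 */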
static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;
	int last_umr_cache_entry;
	int c;
	int i;

	c = order2idx(dev, order);
	last_umr_cache_entry = order2idx(dev, mr_cache_max_order(dev));
	if (c < 0 || c > last_umr_cache_entry) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
		return NULL;
	}

	for (i = c; i <= last_umr_cache_entry; i++) {
		ent = &cache->ent[i];

		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			break;
		}
		spin_unlock_irq(&ent->lock);

		queue_work(cache->wq, &ent->work);
	}

	if (!mr)
		cache->ent[c].miss++;

	return mr;
}

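/*
 * Return an MR to its cache entry. The mkey is first invalidated with a
 * UMR WQE; if the entry has grown past twice its limit, schedule the
 * shrink work.
 */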
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int shrink = 0;
	int c;

	c = order2idx(dev, mr->order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
		return;
	}

	if (unreg_umr(dev, mr))
		return;

	ent = &cache->ent[c];
	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	if (ent->cur > 2 * ent->limit)
		shrink = 1;
	spin_unlock_irq(&ent->lock);

	if (shrink)
		queue_work(cache->wq, &ent->work);
}

static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *tmp_mr;
	struct mlx5_ib_mr *mr;
	LIST_HEAD(del_list);

	cancel_delayed_work(&ent->dwork);
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			break;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_move(&mr->list, &del_list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	synchronize_srcu(&dev->mr_srcu);
#endif

	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
		list_del(&mr->list);
		kfree(mr);
	}
}

static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root || dev->rep)
		return;

	debugfs_remove_recursive(dev->cache.root);
	dev->cache.root = NULL;
}

static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int i;

	if (!mlx5_debugfs_root || dev->rep)
		return 0;

	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
	if (!cache->root)
		return -ENOMEM;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		sprintf(ent->name, "%d", ent->order);
		ent->dir = debugfs_create_dir(ent->name, cache->root);
		if (!ent->dir)
			goto err;

		ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
						 &size_fops);
		if (!ent->fsize)
			goto err;

		ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
						  &limit_fops);
		if (!ent->flimit)
			goto err;

		ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
					       &ent->cur);
		if (!ent->fcur)
			goto err;

		ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
						&ent->miss);
		if (!ent->fmiss)
			goto err;
	}

	return 0;
err:
	mlx5_mr_cache_debugfs_cleanup(dev);

	return -ENOMEM;
}

static void delay_time_func(struct timer_list *t)
{
	struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);

	dev->fill_delay = 0;
}

int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int err;
	int i;

	mutex_init(&dev->slow_path_mutex);
	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	timer_setup(&dev->delay_timer, delay_time_func, 0);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);
		ent->order = i + 2;
		ent->dev = dev;
		ent->limit = 0;

		init_completion(&ent->compl);
		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
		queue_work(cache->wq, &ent->work);

		if (i > MR_CACHE_LAST_STD_ENTRY) {
			mlx5_odp_init_mr_cache_entry(ent);
			continue;
		}

		if (ent->order > mr_cache_max_order(dev))
			continue;

		ent->page = PAGE_SHIFT;
		ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
			   MLX5_IB_UMR_OCTOWORD;
		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
		    !dev->rep &&
		    mlx5_core_is_pf(dev->mdev))
			ent->limit = dev->mdev->profile->mr_cache[i].limit;
		else
			ent->limit = 0;
	}

	err = mlx5_mr_cache_debugfs_init(dev);
	if (err)
		mlx5_ib_warn(dev, "cache debugfs failure\n");

	/*
	 * We don't want to fail the driver if debugfs failed to
	 * initialize, so we are not forwarding the error to the user.
	 */

	return 0;
}

static void wait_for_async_commands(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int total = 0;
	int i;
	int j;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		for (j = 0; j < 1000; j++) {
			if (!ent->pending)
				break;
			msleep(50);
		}
	}
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		total += ent->pending;
	}

	if (total)
		mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total);
	else
		mlx5_ib_warn(dev, "done with all pending requests\n");
}

int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	int i;

	dev->cache.stopped = 1;
	flush_workqueue(dev->cache.wq);

	mlx5_mr_cache_debugfs_cleanup(dev);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
		clean_keys(dev, i);

	destroy_workqueue(dev->cache.wq);
	wait_for_async_commands(dev);
	del_timer_sync(&dev->delay_timer);

	return 0;
}

struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET(mkc, mkc, length64, 1);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, 0);

	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

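/*
 * Number of octowords (16-byte units) needed to hold the page
 * translations of a region of @len bytes starting at @addr, with 8-byte
 * MTT entries packed two per octoword.
 */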
static int get_octo_len(u64 addr, u64 len, int page_shift)
{
	u64 page_size = 1ULL << page_shift;
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> page_shift;
	return (npages + 1) / 2;
}

static int mr_cache_max_order(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		return MR_CACHE_LAST_STD_ENTRY + 2;
	return MLX5_MAX_UMR_SHIFT;
}

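/*
 * Pin the user memory covering [start, start + length) and report the
 * number of system pages, the best page shift to use, the page count at
 * that shift and the resulting allocation order.
 */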
static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
		       int access_flags, struct ib_umem **umem,
		       int *npages, int *page_shift, int *ncont,
		       int *order)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int err;

	*umem = ib_umem_get(pd->uobject->context, start, length,
			    access_flags, 0);
	err = PTR_ERR_OR_ZERO(*umem);
	if (err) {
		*umem = NULL;
		mlx5_ib_err(dev, "umem get failed (%d)\n", err);
		return err;
	}

	mlx5_ib_cont_pages(*umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
			   page_shift, ncont, order);
	if (!*npages) {
		mlx5_ib_warn(dev, "avoid zero region\n");
		ib_umem_release(*umem);
		return -EINVAL;
	}

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    *npages, *ncont, *order, *page_shift);

	return 0;
}

static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_umr_context *context =
		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);

	context->status = wc->status;
	complete(&context->done);
}

static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	context->cqe.done = mlx5_ib_umr_done;
	context->status = -1;
	init_completion(&context->done);
}

static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
				  struct mlx5_umr_wr *umrwr)
{
	struct umr_common *umrc = &dev->umrc;
	struct ib_send_wr *bad;
	int err;
	struct mlx5_ib_umr_context umr_context;

	mlx5_ib_init_umr_context(&umr_context);
	umrwr->wr.wr_cqe = &umr_context.cqe;

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr->wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed (%u)\n",
				     umr_context.status);
			err = -EFAULT;
		}
	}
	up(&umrc->sem);
	return err;
}

static struct mlx5_ib_mr *alloc_mr_from_cache(
				  struct ib_pd *pd, struct ib_umem *umem,
				  u64 virt_addr, u64 len, int npages,
				  int page_shift, int order, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	int err = 0;
	int i;

	for (i = 0; i < 1; i++) {
		mr = alloc_cached_mr(dev, order);
		if (mr)
			break;

		err = add_keys(dev, order2idx(dev, order), 1);
		if (err && err != -EAGAIN) {
			mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
			break;
		}
	}

	if (!mr)
		return ERR_PTR(-EAGAIN);

	mr->ibmr.pd = pd;
	mr->umem = umem;
	mr->access_flags = access_flags;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->mmkey.iova = virt_addr;
	mr->mmkey.size = len;
	mr->mmkey.pd = to_mpd(pd)->pdn;

	return mr;
}

static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
			       void *xlt, int page_shift, size_t size,
			       int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct ib_umem *umem = mr->umem;

	if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
		mlx5_odp_populate_klm(xlt, idx, npages, mr, flags);
		return npages;
	}

	npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx);

	if (!(flags & MLX5_IB_UPD_XLT_ZAP)) {
		__mlx5_ib_populate_pas(dev, umem, page_shift,
				       idx, npages, xlt,
				       MLX5_IB_MTT_PRESENT);
		/* Clear padding after the pages brought from the umem. */
		memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0,
		       size - npages * sizeof(struct mlx5_mtt));
	}

	return npages;
}

#define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
			    MLX5_UMR_MTT_ALIGNMENT)
#define MLX5_SPARE_UMR_CHUNK 0x10000

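/*
 * Update a range of an MR's translation table (XLT) through UMR work
 * requests. The descriptors are staged in a DMA-mapped buffer and pushed
 * in chunks; if a large buffer cannot be allocated, fall back to a spare
 * chunk size and finally to the single-page emergency buffer.
 */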
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct device *ddev = dev->ib_dev.dev.parent;
	int size;
	void *xlt;
	dma_addr_t dma;
	struct mlx5_umr_wr wr;
	struct ib_sge sg;
	int err = 0;
	int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
			? sizeof(struct mlx5_klm)
			: sizeof(struct mlx5_mtt);
	const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
	const int page_mask = page_align - 1;
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t pages_iter = 0;
	gfp_t gfp;
	bool use_emergency_page = false;

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly.
	 */
	if (idx & page_mask) {
		npages += idx & page_mask;
		idx &= ~page_mask;
	}

	gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL;
	gfp |= __GFP_ZERO | __GFP_NOWARN;

	pages_to_map = ALIGN(npages, page_align);
	size = desc_size * pages_to_map;
	size = min_t(int, size, MLX5_MAX_UMR_CHUNK);

	xlt = (void *)__get_free_pages(gfp, get_order(size));
	if (!xlt && size > MLX5_SPARE_UMR_CHUNK) {
		mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d; falling back to spare UMR allocation of %d bytes\n",
			    size, get_order(size), MLX5_SPARE_UMR_CHUNK);

		size = MLX5_SPARE_UMR_CHUNK;
		xlt = (void *)__get_free_pages(gfp, get_order(size));
	}

	if (!xlt) {
		mlx5_ib_warn(dev, "Using XLT emergency buffer\n");
		xlt = (void *)mlx5_ib_get_xlt_emergency_page();
		size = PAGE_SIZE;
		memset(xlt, 0, size);
		use_emergency_page = true;
	}
	pages_iter = size / desc_size;
	dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
		err = -ENOMEM;
		goto free_xlt;
	}

	sg.addr = dma;
	sg.lkey = dev->umrc.pd->local_dma_lkey;

	memset(&wr, 0, sizeof(wr));
	wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
	if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
		wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr.wr.sg_list = &sg;
	wr.wr.num_sge = 1;
	wr.wr.opcode = MLX5_IB_WR_UMR;

	wr.pd = mr->ibmr.pd;
	wr.mkey = mr->mmkey.key;
	wr.length = mr->mmkey.size;
	wr.virt_addr = mr->mmkey.iova;
	wr.access_flags = mr->access_flags;
	wr.page_shift = page_shift;

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, idx += pages_iter) {
		npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
		npages = populate_xlt(mr, idx, npages, xlt,
				      page_shift, size, flags);

		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

		sg.length = ALIGN(npages * desc_size,
				  MLX5_UMR_MTT_ALIGNMENT);

		if (pages_mapped + pages_iter >= pages_to_map) {
			if (flags & MLX5_IB_UPD_XLT_ENABLE)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_ENABLE_MR |
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
			if (flags & MLX5_IB_UPD_XLT_PD ||
			    flags & MLX5_IB_UPD_XLT_ACCESS)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
			if (flags & MLX5_IB_UPD_XLT_ADDR)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
		}

		wr.offset = idx * desc_size;
		wr.xlt_size = sg.length;

		err = mlx5_ib_post_send_wait(dev, &wr);
	}
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_xlt:
	if (use_emergency_page)
		mlx5_ib_put_xlt_emergency_page();
	else
		free_pages((unsigned long)xlt, get_order(size));

	return err;
}

/*
 * If ibmr is NULL it will be allocated by reg_create.
 * Else, the given ibmr will be used.
 */
static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
				     u64 virt_addr, u64 length,
				     struct ib_umem *umem, int npages,
				     int page_shift, int access_flags,
				     bool populate)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	__be64 *pas;
	void *mkc;
	int inlen;
	u32 *in;
	int err;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->ibmr.pd = pd;
	mr->access_flags = access_flags;

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	if (populate)
		inlen += sizeof(*pas) * roundup(npages, 2);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	if (populate && !(access_flags & IB_ACCESS_ON_DEMAND))
		mlx5_ib_populate_pas(dev, umem, page_shift, pas,
				     pg_cap ? MLX5_IB_MTT_PRESENT : 0);

	/* The pg_access bit allows setting the access flags
	 * in the page list submitted with the command.
	 */
	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, free, !populate);
	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);

	MLX5_SET64(mkc, mkc, start_addr, virt_addr);
	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(virt_addr, length, page_shift));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	if (populate) {
		MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
			 get_octo_len(virt_addr, length, page_shift));
	}

	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->dev = dev;
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);

	return mr;

err_2:
	kvfree(in);

err_1:
	if (!ibmr)
		kfree(mr);

	return ERR_PTR(err);
}

static void set_mr_fileds(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			  int npages, u64 length, int access_flags)
{
	mr->npages = npages;
	atomic_add(npages, &dev->mdev->priv.reg_pages);
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.length = length;
	mr->access_flags = access_flags;
}

struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	struct ib_umem *umem;
	int page_shift;
	int npages;
	int ncont;
	int order;
	int err;
	bool use_umr = true;

	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
		return ERR_PTR(-EINVAL);

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (!start && length == U64_MAX) {
		if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
		    !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
			return ERR_PTR(-EINVAL);

		mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
		if (IS_ERR(mr))
			return ERR_CAST(mr);
		return &mr->ibmr;
	}
#endif

	err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
			  &page_shift, &ncont, &order);

	if (err < 0)
		return ERR_PTR(err);

	if (order <= mr_cache_max_order(dev)) {
		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
					 page_shift, order, access_flags);
		if (PTR_ERR(mr) == -EAGAIN) {
			mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
			mr = NULL;
		}
	} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
		if (access_flags & IB_ACCESS_ON_DEMAND) {
			err = -EINVAL;
			pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
			goto error;
		}
		use_umr = false;
	}

	if (!mr) {
		mutex_lock(&dev->slow_path_mutex);
		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
				page_shift, access_flags, !use_umr);
		mutex_unlock(&dev->slow_path_mutex);
	}

	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto error;
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	mr->umem = umem;
	set_mr_fileds(dev, mr, npages, length, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif

	if (use_umr) {
		int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;

		if (access_flags & IB_ACCESS_ON_DEMAND)
			update_xlt_flags |= MLX5_IB_UPD_XLT_ZAP;

		err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
					 update_xlt_flags);

		if (err) {
			dereg_mr(dev, mr);
			return ERR_PTR(err);
		}
	}

	mr->live = 1;
	return &mr->ibmr;
error:
	ib_umem_release(umem);
	return ERR_PTR(err);
}

static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_umr_wr umrwr = {};

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return 0;

	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
			      MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.mkey = mr->mmkey.key;

	return mlx5_ib_post_send_wait(dev, &umrwr);
}

Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +02001322static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
Noa Osherovich56e11d62016-02-29 16:46:51 +02001323 int access_flags, int flags)
1324{
1325 struct mlx5_ib_dev *dev = to_mdev(pd->device);
Noa Osherovich56e11d62016-02-29 16:46:51 +02001326 struct mlx5_umr_wr umrwr = {};
Noa Osherovich56e11d62016-02-29 16:46:51 +02001327 int err;
1328
Noa Osherovich56e11d62016-02-29 16:46:51 +02001329 umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;
1330
Artemy Kovalyov7d0cc6e2017-01-02 11:37:44 +02001331 umrwr.wr.opcode = MLX5_IB_WR_UMR;
1332 umrwr.mkey = mr->mmkey.key;
Noa Osherovich56e11d62016-02-29 16:46:51 +02001333
Artemy Kovalyov31616252017-01-02 11:37:42 +02001334 if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) {
Noa Osherovich56e11d62016-02-29 16:46:51 +02001335 umrwr.pd = pd;
Noa Osherovich56e11d62016-02-29 16:46:51 +02001336 umrwr.access_flags = access_flags;
Artemy Kovalyov31616252017-01-02 11:37:42 +02001337 umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
Noa Osherovich56e11d62016-02-29 16:46:51 +02001338 }
1339
Binoy Jayand5ea2df2017-01-02 11:37:40 +02001340 err = mlx5_ib_post_send_wait(dev, &umrwr);
Noa Osherovich56e11d62016-02-29 16:46:51 +02001341
Noa Osherovich56e11d62016-02-29 16:46:51 +02001342 return err;
1343}
1344
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int new_access_flags,
			  struct ib_pd *new_pd, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
	struct mlx5_ib_mr *mr = to_mmr(ib_mr);
	struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
	int access_flags = flags & IB_MR_REREG_ACCESS ?
			    new_access_flags :
			    mr->access_flags;
	u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address;
	u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length;
	int page_shift = 0;
	int upd_flags = 0;
	int npages = 0;
	int ncont = 0;
	int order = 0;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

	atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);

	if (flags != IB_MR_REREG_PD) {
		/*
		 * Replace umem. This needs to be done whether or not UMR is
		 * used.
		 */
		flags |= IB_MR_REREG_TRANS;
		ib_umem_release(mr->umem);
		err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
				  &npages, &page_shift, &ncont, &order);
		if (err < 0) {
			clean_mr(dev, mr);
			return err;
		}
	}

	if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
		/*
		 * UMR can't be used - MKey needs to be replaced.
		 */
		if (mr->allocated_from_cache) {
			err = unreg_umr(dev, mr);
			if (err)
				mlx5_ib_warn(dev, "Failed to unregister MR\n");
		} else {
			err = destroy_mkey(dev, mr);
			if (err)
				mlx5_ib_warn(dev, "Failed to destroy MKey\n");
		}
		if (err)
			return err;

		mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
				page_shift, access_flags, true);

		if (IS_ERR(mr))
			return PTR_ERR(mr);

		mr->allocated_from_cache = 0;
		mr->live = 1;
	} else {
		/*
		 * Send a UMR WQE
		 */
		mr->ibmr.pd = pd;
		mr->access_flags = access_flags;
		mr->mmkey.iova = addr;
		mr->mmkey.size = len;
		mr->mmkey.pd = to_mpd(pd)->pdn;

		if (flags & IB_MR_REREG_TRANS) {
			upd_flags = MLX5_IB_UPD_XLT_ADDR;
			if (flags & IB_MR_REREG_PD)
				upd_flags |= MLX5_IB_UPD_XLT_PD;
			if (flags & IB_MR_REREG_ACCESS)
				upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
			err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
						 upd_flags);
		} else {
			err = rereg_umr(pd, mr, access_flags, flags);
		}

		if (err) {
			mlx5_ib_warn(dev, "Failed to rereg UMR\n");
			ib_umem_release(mr->umem);
			mr->umem = NULL;
			clean_mr(dev, mr);
			return err;
		}
	}

	set_mr_fileds(dev, mr, npages, len, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif
	return 0;
}

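/*
 * Allocate and DMA-map the private descriptor array (MTTs or KLMs) used
 * by fast-registration MRs. The allocation is padded so the descriptors
 * handed to the HCA can be aligned to MLX5_UMR_ALIGN.
 */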
static int
mlx5_alloc_priv_descs(struct ib_device *device,
		      struct mlx5_ib_mr *mr,
		      int ndescs,
		      int desc_size)
{
	int size = ndescs * desc_size;
	int add_size;
	int ret;

	add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);

	mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
	if (!mr->descs_alloc)
		return -ENOMEM;

	mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);

	mr->desc_map = dma_map_single(device->dev.parent, mr->descs,
				      size, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev.parent, mr->desc_map)) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;
err:
	kfree(mr->descs_alloc);

	return ret;
}

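/* Unmap and free the private descriptor array, if one was allocated. */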
static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
	if (mr->descs) {
		struct ib_device *device = mr->ibmr.device;
		int size = mr->max_descs * mr->desc_size;

		dma_unmap_single(device->dev.parent, mr->desc_map,
				 size, DMA_TO_DEVICE);
		kfree(mr->descs_alloc);
		mr->descs = NULL;
	}
}

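/*
 * Release the mkey-level resources of an MR: destroy its signature PSVs
 * (if any), free the private descriptors and, unless the mkey came from
 * the MR cache, destroy the mkey itself. The umem and the mlx5_ib_mr
 * structure are left to the caller.
 */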
static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int allocated_from_cache = mr->allocated_from_cache;
	int err;

	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
		kfree(mr->sig);
		mr->sig = NULL;
	}

	mlx5_free_priv_descs(mr);

	if (!allocated_from_cache) {
		u32 key = mr->mmkey.key;

		err = destroy_mkey(dev, mr);
		if (err) {
			mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
				     key, err);
			return err;
		}
	}

	return 0;
}

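/*
 * Fully deregister an MR. For ODP MRs the page-fault handlers are
 * quiesced and the umem is released before the mkey is cleaned up, so no
 * invalidation can race with the teardown; afterwards the MR is either
 * freed or returned to the MR cache.
 */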
static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int npages = mr->npages;
	struct ib_umem *umem = mr->umem;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (umem && umem->odp_data) {
		/* Prevent new page faults from succeeding */
		mr->live = 0;
		/* Wait for all running page-fault handlers to finish. */
		synchronize_srcu(&dev->mr_srcu);
		/* Destroy all page mappings */
		if (umem->odp_data->page_list)
			mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
						 ib_umem_end(umem));
		else
			mlx5_ib_free_implicit_mr(mr);
		/*
		 * For ODP, release the umem before the MR so that no
		 * invalidation can still be in flight and looking at
		 * the *mr struct.
		 */
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);

		/* Avoid double-freeing the umem. */
		umem = NULL;
	}
#endif

	clean_mr(dev, mr);

	if (umem) {
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);
	}

	if (!mr->allocated_from_cache)
		kfree(mr);
	else
		mlx5_mr_cache_free(dev, mr);

	return 0;
}

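/* Verbs entry point for MR deregistration. */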
int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
	struct mlx5_ib_mr *mr = to_mmr(ibmr);

	return dereg_mr(dev, mr);
}

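/*
 * Allocate a kernel fast-registration MR (ib_alloc_mr()).
 * IB_MR_TYPE_MEM_REG MRs use MTT descriptors, IB_MR_TYPE_SG_GAPS MRs use
 * KLMs, and IB_MR_TYPE_SIGNATURE MRs additionally allocate a memory and a
 * wire PSV for signature offload.
 */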
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	int ndescs = ALIGN(max_num_sg, 4);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);

	if (mr_type == IB_MR_TYPE_MEM_REG) {
		mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
		err = mlx5_alloc_priv_descs(pd->device, mr,
					    ndescs, sizeof(struct mlx5_mtt));
		if (err)
			goto err_free_in;

		mr->desc_size = sizeof(struct mlx5_mtt);
		mr->max_descs = ndescs;
	} else if (mr_type == IB_MR_TYPE_SG_GAPS) {
		mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;

		err = mlx5_alloc_priv_descs(pd->device, mr,
					    ndescs, sizeof(struct mlx5_klm));
		if (err)
			goto err_free_in;
		mr->desc_size = sizeof(struct mlx5_klm);
		mr->max_descs = ndescs;
	} else if (mr_type == IB_MR_TYPE_SIGNATURE) {
		u32 psv_index[2];

		MLX5_SET(mkc, mkc, bsf_en, 1);
		MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
		mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
		if (!mr->sig) {
			err = -ENOMEM;
			goto err_free_in;
		}

		/* create mem & wire PSVs */
		err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
					   2, psv_index);
		if (err)
			goto err_free_sig;

		mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
		mr->sig->psv_memory.psv_idx = psv_index[0];
		mr->sig->psv_wire.psv_idx = psv_index[1];

		mr->sig->sig_status_checked = true;
		mr->sig->sig_err_exists = false;
		/* Next UMR, Arm SIGERR */
		++mr->sig->sigerr_count;
	} else {
		mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
		err = -EINVAL;
		goto err_free_in;
	}

	MLX5_SET(mkc, mkc, access_mode, mr->access_mode);
	MLX5_SET(mkc, mkc, umr_en, 1);

	mr->ibmr.device = pd->device;
	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_destroy_psv;

	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;
	kfree(in);

	return &mr->ibmr;

err_destroy_psv:
	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
	}
	mlx5_free_priv_descs(mr);
err_free_sig:
	kfree(mr->sig);
err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}

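/*
 * Allocate a memory window backed by a KLM-mode mkey created in the
 * "free" state; type 2 windows are created with remote invalidation
 * enabled (en_rinval).
 */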
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mw *mw = NULL;
	u32 *in = NULL;
	void *mkc;
	int ndescs;
	int err;
	struct mlx5_ib_alloc_mw req = {};
	struct {
		__u32 comp_mask;
		__u32 response_length;
	} resp = {};

	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
	if (err)
		return ERR_PTR(err);

	if (req.comp_mask || req.reserved1 || req.reserved2)
		return ERR_PTR(-EOPNOTSUPP);

	if (udata->inlen > sizeof(req) &&
	    !ib_is_udata_cleared(udata, sizeof(req),
				 udata->inlen - sizeof(req)))
		return ERR_PTR(-EOPNOTSUPP);

	ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);

	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
	in = kzalloc(inlen, GFP_KERNEL);
	if (!mw || !in) {
		err = -ENOMEM;
		goto free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_KLMS);
	MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
	if (err)
		goto free;

	mw->mmkey.type = MLX5_MKEY_MW;
	mw->ibmw.rkey = mw->mmkey.key;
	mw->ndescs = ndescs;

	resp.response_length = min(offsetof(typeof(resp), response_length) +
				   sizeof(resp.response_length), udata->outlen);
	if (resp.response_length) {
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err) {
			mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
			goto free;
		}
	}

	kfree(in);
	return &mw->ibmw;

free:
	kfree(mw);
	kfree(in);
	return ERR_PTR(err);
}

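/* Destroy a memory window's mkey and free the mlx5_ib_mw structure. */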
int mlx5_ib_dealloc_mw(struct ib_mw *mw)
{
	struct mlx5_ib_mw *mmw = to_mmw(mw);
	int err;

	err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
				     &mmw->mmkey);
	if (!err)
		kfree(mmw);
	return err;
}

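/*
 * Report the signature status of an MR: if a signature error is pending
 * for this MR, copy its details into mr_status and clear the pending
 * flag.
 */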
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
	int ret = 0;

	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
		pr_err("Invalid status check mask\n");
		ret = -EINVAL;
		goto done;
	}

	mr_status->fail_status = 0;
	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
		if (!mmr->sig) {
			ret = -EINVAL;
			pr_err("signature status check requested on a non-signature enabled MR\n");
			goto done;
		}

		mmr->sig->sig_status_checked = true;
		if (!mmr->sig->sig_err_exists)
			goto done;

		if (ibmr->lkey == mmr->sig->err_item.key)
			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
			       sizeof(mr_status->sig_err));
		else {
			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
			mr_status->sig_err.sig_err_offset = 0;
			mr_status->sig_err.key = mmr->sig->err_item.key;
		}

		mmr->sig->sig_err_exists = false;
		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
	}

done:
	return ret;
}

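/*
 * Translate a scatterlist directly into KLM descriptors, one KLM per SG
 * entry, honoring an optional byte offset into the first entry. Used for
 * KLM-mode (SG_GAPS) MRs, whose entries need not be page aligned or
 * contiguous.
 */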
static int
mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
		   struct scatterlist *sgl,
		   unsigned short sg_nents,
		   unsigned int *sg_offset_p)
{
	struct scatterlist *sg = sgl;
	struct mlx5_klm *klms = mr->descs;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	u32 lkey = mr->ibmr.pd->local_dma_lkey;
	int i;

	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
	mr->ibmr.length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		if (unlikely(i >= mr->max_descs))
			break;
		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
		klms[i].key = cpu_to_be32(lkey);
		mr->ibmr.length += sg_dma_len(sg) - sg_offset;

		sg_offset = 0;
	}
	mr->ndescs = i;

	if (sg_offset_p)
		*sg_offset_p = sg_offset;

	return i;
}

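/*
 * ib_sg_to_pages() callback: store a single page address as an MTT entry
 * with the read/write enable bits set.
 */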
static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *descs;

	if (unlikely(mr->ndescs == mr->max_descs))
		return -ENOMEM;

	descs = mr->descs;
	descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

	return 0;
}

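/*
 * Map a scatterlist onto the MR's descriptor buffer, either as KLMs or,
 * for MTT-mode MRs, page by page via ib_sg_to_pages(), syncing the
 * descriptor buffer for CPU access before and for device access after
 * the update.
 */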
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int n;

	mr->ndescs = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
				   mr->desc_size * mr->max_descs,
				   DMA_TO_DEVICE);

	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
		n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset);
	else
		n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
				   mlx5_set_page);

	ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
				      mr->desc_size * mr->max_descs,
				      DMA_TO_DEVICE);

	return n;
}