/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */


#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048

static int clean_mr(struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);

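/*
 * Destroy the mkey backing @mr. With on-demand paging enabled, also
 * wait for any page-fault handlers still referencing the MR to finish
 * before the caller frees it.
 */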
static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/* Wait until all page fault handlers using the mr complete. */
	synchronize_srcu(&dev->mr_srcu);
#endif

	return err;
}

static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	else
		return order - cache->ent[0].order;
}

static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
{
	return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
		length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void update_odp_mr(struct mlx5_ib_mr *mr)
{
	if (mr->umem->odp_data) {
		/*
		 * This barrier prevents the compiler from moving the
		 * setting of umem->odp_data->private to point to our
		 * MR, before reg_umr finished, to ensure that the MR
		 * initialization has finished before starting to
		 * handle invalidations.
		 */
		smp_wmb();
		mr->umem->odp_data->private = mr;
		/*
		 * Make sure we will see the new
		 * umem->odp_data->private value in the invalidation
		 * routines, before we can get page faults on the
		 * MR. Page faults can happen once we put the MR in
		 * the tree, below this line. Without the barrier,
		 * there can be fault handling and an invalidation
		 * before umem->odp_data->private == mr is visible to
		 * the invalidation handler.
		 */
		smp_wmb();
	}
}
#endif

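/*
 * Completion callback for the asynchronous mkey creation issued from
 * add_keys(): assign a fresh variant key, insert the mkey into the
 * mkey radix tree and add the MR to its cache bucket. On failure,
 * back off by setting fill_delay for roughly one second.
 */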
static void reg_mr_callback(int status, void *context)
{
	struct mlx5_ib_mr *mr = context;
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	u8 key;
	unsigned long flags;
	struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
	int err;

	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	mr->mmkey.type = MLX5_MKEY_MR;
	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
	mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	ent->size++;
	spin_unlock_irqrestore(&ent->lock, flags);

	write_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key),
				&mr->mmkey);
	if (err)
		pr_err("Error inserting to mkey tree. 0x%x\n", -err);
	write_unlock_irqrestore(&table->lock, flags);

	if (!completion_done(&ent->compl))
		complete(&ent->compl);
}

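/*
 * Asynchronously create up to @num cached mkeys for cache bucket @c.
 * Each bucket allows at most MAX_PENDING_REG_MR outstanding creations;
 * -EAGAIN is returned once that limit is reached.
 */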
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err = 0;
	int i;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	for (i = 0; i < num; i++) {
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			break;
		}

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		mr->order = ent->order;
		mr->allocated_from_cache = 1;
		mr->dev = dev;

		MLX5_SET(mkc, mkc, free, 1);
		MLX5_SET(mkc, mkc, umr_en, 1);
		MLX5_SET(mkc, mkc, access_mode, ent->access_mode);

		MLX5_SET(mkc, mkc, qpn, 0xffffff);
		MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
		MLX5_SET(mkc, mkc, log_page_size, ent->page);

		spin_lock_irq(&ent->lock);
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
					       in, inlen,
					       mr->out, sizeof(mr->out),
					       reg_mr_callback, mr);
		if (err) {
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}

static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;
	int i;

	for (i = 0; i < num; i++) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = destroy_mkey(dev, mr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}

static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	u32 var;
	int err;
	int c;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var < ent->limit)
		return -EINVAL;

	if (var > ent->size) {
		do {
			err = add_keys(dev, c, var - ent->size);
			if (err && err != -EAGAIN)
				return err;

			usleep_range(3000, 5000);
		} while (err);
	} else if (var < ent->size) {
		remove_keys(dev, c, ent->size - var);
	}

	return count;
}

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};

static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	u32 var;
	int err;
	int c;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var > ent->size)
		return -EINVAL;

	ent->limit = var;

	if (ent->cur < ent->limit) {
		err = add_keys(dev, c, 2 * ent->limit - ent->cur);
		if (err)
			return err;
	}

	return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};

static int someone_adding(struct mlx5_mr_cache *cache)
{
	int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		if (cache->ent[i].cur < cache->ent[i].limit)
			return 1;
	}

	return 0;
}

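/*
 * Cache maintenance work: refill a bucket that has dropped below twice
 * its limit, or garbage-collect one that has grown past twice its
 * limit. Runs on the ordered mkey_cache workqueue.
 */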
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);
	int err;

	if (cache->stopped)
		return;

	ent = &dev->cache.ent[i];
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err == -EAGAIN) {
				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
					    i + 2);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(3));
			} else if (err) {
				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
					     i + 2, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			} else {
				queue_work(cache->wq, &ent->work);
			}
		}
	} else if (ent->cur > 2 * ent->limit) {
		/*
		 * The remove_keys() logic is performed as a garbage
		 * collection task. Such a task is intended to run when no
		 * other active processes are running.
		 *
		 * The need_resched() will return TRUE if there are user
		 * tasks to be activated in the near future.
		 *
		 * In such a case, we don't execute remove_keys() and
		 * postpone the garbage collection work to try to run in the
		 * next cycle, in order to free CPU resources to other tasks.
		 */
		if (!need_resched() && !someone_adding(cache) &&
		    time_after(jiffies, cache->last_add + 300 * HZ)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
		} else {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		}
	}
}

static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}

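/*
 * Allocate an MR from cache bucket @entry, triggering an asynchronous
 * refill and waiting on its completion whenever the bucket is empty.
 * Unlike alloc_cached_mr(), the caller names the bucket directly and
 * there is no fallback to a higher order.
 */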
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	struct mlx5_ib_mr *mr;
	int err;

	if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_err(dev, "cache entry %d is out of range\n", entry);
		return NULL;
	}

	ent = &cache->ent[entry];
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);

			err = add_keys(dev, entry, 1);
			if (err && err != -EAGAIN)
				return ERR_PTR(err);

			wait_for_completion(&ent->compl);
		} else {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			return mr;
		}
	}
}

static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;
	int last_umr_cache_entry;
	int c;
	int i;

	c = order2idx(dev, order);
	last_umr_cache_entry = order2idx(dev, mr_cache_max_order(dev));
	if (c < 0 || c > last_umr_cache_entry) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
		return NULL;
	}

	for (i = c; i <= last_umr_cache_entry; i++) {
		ent = &cache->ent[i];

		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			break;
		}
		spin_unlock_irq(&ent->lock);

		queue_work(cache->wq, &ent->work);
	}

	if (!mr)
		cache->ent[c].miss++;

	return mr;
}

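/*
 * Return an MR to its cache bucket. The mkey is first invalidated via
 * unreg_umr() so it is marked free again before it sits in the cache;
 * if the bucket has grown past twice its limit, the cache work is
 * queued to shrink it.
 */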
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int shrink = 0;
	int c;

	c = order2idx(dev, mr->order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
		return;
	}

	if (unreg_umr(dev, mr))
		return;

	ent = &cache->ent[c];
	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	if (ent->cur > 2 * ent->limit)
		shrink = 1;
	spin_unlock_irq(&ent->lock);

	if (shrink)
		queue_work(cache->wq, &ent->work);
}

static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;

	cancel_delayed_work(&ent->dwork);
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = destroy_mkey(dev, mr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}

static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root)
		return;

	debugfs_remove_recursive(dev->cache.root);
	dev->cache.root = NULL;
}

static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int i;

	if (!mlx5_debugfs_root)
		return 0;

	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
	if (!cache->root)
		return -ENOMEM;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		sprintf(ent->name, "%d", ent->order);
		ent->dir = debugfs_create_dir(ent->name, cache->root);
		if (!ent->dir)
			goto err;

		ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
						 &size_fops);
		if (!ent->fsize)
			goto err;

		ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
						  &limit_fops);
		if (!ent->flimit)
			goto err;

		ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
					       &ent->cur);
		if (!ent->fcur)
			goto err;

		ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
						&ent->miss);
		if (!ent->fmiss)
			goto err;
	}

	return 0;
err:
	mlx5_mr_cache_debugfs_cleanup(dev);

	return -ENOMEM;
}

static void delay_time_func(unsigned long ctx)
{
	struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;

	dev->fill_delay = 0;
}

int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int err;
	int i;

	mutex_init(&dev->slow_path_mutex);
	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);
		ent->order = i + 2;
		ent->dev = dev;
		ent->limit = 0;

		init_completion(&ent->compl);
		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
		queue_work(cache->wq, &ent->work);

		if (i > MR_CACHE_LAST_STD_ENTRY) {
			mlx5_odp_init_mr_cache_entry(ent);
			continue;
		}

		if (ent->order > mr_cache_max_order(dev))
			continue;

		ent->page = PAGE_SHIFT;
		ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
			   MLX5_IB_UMR_OCTOWORD;
		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
		    mlx5_core_is_pf(dev->mdev))
			ent->limit = dev->mdev->profile->mr_cache[i].limit;
		else
			ent->limit = 0;
	}

	err = mlx5_mr_cache_debugfs_init(dev);
	if (err)
		mlx5_ib_warn(dev, "cache debugfs failure\n");

	/*
	 * We don't want to fail the driver if debugfs failed to initialize,
	 * so we are not forwarding the error to the user.
	 */

	return 0;
}

static void wait_for_async_commands(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int total = 0;
	int i;
	int j;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		for (j = 0; j < 1000; j++) {
			if (!ent->pending)
				break;
			msleep(50);
		}
	}
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		total += ent->pending;
	}

	if (total)
		mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total);
	else
		mlx5_ib_warn(dev, "done with all pending requests\n");
}

int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	int i;

	dev->cache.stopped = 1;
	flush_workqueue(dev->cache.wq);

	mlx5_mr_cache_debugfs_cleanup(dev);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
		clean_keys(dev, i);

	destroy_workqueue(dev->cache.wq);
	wait_for_async_commands(dev);
	del_timer_sync(&dev->delay_timer);

	return 0;
}

struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET(mkc, mkc, length64, 1);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, 0);

	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

static int get_octo_len(u64 addr, u64 len, int page_size)
{
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
	return (npages + 1) / 2;
}

static int mr_cache_max_order(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		return MR_CACHE_LAST_STD_ENTRY + 2;
	return MLX5_MAX_UMR_SHIFT;
}

static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
		       int access_flags, struct ib_umem **umem,
		       int *npages, int *page_shift, int *ncont,
		       int *order)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int err;

	*umem = ib_umem_get(pd->uobject->context, start, length,
			    access_flags, 0);
	err = PTR_ERR_OR_ZERO(*umem);
	if (err < 0) {
		mlx5_ib_err(dev, "umem get failed (%d)\n", err);
		return err;
	}

	mlx5_ib_cont_pages(*umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
			   page_shift, ncont, order);
	if (!*npages) {
		mlx5_ib_warn(dev, "avoid zero region\n");
		ib_umem_release(*umem);
		return -EINVAL;
	}

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    *npages, *ncont, *order, *page_shift);

	return 0;
}

static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_umr_context *context =
		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);

	context->status = wc->status;
	complete(&context->done);
}

static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	context->cqe.done = mlx5_ib_umr_done;
	context->status = -1;
	init_completion(&context->done);
}

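/*
 * Post a UMR work request on the dedicated UMR QP and sleep until its
 * completion arrives. The umrc semaphore bounds the number of UMR WQEs
 * in flight.
 */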
static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
				  struct mlx5_umr_wr *umrwr)
{
	struct umr_common *umrc = &dev->umrc;
	struct ib_send_wr *bad;
	int err;
	struct mlx5_ib_umr_context umr_context;

	mlx5_ib_init_umr_context(&umr_context);
	umrwr->wr.wr_cqe = &umr_context.cqe;

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr->wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed (%u)\n",
				     umr_context.status);
			err = -EFAULT;
		}
	}
	up(&umrc->sem);
	return err;
}

static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
				  u64 virt_addr, u64 len, int npages,
				  int page_shift, int order, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	int err = 0;
	int i;

	for (i = 0; i < 1; i++) {
		mr = alloc_cached_mr(dev, order);
		if (mr)
			break;

		err = add_keys(dev, order2idx(dev, order), 1);
		if (err && err != -EAGAIN) {
			mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
			break;
		}
	}

	if (!mr)
		return ERR_PTR(-EAGAIN);

	mr->ibmr.pd = pd;
	mr->umem = umem;
	mr->access_flags = access_flags;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->mmkey.iova = virt_addr;
	mr->mmkey.size = len;
	mr->mmkey.pd = to_mpd(pd)->pdn;

	err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
				 MLX5_IB_UPD_XLT_ENABLE);

	if (err) {
		mlx5_mr_cache_free(dev, mr);
		return ERR_PTR(err);
	}

	mr->live = 1;

	return mr;
}

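/*
 * Fill one chunk of the XLT buffer: either KLM entries for an indirect
 * (ODP) mkey, or MTT entries taken from the umem page list with the
 * tail of the chunk zero-padded.
 */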
static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
			       void *xlt, int page_shift, size_t size,
			       int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct ib_umem *umem = mr->umem;

	if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
		mlx5_odp_populate_klm(xlt, idx, npages, mr, flags);
		return npages;
	}

	npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx);

	if (!(flags & MLX5_IB_UPD_XLT_ZAP)) {
		__mlx5_ib_populate_pas(dev, umem, page_shift,
				       idx, npages, xlt,
				       MLX5_IB_MTT_PRESENT);
		/* Clear padding after the pages
		 * brought from the umem.
		 */
		memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0,
		       size - npages * sizeof(struct mlx5_mtt));
	}

	return npages;
}

#define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
			    MLX5_UMR_MTT_ALIGNMENT)
#define MLX5_SPARE_UMR_CHUNK 0x10000

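/*
 * Update the translation table of an existing mkey via UMR. The XLT is
 * staged in a DMA-mapped bounce buffer and pushed in chunks of at most
 * MLX5_MAX_UMR_CHUNK bytes; if no pages can be allocated, the
 * per-ucontext emergency XLT page is used instead.
 */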
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct device *ddev = dev->ib_dev.dev.parent;
	struct mlx5_ib_ucontext *uctx = NULL;
	int size;
	void *xlt;
	dma_addr_t dma;
	struct mlx5_umr_wr wr;
	struct ib_sge sg;
	int err = 0;
	int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
			? sizeof(struct mlx5_klm)
			: sizeof(struct mlx5_mtt);
	const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
	const int page_mask = page_align - 1;
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t pages_iter = 0;
	gfp_t gfp;

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly
	 */
	if (idx & page_mask) {
		npages += idx & page_mask;
		idx &= ~page_mask;
	}

	gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL;
	gfp |= __GFP_ZERO | __GFP_NOWARN;

	pages_to_map = ALIGN(npages, page_align);
	size = desc_size * pages_to_map;
	size = min_t(int, size, MLX5_MAX_UMR_CHUNK);

	xlt = (void *)__get_free_pages(gfp, get_order(size));
	if (!xlt && size > MLX5_SPARE_UMR_CHUNK) {
		mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d. fallback to spare UMR allocation of %d bytes\n",
			    size, get_order(size), MLX5_SPARE_UMR_CHUNK);

		size = MLX5_SPARE_UMR_CHUNK;
		xlt = (void *)__get_free_pages(gfp, get_order(size));
	}

	if (!xlt) {
		uctx = to_mucontext(mr->ibmr.pd->uobject->context);
		mlx5_ib_warn(dev, "Using XLT emergency buffer\n");
		size = PAGE_SIZE;
		xlt = (void *)uctx->upd_xlt_page;
		mutex_lock(&uctx->upd_xlt_page_mutex);
		memset(xlt, 0, size);
	}
	pages_iter = size / desc_size;
	dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
		err = -ENOMEM;
		goto free_xlt;
	}

	sg.addr = dma;
	sg.lkey = dev->umrc.pd->local_dma_lkey;

	memset(&wr, 0, sizeof(wr));
	wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
	if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
		wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr.wr.sg_list = &sg;
	wr.wr.num_sge = 1;
	wr.wr.opcode = MLX5_IB_WR_UMR;

	wr.pd = mr->ibmr.pd;
	wr.mkey = mr->mmkey.key;
	wr.length = mr->mmkey.size;
	wr.virt_addr = mr->mmkey.iova;
	wr.access_flags = mr->access_flags;
	wr.page_shift = page_shift;

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, idx += pages_iter) {
		npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
		npages = populate_xlt(mr, idx, npages, xlt,
				      page_shift, size, flags);

		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

		sg.length = ALIGN(npages * desc_size,
				  MLX5_UMR_MTT_ALIGNMENT);

		if (pages_mapped + pages_iter >= pages_to_map) {
			if (flags & MLX5_IB_UPD_XLT_ENABLE)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_ENABLE_MR |
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
			if (flags & MLX5_IB_UPD_XLT_PD ||
			    flags & MLX5_IB_UPD_XLT_ACCESS)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
			if (flags & MLX5_IB_UPD_XLT_ADDR)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
		}

		wr.offset = idx * desc_size;
		wr.xlt_size = sg.length;

		err = mlx5_ib_post_send_wait(dev, &wr);
	}
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_xlt:
	if (uctx)
		mutex_unlock(&uctx->upd_xlt_page_mutex);
	else
		free_pages((unsigned long)xlt, get_order(size));

	return err;
}

/*
 * If ibmr is NULL it will be allocated by reg_create.
 * Else, the given ibmr will be used.
 */
static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
				     u64 virt_addr, u64 length,
				     struct ib_umem *umem, int npages,
				     int page_shift, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	__be64 *pas;
	void *mkc;
	int inlen;
	u32 *in;
	int err;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
		sizeof(*pas) * ((npages + 1) / 2) * 2;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	if (!(access_flags & IB_ACCESS_ON_DEMAND))
		mlx5_ib_populate_pas(dev, umem, page_shift, pas,
				     pg_cap ? MLX5_IB_MTT_PRESENT : 0);

	/* The pg_access bit allows setting the access flags
	 * in the page list submitted with the command.
	 */
	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);

	MLX5_SET64(mkc, mkc, start_addr, virt_addr);
	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(virt_addr, length, 1 << page_shift));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
		 get_octo_len(virt_addr, length, 1 << page_shift));

	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->umem = umem;
	mr->dev = dev;
	mr->live = 1;
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);

	return mr;

err_2:
	kvfree(in);

err_1:
	if (!ibmr)
		kfree(mr);

	return ERR_PTR(err);
}

static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			  int npages, u64 length, int access_flags)
{
	mr->npages = npages;
	atomic_add(npages, &dev->mdev->priv.reg_pages);
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.length = length;
	mr->access_flags = access_flags;
}

struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	struct ib_umem *umem;
	int page_shift;
	int npages;
	int ncont;
	int order;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (!start && length == U64_MAX) {
		if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
		    !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
			return ERR_PTR(-EINVAL);

		mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
		return &mr->ibmr;
	}
#endif

	err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
			  &page_shift, &ncont, &order);

	if (err < 0)
		return ERR_PTR(err);

	if (order <= mr_cache_max_order(dev)) {
		mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
			     order, access_flags);
		if (PTR_ERR(mr) == -EAGAIN) {
			mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
			mr = NULL;
		}
	} else if (access_flags & IB_ACCESS_ON_DEMAND &&
		   !MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
		err = -EINVAL;
		pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
		goto error;
	}

	if (!mr) {
		mutex_lock(&dev->slow_path_mutex);
		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
				page_shift, access_flags);
		mutex_unlock(&dev->slow_path_mutex);
	}

	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto error;
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	mr->umem = umem;
	set_mr_fields(dev, mr, npages, length, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif

	return &mr->ibmr;

error:
	ib_umem_release(umem);
	return ERR_PTR(err);
}

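/*
 * Post a UMR WQE that disables (frees) the mkey so the MR can be
 * returned to the cache. No-op when the device is in the
 * internal-error state.
 */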
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_umr_wr umrwr = {};

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return 0;

	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
			      MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.mkey = mr->mmkey.key;

	return mlx5_ib_post_send_wait(dev, &umrwr);
}

static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
		     int access_flags, int flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_umr_wr umrwr = {};
	int err;

	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;

	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.mkey = mr->mmkey.key;

	if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) {
		umrwr.pd = pd;
		umrwr.access_flags = access_flags;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
	}

	err = mlx5_ib_post_send_wait(dev, &umrwr);

	return err;
}

int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int new_access_flags,
			  struct ib_pd *new_pd, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
	struct mlx5_ib_mr *mr = to_mmr(ib_mr);
	struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
	int access_flags = flags & IB_MR_REREG_ACCESS ?
			    new_access_flags :
			    mr->access_flags;
	u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address;
	u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length;
	int page_shift = 0;
	int upd_flags = 0;
	int npages = 0;
	int ncont = 0;
	int order = 0;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

	atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);

	if (flags != IB_MR_REREG_PD) {
		/*
		 * Replace umem. This needs to be done whether or not UMR is
		 * used.
		 */
		flags |= IB_MR_REREG_TRANS;
		ib_umem_release(mr->umem);
		err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
				  &npages, &page_shift, &ncont, &order);
		if (err < 0) {
			clean_mr(mr);
			return err;
		}
	}

	if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
		/*
		 * UMR can't be used - MKey needs to be replaced.
		 */
		if (mr->allocated_from_cache) {
			err = unreg_umr(dev, mr);
			if (err)
				mlx5_ib_warn(dev, "Failed to unregister MR\n");
		} else {
			err = destroy_mkey(dev, mr);
			if (err)
				mlx5_ib_warn(dev, "Failed to destroy MKey\n");
		}
		if (err)
			return err;

		mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
				page_shift, access_flags);

		if (IS_ERR(mr))
			return PTR_ERR(mr);

		mr->allocated_from_cache = 0;
	} else {
		/*
		 * Send a UMR WQE
		 */
		mr->ibmr.pd = pd;
		mr->access_flags = access_flags;
		mr->mmkey.iova = addr;
		mr->mmkey.size = len;
		mr->mmkey.pd = to_mpd(pd)->pdn;

		if (flags & IB_MR_REREG_TRANS) {
			upd_flags = MLX5_IB_UPD_XLT_ADDR;
			if (flags & IB_MR_REREG_PD)
				upd_flags |= MLX5_IB_UPD_XLT_PD;
			if (flags & IB_MR_REREG_ACCESS)
				upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
			err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
						 upd_flags);
		} else {
			err = rereg_umr(pd, mr, access_flags, flags);
		}

		if (err) {
			mlx5_ib_warn(dev, "Failed to rereg UMR\n");
			ib_umem_release(mr->umem);
			clean_mr(mr);
			return err;
		}
	}

	set_mr_fields(dev, mr, npages, len, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif
	return 0;
}

Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001410static int
1411mlx5_alloc_priv_descs(struct ib_device *device,
1412 struct mlx5_ib_mr *mr,
1413 int ndescs,
1414 int desc_size)
1415{
1416 int size = ndescs * desc_size;
1417 int add_size;
1418 int ret;
1419
1420 add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
1421
1422 mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
1423 if (!mr->descs_alloc)
1424 return -ENOMEM;
1425
1426 mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);
1427
Bart Van Assche9b0c2892017-01-20 13:04:21 -08001428 mr->desc_map = dma_map_single(device->dev.parent, mr->descs,
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001429 size, DMA_TO_DEVICE);
Bart Van Assche9b0c2892017-01-20 13:04:21 -08001430 if (dma_mapping_error(device->dev.parent, mr->desc_map)) {
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001431 ret = -ENOMEM;
1432 goto err;
1433 }
1434
1435 return 0;
1436err:
1437 kfree(mr->descs_alloc);
1438
1439 return ret;
1440}

static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
	if (mr->descs) {
		struct ib_device *device = mr->ibmr.device;
		int size = mr->max_descs * mr->desc_size;

		dma_unmap_single(device->dev.parent, mr->desc_map,
				 size, DMA_TO_DEVICE);
		kfree(mr->descs_alloc);
		mr->descs = NULL;
	}
}
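
/*
 * Illustrative sketch (not part of the driver): the over-allocate-and-align
 * pattern used by mlx5_alloc_priv_descs() above, in isolation. With
 * MLX5_UMR_ALIGN == 2048 and a typical ARCH_KMALLOC_MINALIGN of 8, the
 * allocation is padded by 2040 bytes so that some address inside it is
 * guaranteed to be 2048-byte aligned. Note that the *original* pointer,
 * not the aligned one, is what must be passed to kfree().
 */
static void *example_aligned_alloc(size_t size, void **raw_out)
{
	void *raw = kzalloc(size + MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN,
			    GFP_KERNEL);

	if (!raw)
		return NULL;
	*raw_out = raw;		/* keep the raw pointer for kfree() */
	return PTR_ALIGN(raw, MLX5_UMR_ALIGN);
}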

/*
 * Tear down everything hanging off an MR except its umem: signature PSVs,
 * the private descriptor buffer, and finally the MKey itself. MRs that
 * came from the MR cache keep their MKey and are returned to the cache
 * instead of being freed.
 */
static int clean_mr(struct mlx5_ib_mr *mr)
{
	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
	int allocated_from_cache = mr->allocated_from_cache;
	int err;

	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
		kfree(mr->sig);
		mr->sig = NULL;
	}

	mlx5_free_priv_descs(mr);

	if (!allocated_from_cache) {
		err = destroy_mkey(dev, mr);
		if (err) {
			mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
				     mr->mmkey.key, err);
			return err;
		}
	} else {
		mlx5_mr_cache_free(dev, mr);
	}

	if (!allocated_from_cache)
		kfree(mr);

	return 0;
}

int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int npages = mr->npages;
	struct ib_umem *umem = mr->umem;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (umem && umem->odp_data) {
		/* Prevent new page faults from succeeding */
		mr->live = 0;
		/* Wait for all running page-fault handlers to finish. */
		synchronize_srcu(&dev->mr_srcu);
		/* Destroy all page mappings */
		if (umem->odp_data->page_list)
			mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
						 ib_umem_end(umem));
		else
			mlx5_ib_free_implicit_mr(mr);
		/*
		 * For ODP, release the umem before the MR so that no
		 * invalidation can still be in flight and dereference
		 * the mr struct.
		 */
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);

		/* Avoid double-freeing the umem. */
		umem = NULL;
	}
#endif

	clean_mr(mr);

	if (umem) {
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);
	}

	return 0;
}
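
/*
 * Descriptive note (not driver code) on the ODP teardown ordering above:
 * 1) mr->live = 0 stops new page faults from installing mappings;
 * 2) synchronize_srcu() waits out fault handlers already running;
 * 3) the page mappings are invalidated (or the implicit MR freed);
 * 4) the umem is released while the MR still exists, so no invalidation
 *    callback can race with the MR free; and only then
 * 5) clean_mr() drops the MKey and the MR itself.
 */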

/*
 * Allocate a non-userspace MR for fast registration. IB_MR_TYPE_MEM_REG
 * uses MTT translation (plain page lists), IB_MR_TYPE_SG_GAPS uses KLMs
 * so the SG list may contain arbitrary gaps, and IB_MR_TYPE_SIGNATURE
 * additionally creates the PSVs needed for signature (T10-PI) offload.
 */
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	int ndescs = ALIGN(max_num_sg, 4);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);

	if (mr_type == IB_MR_TYPE_MEM_REG) {
		mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
		err = mlx5_alloc_priv_descs(pd->device, mr,
					    ndescs, sizeof(struct mlx5_mtt));
		if (err)
			goto err_free_in;

		mr->desc_size = sizeof(struct mlx5_mtt);
		mr->max_descs = ndescs;
	} else if (mr_type == IB_MR_TYPE_SG_GAPS) {
		mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;

		err = mlx5_alloc_priv_descs(pd->device, mr,
					    ndescs, sizeof(struct mlx5_klm));
		if (err)
			goto err_free_in;
		mr->desc_size = sizeof(struct mlx5_klm);
		mr->max_descs = ndescs;
	} else if (mr_type == IB_MR_TYPE_SIGNATURE) {
		u32 psv_index[2];

		MLX5_SET(mkc, mkc, bsf_en, 1);
		MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
		mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
		if (!mr->sig) {
			err = -ENOMEM;
			goto err_free_in;
		}

		/* create mem & wire PSVs */
		err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
					   2, psv_index);
		if (err)
			goto err_free_sig;

		mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
		mr->sig->psv_memory.psv_idx = psv_index[0];
		mr->sig->psv_wire.psv_idx = psv_index[1];

		mr->sig->sig_status_checked = true;
		mr->sig->sig_err_exists = false;
		/* Next UMR, Arm SIGERR */
		++mr->sig->sigerr_count;
	} else {
		mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
		err = -EINVAL;
		goto err_free_in;
	}

	MLX5_SET(mkc, mkc, access_mode, mr->access_mode);
	MLX5_SET(mkc, mkc, umr_en, 1);

	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_destroy_psv;

	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;
	kfree(in);

	return &mr->ibmr;

err_destroy_psv:
	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
	}
	mlx5_free_priv_descs(mr);
err_free_sig:
	kfree(mr->sig);
err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}
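
/*
 * Illustrative sketch (not part of the driver): how an in-kernel ULP
 * allocates a fast-registration MR through the verbs layer. ib_alloc_mr()
 * dispatches to mlx5_ib_alloc_mr() above on an mlx5 device. The helper
 * name and its max_sge parameter are examples, not driver symbols.
 */
static struct ib_mr *example_alloc_fr_mr(struct ib_pd *pd, u32 max_sge)
{
	/* MTT-backed MR: page-aligned segments, no gaps in the mapping. */
	return ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, max_sge);
}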

struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mw *mw = NULL;
	u32 *in = NULL;
	void *mkc;
	int ndescs;
	int err;
	struct mlx5_ib_alloc_mw req = {};
	struct {
		__u32 comp_mask;
		__u32 response_length;
	} resp = {};

	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
	if (err)
		return ERR_PTR(err);

	if (req.comp_mask || req.reserved1 || req.reserved2)
		return ERR_PTR(-EOPNOTSUPP);

	if (udata->inlen > sizeof(req) &&
	    !ib_is_udata_cleared(udata, sizeof(req),
				 udata->inlen - sizeof(req)))
		return ERR_PTR(-EOPNOTSUPP);

	ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);

	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
	in = kzalloc(inlen, GFP_KERNEL);
	if (!mw || !in) {
		err = -ENOMEM;
		goto free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_KLMS);
	/* Type-2 MWs support invalidation of the rkey. */
	MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
	if (err)
		goto free;

	mw->mmkey.type = MLX5_MKEY_MW;
	mw->ibmw.rkey = mw->mmkey.key;
	mw->ndescs = ndescs;

	resp.response_length = min(offsetof(typeof(resp), response_length) +
				   sizeof(resp.response_length), udata->outlen);
	if (resp.response_length) {
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err) {
			mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
			goto free;
		}
	}

	kfree(in);
	return &mw->ibmw;

free:
	kfree(mw);
	kfree(in);
	return ERR_PTR(err);
}

int mlx5_ib_dealloc_mw(struct ib_mw *mw)
{
	struct mlx5_ib_mw *mmw = to_mmw(mw);
	int err;

	err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
				     &mmw->mmkey);
	if (!err)
		kfree(mmw);
	return err;
}
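
/*
 * Descriptive note (not driver code): memory windows have no in-kernel
 * consumer API in this era, so the two entry points above are reached
 * only from userspace via uverbs. The hookup in the driver's main.c
 * looks roughly like the following (shown for orientation only; field
 * names as in struct ib_device at the time):
 *
 *	dev->ib_dev.alloc_mw   = mlx5_ib_alloc_mw;
 *	dev->ib_dev.dealloc_mw = mlx5_ib_dealloc_mw;
 */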

int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
	int ret = 0;

	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
		pr_err("Invalid status check mask\n");
		ret = -EINVAL;
		goto done;
	}

	mr_status->fail_status = 0;
	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
		if (!mmr->sig) {
			ret = -EINVAL;
			pr_err("signature status check requested on a non-signature enabled MR\n");
			goto done;
		}

		mmr->sig->sig_status_checked = true;
		if (!mmr->sig->sig_err_exists)
			goto done;

		if (ibmr->lkey == mmr->sig->err_item.key)
			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
			       sizeof(mr_status->sig_err));
		else {
			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
			mr_status->sig_err.sig_err_offset = 0;
			mr_status->sig_err.key = mmr->sig->err_item.key;
		}

		mmr->sig->sig_err_exists = false;
		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
	}

done:
	return ret;
}
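
/*
 * Illustrative sketch (not part of the driver): how a ULP such as iSER
 * polls a signature MR for T10-PI errors after an I/O completes.
 * ib_check_mr_status() dispatches to mlx5_ib_check_mr_status() above;
 * the helper name is an example.
 */
static int example_check_sig_mr(struct ib_mr *sig_mr)
{
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret)
		return ret;
	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS)
		pr_err("signature error: type %d at offset %llu, key 0x%x\n",
		       mr_status.sig_err.err_type,
		       mr_status.sig_err.sig_err_offset,
		       mr_status.sig_err.key);
	return 0;
}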

static int
mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
		   struct scatterlist *sgl,
		   unsigned short sg_nents,
		   unsigned int *sg_offset_p)
{
	struct scatterlist *sg = sgl;
	struct mlx5_klm *klms = mr->descs;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	u32 lkey = mr->ibmr.pd->local_dma_lkey;
	int i;

	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
	mr->ibmr.length = 0;
	mr->ndescs = sg_nents;

	for_each_sg(sgl, sg, sg_nents, i) {
		if (unlikely(i >= mr->max_descs))
			break;
		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
		klms[i].key = cpu_to_be32(lkey);
		mr->ibmr.length += sg_dma_len(sg) - sg_offset;

		sg_offset = 0;
	}

	if (sg_offset_p)
		*sg_offset_p = sg_offset;

	return i;
}
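
/*
 * Descriptive note (not driver code): each KLM entry above carries its
 * own 64-bit address, byte count, and lkey, so consecutive entries need
 * not be contiguous - this is what lets IB_MR_TYPE_SG_GAPS MRs map an SG
 * list with holes. For example, two DMA segments at 0x1000 (8 KB) and
 * 0x10000 (4 KB) become two KLMs and a 12 KB MR whose address space is
 * the concatenation of the segments; the gap between them is never
 * reachable through the MKey.
 */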

/*
 * Callback for ib_sg_to_pages(): append one page address to the MTT
 * descriptor list, with the read/write enable bits set in the low bits.
 */
static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *descs;

	if (unlikely(mr->ndescs == mr->max_descs))
		return -ENOMEM;

	descs = mr->descs;
	descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

	return 0;
}

int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int n;

	mr->ndescs = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
				   mr->desc_size * mr->max_descs,
				   DMA_TO_DEVICE);

	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
		n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset);
	else
		n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
				   mlx5_set_page);

	ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
				      mr->desc_size * mr->max_descs,
				      DMA_TO_DEVICE);

	return n;
}
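
/*
 * Illustrative sketch (not part of the driver): the fast-registration
 * flow a ULP runs on top of the entry points above. The helper name and
 * parameters are examples and error handling is trimmed for brevity.
 * ib_map_mr_sg() ends up in mlx5_ib_map_mr_sg(), and the IB_WR_REG_MR
 * work request is what makes the mapping live on the wire.
 */
static int example_fast_reg(struct ib_qp *qp, struct ib_mr *mr,
			    struct scatterlist *sg, int sg_nents)
{
	struct ib_reg_wr reg_wr = {};
	struct ib_send_wr *bad_wr;
	int n;

	/* Load the DMA-mapped SG list into the MR's descriptor buffer. */
	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
	if (n < sg_nents)
		return n < 0 ? n : -EINVAL;

	/* Post a registration WR so the HCA activates the mapping. */
	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.mr = mr;
	reg_wr.key = mr->rkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;
	return ib_post_send(qp, &reg_wr.wr, &bad_wr);
}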