/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */


#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"
#include "user.h"

enum {
	MAX_PENDING_REG_MR = 8,
};

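/*
 * MLX5_UMR_ALIGN and the emergency buffer below support the UMR-based MTT
 * update path: mlx5_ib_update_mtt() falls back to this statically
 * allocated, 2KB-aligned buffer (serialized by the mutex) when it cannot
 * get a zeroed page with GFP_ATOMIC; see the ODP section further down in
 * this file.
 */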
#define MLX5_UMR_ALIGN 2048
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static __be64 mlx5_ib_update_mtt_emergency_buffer[
		MLX5_UMR_MTT_MIN_CHUNK_SIZE/sizeof(__be64)]
	__aligned(MLX5_UMR_ALIGN);
static DEFINE_MUTEX(mlx5_ib_update_mtt_emergency_buffer_mutex);
#endif

static int clean_mr(struct mlx5_ib_mr *mr);

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/* Wait until all page fault handlers using the mr complete. */
	synchronize_srcu(&dev->mr_srcu);
#endif

	return err;
}

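/*
 * The MR cache is an array of entries, where entry i holds MRs whose page
 * list has order cache->ent[i].order.  With the defaults set up in
 * mlx5_mr_cache_init() below (ent->order = i + 2), order2idx() translates
 * an MR order into the index of its cache bucket, e.g. order 5 maps to
 * index 3.
 */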
static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	else
		return order - cache->ent[0].order;
}

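/*
 * A cached MR of a given order can describe up to (1 << order) pages.
 * use_umr_mtt_update() checks whether the new [start, start + length)
 * range, including the offset of 'start' within its first page, still fits
 * in the MTT space of the existing MR, so that rereg can update the
 * translation with a UMR WQE instead of replacing the mkey; see
 * mlx5_ib_rereg_user_mr() below.
 */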
static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
{
	return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
		length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void update_odp_mr(struct mlx5_ib_mr *mr)
{
	if (mr->umem->odp_data) {
		/*
		 * This barrier prevents the compiler from moving the
		 * setting of umem->odp_data->private to point to our
		 * MR before reg_umr has finished, to ensure that MR
		 * initialization has completed before we start
		 * handling invalidations.
		 */
		smp_wmb();
		mr->umem->odp_data->private = mr;
		/*
		 * Make sure we will see the new
		 * umem->odp_data->private value in the invalidation
		 * routines before we can get page faults on the
		 * MR. Page faults can happen once we put the MR in
		 * the tree, below this line. Without the barrier,
		 * a fault handler and an invalidation could run
		 * before umem->odp_data->private == mr is visible to
		 * the invalidation handler.
		 */
		smp_wmb();
	}
}
#endif

static void reg_mr_callback(int status, void *context)
{
	struct mlx5_ib_mr *mr = context;
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	u8 key;
	unsigned long flags;
	struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
	int err;

	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	if (mr->out.hdr.status) {
		mlx5_ib_warn(dev, "failed - status %d, syndrome 0x%x\n",
			     mr->out.hdr.status,
			     be32_to_cpu(mr->out.hdr.syndrome));
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
	mr->mmkey.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	ent->size++;
	spin_unlock_irqrestore(&ent->lock, flags);

	write_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key),
				&mr->mmkey);
	if (err)
		pr_err("Error inserting to mkey tree. 0x%x\n", -err);
	write_unlock_irqrestore(&table->lock, flags);
}

static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_ib_mr *mr;
	int npages = 1 << ent->order;
	int err = 0;
	int i;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			break;
		}

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		mr->order = ent->order;
		mr->umred = 1;
		mr->dev = dev;
		in->seg.status = MLX5_MKEY_STATUS_FREE;
		in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
		in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
		in->seg.flags = MLX5_ACCESS_MODE_MTT | MLX5_PERM_UMR_EN;
		in->seg.log2_page_size = 12;

		spin_lock_irq(&ent->lock);
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in,
					    sizeof(*in), reg_mr_callback,
					    mr, &mr->out);
		if (err) {
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}

static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;
	int i;

	for (i = 0; i < num; i++) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = destroy_mkey(dev, mr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}

static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	u32 var;
	int err;
	int c;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var < ent->limit)
		return -EINVAL;

	if (var > ent->size) {
		do {
			err = add_keys(dev, c, var - ent->size);
			if (err && err != -EAGAIN)
				return err;

			usleep_range(3000, 5000);
		} while (err);
	} else if (var < ent->size) {
		remove_keys(dev, c, ent->size - var);
	}

	return count;
}

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};

static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	u32 var;
	int err;
	int c;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var > ent->size)
		return -EINVAL;

	ent->limit = var;

	if (ent->cur < ent->limit) {
		err = add_keys(dev, c, 2 * ent->limit - ent->cur);
		if (err)
			return err;
	}

	return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};

static int someone_adding(struct mlx5_mr_cache *cache)
{
	int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		if (cache->ent[i].cur < cache->ent[i].limit)
			return 1;
	}

	return 0;
}

static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);
	int err;

	if (cache->stopped)
		return;

	ent = &dev->cache.ent[i];
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err == -EAGAIN) {
				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
					    i + 2);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(3));
			} else if (err) {
				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
					     i + 2, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			} else {
				queue_work(cache->wq, &ent->work);
			}
		}
	} else if (ent->cur > 2 * ent->limit) {
		/*
		 * The remove_keys() logic is performed as a garbage
		 * collection task.  Such a task is intended to run when
		 * no other active processes are running.
		 *
		 * need_resched() returns TRUE if there are user tasks to
		 * be activated in the near future.
		 *
		 * In that case, we don't execute remove_keys() and
		 * postpone the garbage collection work to the next cycle,
		 * in order to free CPU resources for other tasks.
		 */
		if (!need_resched() && !someone_adding(cache) &&
		    time_after(jiffies, cache->last_add + 300 * HZ)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
		} else {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		}
	}
}

static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}

static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;
	int c;
	int i;

	c = order2idx(dev, order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
		return NULL;
	}

	for (i = c; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];

		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			break;
		}
		spin_unlock_irq(&ent->lock);

		queue_work(cache->wq, &ent->work);
	}

	if (!mr)
		cache->ent[c].miss++;

	return mr;
}

static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int shrink = 0;
	int c;

	c = order2idx(dev, mr->order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
		return;
	}
	ent = &cache->ent[c];
	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	if (ent->cur > 2 * ent->limit)
		shrink = 1;
	spin_unlock_irq(&ent->lock);

	if (shrink)
		queue_work(cache->wq, &ent->work);
}

static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;

	cancel_delayed_work(&ent->dwork);
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = destroy_mkey(dev, mr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}

static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int i;

	if (!mlx5_debugfs_root)
		return 0;

	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
	if (!cache->root)
		return -ENOMEM;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		sprintf(ent->name, "%d", ent->order);
		ent->dir = debugfs_create_dir(ent->name, cache->root);
		if (!ent->dir)
			return -ENOMEM;

		ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
						 &size_fops);
		if (!ent->fsize)
			return -ENOMEM;

		ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
						  &limit_fops);
		if (!ent->flimit)
			return -ENOMEM;

		ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
					       &ent->cur);
		if (!ent->fcur)
			return -ENOMEM;

		ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
						&ent->miss);
		if (!ent->fmiss)
			return -ENOMEM;
	}

	return 0;
}

static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root)
		return;

	debugfs_remove_recursive(dev->cache.root);
}

static void delay_time_func(unsigned long ctx)
{
	struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;

	dev->fill_delay = 0;
}

int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int limit;
	int err;
	int i;

	cache->wq = create_singlethread_workqueue("mkey_cache");
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		INIT_LIST_HEAD(&cache->ent[i].head);
		spin_lock_init(&cache->ent[i].lock);

		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);
		ent->order = i + 2;
		ent->dev = dev;

		if (dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE)
			limit = dev->mdev->profile->mr_cache[i].limit;
		else
			limit = 0;

		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
		ent->limit = limit;
		queue_work(cache->wq, &ent->work);
	}

	err = mlx5_mr_cache_debugfs_init(dev);
	if (err)
		mlx5_ib_warn(dev, "cache debugfs failure\n");

	return 0;
}

int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	int i;

	dev->cache.stopped = 1;
	flush_workqueue(dev->cache.wq);

	mlx5_mr_cache_debugfs_cleanup(dev);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
		clean_keys(dev, i);

	destroy_workqueue(dev->cache.wq);
	del_timer_sync(&dev->delay_timer);

	return 0;
}

struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_mkey_seg *seg;
	struct mlx5_ib_mr *mr;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	seg = &in->seg;
	seg->flags = convert_access(acc) | MLX5_ACCESS_MODE_PA;
	seg->flags_pd = cpu_to_be32(to_mpd(pd)->pdn | MLX5_MKEY_LEN64);
	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	seg->start_addr = 0;

	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, sizeof(*in), NULL, NULL,
				    NULL);
	if (err)
		goto err_in;

	kfree(in);
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

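/*
 * get_octo_len() returns the number of 16-byte "octowords" needed to hold
 * the page list of a region: each page translation entry is 8 bytes, so
 * two entries fit per octoword, hence the (npages + 1) / 2.  For example,
 * a 2MB region starting on a 4KB page boundary needs 512 entries, i.e.
 * 256 octowords.
 */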
static int get_octo_len(u64 addr, u64 len, int page_size)
{
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
	return (npages + 1) / 2;
}

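/*
 * Registrations whose page-list order is at most MLX5_MAX_UMR_SHIFT go
 * through the UMR path (reg_umr(), backed by the MR cache).  Larger
 * regions fall back to a synchronous mlx5_core_create_mkey() via
 * reg_create(), except ODP MRs, which are rejected in that case; see
 * mlx5_ib_reg_user_mr() below.
 */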
static int use_umr(int order)
{
	return order <= MLX5_MAX_UMR_SHIFT;
}

static int dma_map_mr_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int npages, int page_shift, int *size,
			  __be64 **mr_pas, dma_addr_t *dma)
{
	__be64 *pas;
	struct device *ddev = dev->ib_dev.dma_device;

	/*
	 * UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
	 * To avoid copying garbage after the pas array, we allocate
	 * a little more.
	 */
	*size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT);
	*mr_pas = kmalloc(*size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
	if (!(*mr_pas))
		return -ENOMEM;

	pas = PTR_ALIGN(*mr_pas, MLX5_UMR_ALIGN);
	mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT);
	/* Clear padding after the actual pages. */
	memset(pas + npages, 0, *size - npages * sizeof(u64));

	*dma = dma_map_single(ddev, pas, *size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, *dma)) {
		kfree(*mr_pas);
		return -ENOMEM;
	}

	return 0;
}

static void prep_umr_wqe_common(struct ib_pd *pd, struct ib_send_wr *wr,
				struct ib_sge *sg, u64 dma, int n, u32 key,
				int page_shift)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	sg->addr = dma;
	sg->length = ALIGN(sizeof(u64) * n, 64);
	sg->lkey = dev->umrc.pd->local_dma_lkey;

	wr->next = NULL;
	wr->sg_list = sg;
	if (n)
		wr->num_sge = 1;
	else
		wr->num_sge = 0;

	wr->opcode = MLX5_IB_WR_UMR;

	umrwr->npages = n;
	umrwr->page_shift = page_shift;
	umrwr->mkey = key;
}

static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
			     struct ib_sge *sg, u64 dma, int n, u32 key,
			     int page_shift, u64 virt_addr, u64 len,
			     int access_flags)
{
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	prep_umr_wqe_common(pd, wr, sg, dma, n, key, page_shift);

	wr->send_flags = 0;

	umrwr->target.virt_addr = virt_addr;
	umrwr->length = len;
	umrwr->access_flags = access_flags;
	umrwr->pd = pd;
}

static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
			       struct ib_send_wr *wr, u32 key)
{
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	wr->send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr->opcode = MLX5_IB_WR_UMR;
	umrwr->mkey = key;
}

static struct ib_umem *mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
				   int access_flags, int *npages,
				   int *page_shift, int *ncont, int *order)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct ib_umem *umem = ib_umem_get(pd->uobject->context, start, length,
					   access_flags, 0);
	if (IS_ERR(umem)) {
		mlx5_ib_err(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
		return (void *)umem;
	}

	mlx5_ib_cont_pages(umem, start, npages, page_shift, ncont, order);
	if (!*npages) {
		mlx5_ib_warn(dev, "avoid zero region\n");
		ib_umem_release(umem);
		return ERR_PTR(-EINVAL);
	}

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    *npages, *ncont, *order, *page_shift);

	return umem;
}

static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_umr_context *context =
		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);

	context->status = wc->status;
	complete(&context->done);
}

static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	context->cqe.done = mlx5_ib_umr_done;
	context->status = -1;
	init_completion(&context->done);
}

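/*
 * All UMR work requests in this file follow the same pattern: initialize a
 * mlx5_ib_umr_context (whose CQE callback completes 'done'), post the WQE
 * on the dedicated UMR QP under umrc->sem, then wait_for_completion() and
 * check umr_context.status.  reg_umr() below is the registration flavor;
 * unreg_umr() and rereg_umr() further down reuse the same scheme.
 */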
static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
				  u64 virt_addr, u64 len, int npages,
				  int page_shift, int order, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct device *ddev = dev->ib_dev.dma_device;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct mlx5_umr_wr umrwr = {};
	struct ib_send_wr *bad;
	struct mlx5_ib_mr *mr;
	struct ib_sge sg;
	int size;
	__be64 *mr_pas;
	dma_addr_t dma;
	int err = 0;
	int i;

	for (i = 0; i < 1; i++) {
		mr = alloc_cached_mr(dev, order);
		if (mr)
			break;

		err = add_keys(dev, order2idx(dev, order), 1);
		if (err && err != -EAGAIN) {
			mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
			break;
		}
	}

	if (!mr)
		return ERR_PTR(-EAGAIN);

	err = dma_map_mr_pas(dev, umem, npages, page_shift, &size, &mr_pas,
			     &dma);
	if (err)
		goto free_mr;

	mlx5_ib_init_umr_context(&umr_context);

	umrwr.wr.wr_cqe = &umr_context.cqe;
	prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
			 page_shift, virt_addr, len, access_flags);

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "post send failed, err %d\n", err);
		goto unmap_dma;
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed\n");
			err = -EFAULT;
		}
	}

	mr->mmkey.iova = virt_addr;
	mr->mmkey.size = len;
	mr->mmkey.pd = to_mpd(pd)->pdn;

	mr->live = 1;

unmap_dma:
	up(&umrc->sem);
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

	kfree(mr_pas);

free_mr:
	if (err) {
		free_cached_mr(dev, mr);
		return ERR_PTR(err);
	}

	return mr;
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
		       int zap)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct device *ddev = dev->ib_dev.dma_device;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct ib_umem *umem = mr->umem;
	int size;
	__be64 *pas;
	dma_addr_t dma;
	struct ib_send_wr *bad;
	struct mlx5_umr_wr wr;
	struct ib_sge sg;
	int err = 0;
	const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / sizeof(u64);
	const int page_index_mask = page_index_alignment - 1;
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t pages_iter = 0;
	int use_emergency_buf = 0;

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly */
	if (start_page_index & page_index_mask) {
		npages += start_page_index & page_index_mask;
		start_page_index &= ~page_index_mask;
	}

	pages_to_map = ALIGN(npages, page_index_alignment);

	if (start_page_index + pages_to_map > MLX5_MAX_UMR_PAGES)
		return -EINVAL;

	size = sizeof(u64) * pages_to_map;
	size = min_t(int, PAGE_SIZE, size);
	/* We allocate with GFP_ATOMIC to avoid recursion into page-reclaim
	 * code, when we are called from an invalidation. The pas buffer must
	 * be 2k-aligned for Connect-IB. */
	pas = (__be64 *)get_zeroed_page(GFP_ATOMIC);
	if (!pas) {
		mlx5_ib_warn(dev, "unable to allocate memory during MTT update, falling back to slower chunked mechanism.\n");
		pas = mlx5_ib_update_mtt_emergency_buffer;
		size = MLX5_UMR_MTT_MIN_CHUNK_SIZE;
		use_emergency_buf = 1;
		mutex_lock(&mlx5_ib_update_mtt_emergency_buffer_mutex);
		memset(pas, 0, size);
	}
	pages_iter = size / sizeof(u64);
	dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during MTT update.\n");
		err = -ENOMEM;
		goto free_pas;
	}

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, start_page_index += pages_iter) {
		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);

		npages = min_t(size_t,
			       pages_iter,
			       ib_umem_num_pages(umem) - start_page_index);

		if (!zap) {
			__mlx5_ib_populate_pas(dev, umem, PAGE_SHIFT,
					       start_page_index, npages, pas,
					       MLX5_IB_MTT_PRESENT);
			/* Clear padding after the pages brought from the
			 * umem. */
			memset(pas + npages, 0, size - npages * sizeof(u64));
		}

		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

		mlx5_ib_init_umr_context(&umr_context);

		memset(&wr, 0, sizeof(wr));
		wr.wr.wr_cqe = &umr_context.cqe;

		sg.addr = dma;
		sg.length = ALIGN(npages * sizeof(u64),
				  MLX5_UMR_MTT_ALIGNMENT);
		sg.lkey = dev->umrc.pd->local_dma_lkey;

		wr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
				   MLX5_IB_SEND_UMR_UPDATE_MTT;
		wr.wr.sg_list = &sg;
		wr.wr.num_sge = 1;
		wr.wr.opcode = MLX5_IB_WR_UMR;
		wr.npages = sg.length / sizeof(u64);
		wr.page_shift = PAGE_SHIFT;
		wr.mkey = mr->mmkey.key;
		wr.target.offset = start_page_index;

		down(&umrc->sem);
		err = ib_post_send(umrc->qp, &wr.wr, &bad);
		if (err) {
			mlx5_ib_err(dev, "UMR post send failed, err %d\n", err);
		} else {
			wait_for_completion(&umr_context.done);
			if (umr_context.status != IB_WC_SUCCESS) {
				mlx5_ib_err(dev, "UMR completion failed, code %d\n",
					    umr_context.status);
				err = -EFAULT;
			}
		}
		up(&umrc->sem);
	}
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_pas:
	if (!use_emergency_buf)
		free_page((unsigned long)pas);
	else
		mutex_unlock(&mlx5_ib_update_mtt_emergency_buffer_mutex);

	return err;
}
#endif

/*
 * If ibmr is NULL it will be allocated by reg_create.
 * Else, the given ibmr will be used.
 */
static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
				     u64 virt_addr, u64 length,
				     struct ib_umem *umem, int npages,
				     int page_shift, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_ib_mr *mr;
	int inlen;
	int err;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	inlen = sizeof(*in) + sizeof(*in->pas) * ((npages + 1) / 2) * 2;
	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	mlx5_ib_populate_pas(dev, umem, page_shift, in->pas,
			     pg_cap ? MLX5_IB_MTT_PRESENT : 0);

	/* The MLX5_MKEY_INBOX_PG_ACCESS bit allows setting the access flags
	 * in the page list submitted with the command. */
	in->flags = pg_cap ? cpu_to_be32(MLX5_MKEY_INBOX_PG_ACCESS) : 0;
	in->seg.flags = convert_access(access_flags) |
		MLX5_ACCESS_MODE_MTT;
	in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
	in->seg.start_addr = cpu_to_be64(virt_addr);
	in->seg.len = cpu_to_be64(length);
	in->seg.bsfs_octo_size = 0;
	in->seg.xlt_oct_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift));
	in->seg.log2_page_size = page_shift;
	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length,
							 1 << page_shift));
	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen, NULL,
				    NULL, NULL);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->umem = umem;
	mr->dev = dev;
	mr->live = 1;
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);

	return mr;

err_2:
	kvfree(in);

err_1:
	if (!ibmr)
		kfree(mr);

	return ERR_PTR(err);
}

static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			  int npages, u64 length, int access_flags)
{
	mr->npages = npages;
	atomic_add(npages, &dev->mdev->priv.reg_pages);
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.length = length;
	mr->access_flags = access_flags;
}

struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	struct ib_umem *umem;
	int page_shift;
	int npages;
	int ncont;
	int order;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);
	umem = mr_umem_get(pd, start, length, access_flags, &npages,
			   &page_shift, &ncont, &order);

	if (IS_ERR(umem))
		return (void *)umem;

	if (use_umr(order)) {
		mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
			     order, access_flags);
		if (PTR_ERR(mr) == -EAGAIN) {
			mlx5_ib_dbg(dev, "cache empty for order %d", order);
			mr = NULL;
		}
	} else if (access_flags & IB_ACCESS_ON_DEMAND) {
		err = -EINVAL;
		pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
		goto error;
	}

	if (!mr)
		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
				page_shift, access_flags);

	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto error;
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	mr->umem = umem;
	set_mr_fields(dev, mr, npages, length, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif

	return &mr->ibmr;

error:
	ib_umem_release(umem);
	return ERR_PTR(err);
}

static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct mlx5_umr_wr umrwr = {};
	struct ib_send_wr *bad;
	int err;

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return 0;

	mlx5_ib_init_umr_context(&umr_context);

	umrwr.wr.wr_cqe = &umr_context.cqe;
	prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmkey.key);

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
	if (err) {
		up(&umrc->sem);
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto error;
	} else {
		wait_for_completion(&umr_context.done);
		up(&umrc->sem);
	}
	if (umr_context.status != IB_WC_SUCCESS) {
		mlx5_ib_warn(dev, "unreg umr failed\n");
		err = -EFAULT;
		goto error;
	}
	return 0;

error:
	return err;
}

static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr,
		     u64 length, int npages, int page_shift, int order,
		     int access_flags, int flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct device *ddev = dev->ib_dev.dma_device;
	struct mlx5_ib_umr_context umr_context;
	struct ib_send_wr *bad;
	struct mlx5_umr_wr umrwr = {};
	struct ib_sge sg;
	struct umr_common *umrc = &dev->umrc;
	dma_addr_t dma = 0;
	__be64 *mr_pas = NULL;
	int size;
	int err;

	mlx5_ib_init_umr_context(&umr_context);

	umrwr.wr.wr_cqe = &umr_context.cqe;
	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;

	if (flags & IB_MR_REREG_TRANS) {
		err = dma_map_mr_pas(dev, mr->umem, npages, page_shift, &size,
				     &mr_pas, &dma);
		if (err)
			return err;

		umrwr.target.virt_addr = virt_addr;
		umrwr.length = length;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
	}

	prep_umr_wqe_common(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
			    page_shift);

	if (flags & IB_MR_REREG_PD) {
		umrwr.pd = pd;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD;
	}

	if (flags & IB_MR_REREG_ACCESS) {
		umrwr.access_flags = access_flags;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_ACCESS;
	}

	/* post send request to UMR QP */
	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);

	if (err) {
		mlx5_ib_warn(dev, "post send failed, err %d\n", err);
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed (%u)\n",
				     umr_context.status);
			err = -EFAULT;
		}
	}

	up(&umrc->sem);
	if (flags & IB_MR_REREG_TRANS) {
		dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
		kfree(mr_pas);
	}
	return err;
}

int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int new_access_flags,
			  struct ib_pd *new_pd, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
	struct mlx5_ib_mr *mr = to_mmr(ib_mr);
	struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
	int access_flags = flags & IB_MR_REREG_ACCESS ?
			    new_access_flags :
			    mr->access_flags;
	u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address;
	u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length;
	int page_shift = 0;
	int npages = 0;
	int ncont = 0;
	int order = 0;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

	if (flags != IB_MR_REREG_PD) {
		/*
		 * Replace umem. This needs to be done whether or not UMR is
		 * used.
		 */
		flags |= IB_MR_REREG_TRANS;
		ib_umem_release(mr->umem);
		mr->umem = mr_umem_get(pd, addr, len, access_flags, &npages,
				       &page_shift, &ncont, &order);
		if (IS_ERR(mr->umem)) {
			err = PTR_ERR(mr->umem);
			mr->umem = NULL;
			return err;
		}
	}

	if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
		/*
		 * UMR can't be used - MKey needs to be replaced.
		 */
		if (mr->umred) {
			err = unreg_umr(dev, mr);
			if (err)
				mlx5_ib_warn(dev, "Failed to unregister MR\n");
		} else {
			err = destroy_mkey(dev, mr);
			if (err)
				mlx5_ib_warn(dev, "Failed to destroy MKey\n");
		}
		if (err)
			return err;

		mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
				page_shift, access_flags);

		if (IS_ERR(mr))
			return PTR_ERR(mr);

		mr->umred = 0;
	} else {
		/*
		 * Send a UMR WQE
		 */
		err = rereg_umr(pd, mr, addr, len, npages, page_shift,
				order, access_flags, flags);
		if (err) {
			mlx5_ib_warn(dev, "Failed to rereg UMR\n");
			return err;
		}
	}

	if (flags & IB_MR_REREG_PD) {
		ib_mr->pd = pd;
		mr->mmkey.pd = to_mpd(pd)->pdn;
	}

	if (flags & IB_MR_REREG_ACCESS)
		mr->access_flags = access_flags;

	if (flags & IB_MR_REREG_TRANS) {
		atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
		set_mr_fields(dev, mr, npages, len, access_flags);
		mr->mmkey.iova = addr;
		mr->mmkey.size = len;
	}
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif

	return 0;
}

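/*
 * Fast-register and signature MRs keep their descriptor list (MTT or KLM
 * entries) in a driver-allocated buffer.  mlx5_alloc_priv_descs() below
 * over-allocates by up to MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN bytes so
 * that PTR_ALIGN() can return a 2KB-aligned descriptor array, which is
 * then DMA-mapped once for the lifetime of the MR.
 */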
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001393static int
1394mlx5_alloc_priv_descs(struct ib_device *device,
1395 struct mlx5_ib_mr *mr,
1396 int ndescs,
1397 int desc_size)
1398{
1399 int size = ndescs * desc_size;
1400 int add_size;
1401 int ret;
1402
1403 add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
1404
1405 mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
1406 if (!mr->descs_alloc)
1407 return -ENOMEM;
1408
1409 mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);
1410
1411 mr->desc_map = dma_map_single(device->dma_device, mr->descs,
1412 size, DMA_TO_DEVICE);
1413 if (dma_mapping_error(device->dma_device, mr->desc_map)) {
1414 ret = -ENOMEM;
1415 goto err;
1416 }
1417
1418 return 0;
1419err:
1420 kfree(mr->descs_alloc);
1421
1422 return ret;
1423}
1424
1425static void
1426mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
1427{
1428 if (mr->descs) {
1429 struct ib_device *device = mr->ibmr.device;
1430 int size = mr->max_descs * mr->desc_size;
1431
1432 dma_unmap_single(device->dma_device, mr->desc_map,
1433 size, DMA_TO_DEVICE);
1434 kfree(mr->descs_alloc);
1435 mr->descs = NULL;
1436 }
1437}
1438
Haggai Eran6aec21f2014-12-11 17:04:23 +02001439static int clean_mr(struct mlx5_ib_mr *mr)
Eli Cohene126ba92013-07-07 17:25:49 +03001440{
Haggai Eran6aec21f2014-12-11 17:04:23 +02001441 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
Eli Cohene126ba92013-07-07 17:25:49 +03001442 int umred = mr->umred;
1443 int err;
1444
Sagi Grimberg8b91ffc2015-07-30 10:32:34 +03001445 if (mr->sig) {
1446 if (mlx5_core_destroy_psv(dev->mdev,
1447 mr->sig->psv_memory.psv_idx))
1448 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
1449 mr->sig->psv_memory.psv_idx);
1450 if (mlx5_core_destroy_psv(dev->mdev,
1451 mr->sig->psv_wire.psv_idx))
1452 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
1453 mr->sig->psv_wire.psv_idx);
1454 kfree(mr->sig);
1455 mr->sig = NULL;
1456 }
1457
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001458 mlx5_free_priv_descs(mr);
1459
Eli Cohene126ba92013-07-07 17:25:49 +03001460 if (!umred) {
Haggai Eranb4cfe442014-12-11 17:04:26 +02001461 err = destroy_mkey(dev, mr);
Eli Cohene126ba92013-07-07 17:25:49 +03001462 if (err) {
1463 mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
Matan Baraka606b0f2016-02-29 18:05:28 +02001464 mr->mmkey.key, err);
Eli Cohene126ba92013-07-07 17:25:49 +03001465 return err;
1466 }
1467 } else {
1468 err = unreg_umr(dev, mr);
1469 if (err) {
1470 mlx5_ib_warn(dev, "failed unregister\n");
1471 return err;
1472 }
1473 free_cached_mr(dev, mr);
1474 }
1475
Eli Cohene126ba92013-07-07 17:25:49 +03001476 if (!umred)
1477 kfree(mr);
1478
1479 return 0;
1480}
1481
Haggai Eran6aec21f2014-12-11 17:04:23 +02001482int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
1483{
1484 struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
1485 struct mlx5_ib_mr *mr = to_mmr(ibmr);
1486 int npages = mr->npages;
1487 struct ib_umem *umem = mr->umem;
1488
1489#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
Haggai Eranb4cfe442014-12-11 17:04:26 +02001490 if (umem && umem->odp_data) {
1491 /* Prevent new page faults from succeeding */
1492 mr->live = 0;
Haggai Eran6aec21f2014-12-11 17:04:23 +02001493 /* Wait for all running page-fault handlers to finish. */
1494 synchronize_srcu(&dev->mr_srcu);
Haggai Eranb4cfe442014-12-11 17:04:26 +02001495 /* Destroy all page mappings */
1496 mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
1497 ib_umem_end(umem));
1498 /*
1499 * We kill the umem before the MR for ODP,
1500 * so that there will not be any invalidations in
1501 * flight, looking at the *mr struct.
1502 */
1503 ib_umem_release(umem);
1504 atomic_sub(npages, &dev->mdev->priv.reg_pages);
1505
1506 /* Avoid double-freeing the umem. */
1507 umem = NULL;
1508 }
Haggai Eran6aec21f2014-12-11 17:04:23 +02001509#endif
1510
1511 clean_mr(mr);
1512
1513 if (umem) {
1514 ib_umem_release(umem);
1515 atomic_sub(npages, &dev->mdev->priv.reg_pages);
1516 }
1517
1518 return 0;
1519}
1520
Sagi Grimberg9bee1782015-07-30 10:32:35 +03001521struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
1522 enum ib_mr_type mr_type,
1523 u32 max_num_sg)
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001524{
1525 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1526 struct mlx5_create_mkey_mbox_in *in;
1527 struct mlx5_ib_mr *mr;
Sagi Grimbergb005d312016-02-29 19:07:33 +02001528 int ndescs = ALIGN(max_num_sg, 4);
1529 int err;
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001530
1531 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1532 if (!mr)
1533 return ERR_PTR(-ENOMEM);
1534
1535 in = kzalloc(sizeof(*in), GFP_KERNEL);
1536 if (!in) {
1537 err = -ENOMEM;
1538 goto err_free;
1539 }
1540
Haggai Eran968e78d2014-12-11 17:04:11 +02001541 in->seg.status = MLX5_MKEY_STATUS_FREE;
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001542 in->seg.xlt_oct_size = cpu_to_be32(ndescs);
1543 in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
1544 in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001545
Sagi Grimberg9bee1782015-07-30 10:32:35 +03001546 if (mr_type == IB_MR_TYPE_MEM_REG) {
Sagi Grimbergb005d312016-02-29 19:07:33 +02001547 mr->access_mode = MLX5_ACCESS_MODE_MTT;
Sagi Grimberg9bee1782015-07-30 10:32:35 +03001548 in->seg.log2_page_size = PAGE_SHIFT;
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001549
1550 err = mlx5_alloc_priv_descs(pd->device, mr,
1551 ndescs, sizeof(u64));
1552 if (err)
1553 goto err_free_in;
1554
1555 mr->desc_size = sizeof(u64);
1556 mr->max_descs = ndescs;
Sagi Grimbergb005d312016-02-29 19:07:33 +02001557 } else if (mr_type == IB_MR_TYPE_SG_GAPS) {
1558 mr->access_mode = MLX5_ACCESS_MODE_KLM;
1559
1560 err = mlx5_alloc_priv_descs(pd->device, mr,
1561 ndescs, sizeof(struct mlx5_klm));
1562 if (err)
1563 goto err_free_in;
1564 mr->desc_size = sizeof(struct mlx5_klm);
1565 mr->max_descs = ndescs;
Sagi Grimberg9bee1782015-07-30 10:32:35 +03001566 } else if (mr_type == IB_MR_TYPE_SIGNATURE) {
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001567 u32 psv_index[2];
1568
1569 in->seg.flags_pd = cpu_to_be32(be32_to_cpu(in->seg.flags_pd) |
1570 MLX5_MKEY_BSF_EN);
1571 in->seg.bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
1572 mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
1573 if (!mr->sig) {
1574 err = -ENOMEM;
1575 goto err_free_in;
1576 }
1577
1578 /* create mem & wire PSVs */
Jack Morgenstein9603b612014-07-28 23:30:22 +03001579 err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001580 2, psv_index);
1581 if (err)
1582 goto err_free_sig;
1583
Sagi Grimbergb005d312016-02-29 19:07:33 +02001584 mr->access_mode = MLX5_ACCESS_MODE_KLM;
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001585 mr->sig->psv_memory.psv_idx = psv_index[0];
1586 mr->sig->psv_wire.psv_idx = psv_index[1];
Sagi Grimbergd5436ba2014-02-23 14:19:12 +02001587
1588 mr->sig->sig_status_checked = true;
1589 mr->sig->sig_err_exists = false;
1590 /* Next UMR, Arm SIGERR */
1591 ++mr->sig->sigerr_count;
Sagi Grimberg9bee1782015-07-30 10:32:35 +03001592 } else {
1593 mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
1594 err = -EINVAL;
1595 goto err_free_in;
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001596 }
1597
Sagi Grimbergb005d312016-02-29 19:07:33 +02001598 in->seg.flags = MLX5_PERM_UMR_EN | mr->access_mode;
Matan Baraka606b0f2016-02-29 18:05:28 +02001599 err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, sizeof(*in),
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001600 NULL, NULL, NULL);
1601 if (err)
1602 goto err_destroy_psv;
1603
Matan Baraka606b0f2016-02-29 18:05:28 +02001604 mr->ibmr.lkey = mr->mmkey.key;
1605 mr->ibmr.rkey = mr->mmkey.key;
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001606 mr->umem = NULL;
1607 kfree(in);
1608
1609 return &mr->ibmr;
1610
1611err_destroy_psv:
1612 if (mr->sig) {
Jack Morgenstein9603b612014-07-28 23:30:22 +03001613 if (mlx5_core_destroy_psv(dev->mdev,
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001614 mr->sig->psv_memory.psv_idx))
1615 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
1616 mr->sig->psv_memory.psv_idx);
Jack Morgenstein9603b612014-07-28 23:30:22 +03001617 if (mlx5_core_destroy_psv(dev->mdev,
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001618 mr->sig->psv_wire.psv_idx))
1619 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
1620 mr->sig->psv_wire.psv_idx);
1621 }
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001622 mlx5_free_priv_descs(mr);
Sagi Grimberg3121e3c2014-02-23 14:19:06 +02001623err_free_sig:
1624 kfree(mr->sig);
1625err_free_in:
1626 kfree(in);
1627err_free:
1628 kfree(mr);
1629 return ERR_PTR(err);
1630}
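
/*
 * Illustrative sketch only (not part of this driver): kernel consumers
 * reach the allocation path above through the ib_alloc_mr() verb, with
 * pd assumed to be an existing protection domain, e.g.:
 *
 *	struct ib_mr *mr;
 *
 *	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *	...
 *	ib_dereg_mr(mr);
 *
 * The max_num_sg value (16 here) becomes ndescs above, rounded up to a
 * multiple of four, and bounds how many pages or KLMs a later
 * ib_map_mr_sg() call may consume.
 */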
1631
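/*
 * Allocate a memory window.  The window is backed by a KLM-mode mkey
 * created in the free state; for type-2 windows MLX5_MKEY_REMOTE_INVAL
 * is set so the window can be invalidated through its rkey, matching
 * type-2 MW semantics.
 */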
Matan Barakd2370e02016-02-29 18:05:30 +02001632struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
1633 struct ib_udata *udata)
1634{
1635 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1636 struct mlx5_create_mkey_mbox_in *in = NULL;
1637 struct mlx5_ib_mw *mw = NULL;
1638 int ndescs;
1639 int err;
1640 struct mlx5_ib_alloc_mw req = {};
1641 struct {
1642 __u32 comp_mask;
1643 __u32 response_length;
1644 } resp = {};
1645
1646 err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
1647 if (err)
1648 return ERR_PTR(err);
1649
1650 if (req.comp_mask || req.reserved1 || req.reserved2)
1651 return ERR_PTR(-EOPNOTSUPP);
1652
1653 if (udata->inlen > sizeof(req) &&
1654 !ib_is_udata_cleared(udata, sizeof(req),
1655 udata->inlen - sizeof(req)))
1656 return ERR_PTR(-EOPNOTSUPP);
1657
1658 ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);
1659
1660 mw = kzalloc(sizeof(*mw), GFP_KERNEL);
1661 in = kzalloc(sizeof(*in), GFP_KERNEL);
1662 if (!mw || !in) {
1663 err = -ENOMEM;
1664 goto free;
1665 }
1666
1667 in->seg.status = MLX5_MKEY_STATUS_FREE;
1668 in->seg.xlt_oct_size = cpu_to_be32(ndescs);
1669 in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
1670 in->seg.flags = MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_KLM |
1671 MLX5_PERM_LOCAL_READ;
1672 if (type == IB_MW_TYPE_2)
1673 in->seg.flags_pd |= cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
1674 in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
1675
1676 err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, sizeof(*in),
1677 NULL, NULL, NULL);
1678 if (err)
1679 goto free;
1680
1681 mw->ibmw.rkey = mw->mmkey.key;
1682
1683 resp.response_length = min(offsetof(typeof(resp), response_length) +
1684 sizeof(resp.response_length), udata->outlen);
1685 if (resp.response_length) {
1686 err = ib_copy_to_udata(udata, &resp, resp.response_length);
1687 if (err) {
1688 mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
1689 goto free;
1690 }
1691 }
1692
1693 kfree(in);
1694 return &mw->ibmw;
1695
1696free:
1697 kfree(mw);
1698 kfree(in);
1699 return ERR_PTR(err);
1700}
1701
1702int mlx5_ib_dealloc_mw(struct ib_mw *mw)
1703{
1704 struct mlx5_ib_mw *mmw = to_mmw(mw);
1705 int err;
1706
1707 err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
1708 &mmw->mmkey);
1709 if (!err)
1710 kfree(mmw);
1711 return err;
1712}
1713
Sagi Grimbergd5436ba2014-02-23 14:19:12 +02001714int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
1715 struct ib_mr_status *mr_status)
1716{
1717 struct mlx5_ib_mr *mmr = to_mmr(ibmr);
1718 int ret = 0;
1719
1720 if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
1721 pr_err("Invalid status check mask\n");
1722 ret = -EINVAL;
1723 goto done;
1724 }
1725
1726 mr_status->fail_status = 0;
1727 if (check_mask & IB_MR_CHECK_SIG_STATUS) {
1728 if (!mmr->sig) {
1729 ret = -EINVAL;
1730 pr_err("signature status check requested on a non-signature enabled MR\n");
1731 goto done;
1732 }
1733
1734 mmr->sig->sig_status_checked = true;
1735 if (!mmr->sig->sig_err_exists)
1736 goto done;
1737
1738 if (ibmr->lkey == mmr->sig->err_item.key)
1739 memcpy(&mr_status->sig_err, &mmr->sig->err_item,
1740 sizeof(mr_status->sig_err));
1741 else {
1742 mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
1743 mr_status->sig_err.sig_err_offset = 0;
1744 mr_status->sig_err.key = mmr->sig->err_item.key;
1745 }
1746
1747 mmr->sig->sig_err_exists = false;
1748 mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
1749 }
1750
1751done:
1752 return ret;
1753}
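
/*
 * Illustrative sketch only (not part of this driver): after a transfer
 * on a signature-enabled MR completes, a ULP would typically check for
 * signature errors roughly like this (mr and the surrounding request
 * handling are assumed):
 *
 *	struct ib_mr_status mr_status;
 *
 *	if (ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &mr_status))
 *		return;
 *	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS)
 *		pr_err("sig error type %d at offset %llu, key 0x%x\n",
 *		       mr_status.sig_err.err_type,
 *		       mr_status.sig_err.sig_err_offset,
 *		       mr_status.sig_err.key);
 */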
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001754
Sagi Grimbergb005d312016-02-29 19:07:33 +02001755static int
1756mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
1757 struct scatterlist *sgl,
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001758 unsigned short sg_nents,
Bart Van Assche9aa8b322016-05-12 10:49:15 -07001759 unsigned int *sg_offset_p)
Sagi Grimbergb005d312016-02-29 19:07:33 +02001760{
1761 struct scatterlist *sg = sgl;
1762 struct mlx5_klm *klms = mr->descs;
Bart Van Assche9aa8b322016-05-12 10:49:15 -07001763 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
Sagi Grimbergb005d312016-02-29 19:07:33 +02001764 u32 lkey = mr->ibmr.pd->local_dma_lkey;
1765 int i;
1766
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001767 mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
Sagi Grimbergb005d312016-02-29 19:07:33 +02001768 mr->ibmr.length = 0;
1769
1770	for_each_sg(sgl, sg, sg_nents, i) {
1771		if (unlikely(i >= mr->max_descs))
1772			break;
1773		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
1774		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
1775		klms[i].key = cpu_to_be32(lkey);
1776		mr->ibmr.length += sg_dma_len(sg) - sg_offset;
1777
1778		sg_offset = 0;
1779	}
1780	mr->ndescs = i;
1781
Bart Van Assche9aa8b322016-05-12 10:49:15 -07001782 if (sg_offset_p)
1783 *sg_offset_p = sg_offset;
1784
Sagi Grimbergb005d312016-02-29 19:07:33 +02001785 return i;
1786}
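
/*
 * Each KLM entry built above is a {virtual address, byte count, lkey}
 * triple, so unlike the page list produced by mlx5_set_page() below it
 * can describe SG elements of arbitrary length and alignment, hence its
 * use for IB_MR_TYPE_SG_GAPS MRs.
 */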
1787
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001788static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
1789{
1790 struct mlx5_ib_mr *mr = to_mmr(ibmr);
1791 __be64 *descs;
1792
1793 if (unlikely(mr->ndescs == mr->max_descs))
1794 return -ENOMEM;
1795
1796 descs = mr->descs;
1797 descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
1798
1799 return 0;
1800}
1801
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001802int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
Bart Van Assche9aa8b322016-05-12 10:49:15 -07001803 unsigned int *sg_offset)
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001804{
1805 struct mlx5_ib_mr *mr = to_mmr(ibmr);
1806 int n;
1807
1808 mr->ndescs = 0;
1809
1810 ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
1811 mr->desc_size * mr->max_descs,
1812 DMA_TO_DEVICE);
1813
Sagi Grimbergb005d312016-02-29 19:07:33 +02001814 if (mr->access_mode == MLX5_ACCESS_MODE_KLM)
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001815 n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset);
Sagi Grimbergb005d312016-02-29 19:07:33 +02001816 else
Christoph Hellwigff2ba992016-05-03 18:01:04 +02001817 n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
1818 mlx5_set_page);
Sagi Grimberg8a187ee2015-10-13 19:11:26 +03001819
1820 ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
1821 mr->desc_size * mr->max_descs,
1822 DMA_TO_DEVICE);
1823
1824 return n;
1825}
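
/*
 * Illustrative sketch only (not part of this driver): the usual fast-
 * registration flow a ULP drives on top of ib_map_mr_sg().  The qp, mr,
 * sg and sg_nents names below are assumed to come from the caller, and
 * the SG list is assumed to be DMA-mapped already:
 *
 *	struct ib_reg_wr reg_wr = {};
 *	struct ib_send_wr *bad_wr;
 *	int n;
 *
 *	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
 *	if (unlikely(n != sg_nents))
 *		return n < 0 ? n : -EINVAL;
 *
 *	reg_wr.wr.opcode = IB_WR_REG_MR;
 *	reg_wr.mr = mr;
 *	reg_wr.key = mr->rkey;
 *	reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;
 *	if (ib_post_send(qp, &reg_wr.wr, &bad_wr))
 *		return -EIO;
 *
 * mlx5_ib_map_mr_sg() only loads the descriptor list into mr->descs; it
 * is the REG_MR (UMR) work request that actually makes the mkey valid on
 * the wire.
 */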