/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */


#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include "mlx5_ib.h"

enum {
	MAX_PENDING_REG_MR = 8,
};

enum {
	MLX5_UMR_ALIGN	= 2048
};

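/*
 * Round 'ptr' up to the next 'align'-byte boundary; 'align' must be a
 * power of two.  Used below to meet the MLX5_UMR_ALIGN requirement on
 * UMR page lists.
 */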
static __be64 *mr_align(__be64 *ptr, int align)
{
	unsigned long mask = align - 1;

	return (__be64 *)(((unsigned long)ptr + mask) & ~mask);
}

static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	else
		return order - cache->ent[0].order;
}

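/*
 * Completion callback for the asynchronous mlx5_core_create_mkey()
 * requests issued by add_keys().  On success the new MR is appended to
 * its cache entry and the mkey is inserted into the device's mr radix
 * tree; on failure the MR is freed and cache filling is throttled for
 * about a second via the delay timer.
 */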
static void reg_mr_callback(int status, void *context)
{
	struct mlx5_ib_mr *mr = context;
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	u8 key;
	unsigned long flags;
	struct mlx5_mr_table *table = &dev->mdev->priv.mr_table;
	int err;

	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	if (mr->out.hdr.status) {
		mlx5_ib_warn(dev, "failed - status %d, syndrome 0x%x\n",
			     mr->out.hdr.status,
			     be32_to_cpu(mr->out.hdr.syndrome));
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
	mr->mmr.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	ent->size++;
	spin_unlock_irqrestore(&ent->lock, flags);

	write_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmr.key),
				&mr->mmr);
	if (err)
		pr_err("Error inserting to mr tree. 0x%x\n", -err);
	write_unlock_irqrestore(&table->lock, flags);
}

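/*
 * Post up to 'num' asynchronous mkey creation requests for cache entry
 * 'c'.  At most MAX_PENDING_REG_MR requests may be outstanding per
 * entry; hitting that cap returns -EAGAIN so callers can retry later.
 * Completions are handled by reg_mr_callback() above.
 */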
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_ib_mr *mr;
	int npages = 1 << ent->order;
	int err = 0;
	int i;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			break;
		}

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		mr->order = ent->order;
		mr->umred = 1;
		mr->dev = dev;
		in->seg.status = 1 << 6;
		in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
		in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
		in->seg.flags = MLX5_ACCESS_MODE_MTT | MLX5_PERM_UMR_EN;
		in->seg.log2_page_size = 12;

		spin_lock_irq(&ent->lock);
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in,
					    sizeof(*in), reg_mr_callback,
					    mr, &mr->out);
		if (err) {
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}

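/*
 * Pop up to 'num' MRs off the head of cache entry 'c' and destroy their
 * mkeys, stopping early if the entry runs empty.
 */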
static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;
	int i;

	for (i = 0; i < num; i++) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}

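/*
 * debugfs interface: each cache entry exposes writable "size" and
 * "limit" files (alongside "cur" and "miss" counters) under the
 * device's mr_cache directory, so the pool size and low-water mark can
 * be tuned at runtime.  See mlx5_mr_cache_debugfs_init() below.
 */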
static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	u32 var;
	int err;
	int c;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var < ent->limit)
		return -EINVAL;

	if (var > ent->size) {
		do {
			err = add_keys(dev, c, var - ent->size);
			if (err && err != -EAGAIN)
				return err;

			usleep_range(3000, 5000);
		} while (err);
	} else if (var < ent->size) {
		remove_keys(dev, c, ent->size - var);
	}

	return count;
}

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};

static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	u32 var;
	int err;
	int c;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var > ent->size)
		return -EINVAL;

	ent->limit = var;

	if (ent->cur < ent->limit) {
		err = add_keys(dev, c, 2 * ent->limit - ent->cur);
		if (err)
			return err;
	}

	return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};

static int someone_adding(struct mlx5_mr_cache *cache)
{
	int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		if (cache->ent[i].cur < cache->ent[i].limit)
			return 1;
	}

	return 0;
}

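/*
 * Background maintenance for one cache entry: top the entry up towards
 * 2 * limit while filling is not throttled, and shrink it back once it
 * has stayed above 2 * limit - but only when no entry is below its
 * limit and nothing has been added for 300 seconds.
 */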
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);
	int err;

	if (cache->stopped)
		return;

	ent = &dev->cache.ent[i];
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err == -EAGAIN) {
				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
					    i + 2);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(3));
			} else if (err) {
				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
					     i + 2, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			} else {
				queue_work(cache->wq, &ent->work);
			}
		}
	} else if (ent->cur > 2 * ent->limit) {
		if (!someone_adding(cache) &&
		    time_after(jiffies, cache->last_add + 300 * HZ)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
		} else {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		}
	}
}

static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}

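/*
 * Take an MR from the cache.  The search starts at the entry matching
 * 'order' and falls back to larger orders if that entry is empty; every
 * entry we touch is kicked to refill in the background.  Returns NULL
 * (and bumps the miss counter) when nothing suitable is cached.
 */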
static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;
	int c;
	int i;

	c = order2idx(dev, order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
		return NULL;
	}

	for (i = c; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];

		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			break;
		}
		spin_unlock_irq(&ent->lock);

		queue_work(cache->wq, &ent->work);

		if (mr)
			break;
	}

	if (!mr)
		cache->ent[c].miss++;

	return mr;
}

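/*
 * Return an MR to its cache entry; if the entry has grown past
 * 2 * limit, schedule the work item so the surplus gets trimmed.
 */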
static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int shrink = 0;
	int c;

	c = order2idx(dev, mr->order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
		return;
	}
	ent = &cache->ent[c];
	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	if (ent->cur > 2 * ent->limit)
		shrink = 1;
	spin_unlock_irq(&ent->lock);

	if (shrink)
		queue_work(cache->wq, &ent->work);
}

static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;

	cancel_delayed_work(&ent->dwork);
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}

static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int i;

	if (!mlx5_debugfs_root)
		return 0;

	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
	if (!cache->root)
		return -ENOMEM;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		sprintf(ent->name, "%d", ent->order);
		ent->dir = debugfs_create_dir(ent->name, cache->root);
		if (!ent->dir)
			return -ENOMEM;

		ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
						 &size_fops);
		if (!ent->fsize)
			return -ENOMEM;

		ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
						  &limit_fops);
		if (!ent->flimit)
			return -ENOMEM;

		ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
					       &ent->cur);
		if (!ent->fcur)
			return -ENOMEM;

		ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
						&ent->miss);
		if (!ent->fmiss)
			return -ENOMEM;
	}

	return 0;
}

static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root)
		return;

	debugfs_remove_recursive(dev->cache.root);
}

static void delay_time_func(unsigned long ctx)
{
	struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;

	dev->fill_delay = 0;
}

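/*
 * Set up the MR cache: one workqueue, the fill-throttle timer, and
 * MAX_MR_CACHE_ENTRIES entries covering orders 2 and up.  Per-entry
 * limits come from the core driver profile when MLX5_PROF_MASK_MR_CACHE
 * is set; each entry's work item is queued immediately to pre-fill it.
 */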
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int limit;
	int err;
	int i;

	cache->wq = create_singlethread_workqueue("mkey_cache");
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		INIT_LIST_HEAD(&cache->ent[i].head);
		spin_lock_init(&cache->ent[i].lock);

		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);
		ent->order = i + 2;
		ent->dev = dev;

		if (dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE)
			limit = dev->mdev->profile->mr_cache[i].limit;
		else
			limit = 0;

		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
		ent->limit = limit;
		queue_work(cache->wq, &ent->work);
	}

	err = mlx5_mr_cache_debugfs_init(dev);
	if (err)
		mlx5_ib_warn(dev, "cache debugfs failure\n");

	return 0;
}

int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	int i;

	dev->cache.stopped = 1;
	flush_workqueue(dev->cache.wq);

	mlx5_mr_cache_debugfs_cleanup(dev);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
		clean_keys(dev, i);

	destroy_workqueue(dev->cache.wq);
	del_timer_sync(&dev->delay_timer);

	return 0;
}

struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_mkey_seg *seg;
	struct mlx5_ib_mr *mr;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	seg = &in->seg;
	seg->flags = convert_access(acc) | MLX5_ACCESS_MODE_PA;
	seg->flags_pd = cpu_to_be32(to_mpd(pd)->pdn | MLX5_MKEY_LEN64);
	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	seg->start_addr = 0;

	err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in), NULL, NULL,
				    NULL);
	if (err)
		goto err_in;

	kfree(in);
	mr->ibmr.lkey = mr->mmr.key;
	mr->ibmr.rkey = mr->mmr.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

static int get_octo_len(u64 addr, u64 len, int page_size)
{
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
	return (npages + 1) / 2;
}

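/*
 * Registrations of up to 2^17 pages go through the UMR path (a cached
 * mkey updated by a UMR work request); anything larger falls back to
 * the firmware command path in reg_create().
 */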
static int use_umr(int order)
{
	return order <= 17;
}

static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
			     struct ib_sge *sg, u64 dma, int n, u32 key,
			     int page_shift, u64 virt_addr, u64 len,
			     int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct ib_mr *mr = dev->umrc.mr;

	sg->addr = dma;
	sg->length = ALIGN(sizeof(u64) * n, 64);
	sg->lkey = mr->lkey;

	wr->next = NULL;
	wr->send_flags = 0;
	wr->sg_list = sg;
	if (n)
		wr->num_sge = 1;
	else
		wr->num_sge = 0;

	wr->opcode = MLX5_IB_WR_UMR;
	wr->wr.fast_reg.page_list_len = n;
	wr->wr.fast_reg.page_shift = page_shift;
	wr->wr.fast_reg.rkey = key;
	wr->wr.fast_reg.iova_start = virt_addr;
	wr->wr.fast_reg.length = len;
	wr->wr.fast_reg.access_flags = access_flags;
	wr->wr.fast_reg.page_list = (struct ib_fast_reg_page_list *)pd;
}

static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
			       struct ib_send_wr *wr, u32 key)
{
	wr->send_flags = MLX5_IB_SEND_UMR_UNREG;
	wr->opcode = MLX5_IB_WR_UMR;
	wr->wr.fast_reg.rkey = key;
}

void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
{
	struct mlx5_ib_umr_context *context;
	struct ib_wc wc;
	int err;

	while (1) {
		err = ib_poll_cq(cq, 1, &wc);
		if (err < 0) {
			pr_warn("poll cq error %d\n", err);
			return;
		}
		if (err == 0)
			break;

		context = (struct mlx5_ib_umr_context *) (unsigned long) wc.wr_id;
		context->status = wc.status;
		complete(&context->done);
	}
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
}

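/*
 * Register a user memory region through the UMR QP: take a pre-created
 * mkey from the cache, DMA-map the MLX5_UMR_ALIGN-aligned page list,
 * post a UMR work request describing the new translation, and wait for
 * its completion (signalled from mlx5_umr_cq_handler()).  Returns
 * ERR_PTR(-EAGAIN) if the cache cannot supply an MR right now.
 */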
static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
				  u64 virt_addr, u64 len, int npages,
				  int page_shift, int order, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct device *ddev = dev->ib_dev.dma_device;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct ib_send_wr wr, *bad;
	struct mlx5_ib_mr *mr;
	struct ib_sge sg;
	int size = sizeof(u64) * npages;
	int err = 0;
	int i;

	for (i = 0; i < 1; i++) {
		mr = alloc_cached_mr(dev, order);
		if (mr)
			break;

		err = add_keys(dev, order2idx(dev, order), 1);
		if (err && err != -EAGAIN) {
			mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
			break;
		}
	}

	if (!mr)
		return ERR_PTR(-EAGAIN);

	mr->pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
	if (!mr->pas) {
		err = -ENOMEM;
		goto free_mr;
	}

	mlx5_ib_populate_pas(dev, umem, page_shift,
			     mr_align(mr->pas, MLX5_UMR_ALIGN), 1);

	mr->dma = dma_map_single(ddev, mr_align(mr->pas, MLX5_UMR_ALIGN), size,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, mr->dma)) {
		err = -ENOMEM;
		goto free_pas;
	}

	memset(&wr, 0, sizeof(wr));
	wr.wr_id = (u64)(unsigned long)&umr_context;
	prep_umr_reg_wqe(pd, &wr, &sg, mr->dma, npages, mr->mmr.key, page_shift, virt_addr, len, access_flags);

	mlx5_ib_init_umr_context(&umr_context);
	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "post send failed, err %d\n", err);
		goto unmap_dma;
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed\n");
			err = -EFAULT;
		}
	}

	mr->mmr.iova = virt_addr;
	mr->mmr.size = len;
	mr->mmr.pd = to_mpd(pd)->pdn;

unmap_dma:
	up(&umrc->sem);
	dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);

free_pas:
	kfree(mr->pas);

free_mr:
	if (err) {
		free_cached_mr(dev, mr);
		return ERR_PTR(err);
	}

	return mr;
}

static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
				     u64 length, struct ib_umem *umem,
				     int npages, int page_shift,
				     int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_ib_mr *mr;
	int inlen;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	inlen = sizeof(*in) + sizeof(*in->pas) * ((npages + 1) / 2) * 2;
	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	mlx5_ib_populate_pas(dev, umem, page_shift, in->pas, 0);

	in->seg.flags = convert_access(access_flags) |
		MLX5_ACCESS_MODE_MTT;
	in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
	in->seg.start_addr = cpu_to_be64(virt_addr);
	in->seg.len = cpu_to_be64(length);
	in->seg.bsfs_octo_size = 0;
	in->seg.xlt_oct_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift));
	in->seg.log2_page_size = page_shift;
	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length,
							 1 << page_shift));
	err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, inlen, NULL,
				    NULL, NULL);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->umem = umem;
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmr.key);

	return mr;

err_2:
	kvfree(in);

err_1:
	kfree(mr);

	return ERR_PTR(err);
}

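/*
 * ib_reg_user_mr entry point: pin the user pages, work out the best
 * contiguous page size for the region, register via the fast UMR path
 * when the size allows (falling back to reg_create() on a cache miss
 * or for large regions), and account the pinned pages on the device.
 */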
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	struct ib_umem *umem;
	int page_shift;
	int npages;
	int ncont;
	int order;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);
	umem = ib_umem_get(pd->uobject->context, start, length, access_flags,
			   0);
	if (IS_ERR(umem)) {
		mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
		return (void *)umem;
	}

	mlx5_ib_cont_pages(umem, start, &npages, &page_shift, &ncont, &order);
	if (!npages) {
		mlx5_ib_warn(dev, "avoid zero region\n");
		err = -EINVAL;
		goto error;
	}

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    npages, ncont, order, page_shift);

	if (use_umr(order)) {
		mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
			     order, access_flags);
		if (PTR_ERR(mr) == -EAGAIN) {
			mlx5_ib_dbg(dev, "cache empty for order %d", order);
			mr = NULL;
		}
	}

	if (!mr)
		mr = reg_create(pd, virt_addr, length, umem, ncont, page_shift,
				access_flags);

	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto error;
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmr.key);

	mr->umem = umem;
	mr->npages = npages;
	spin_lock(&dev->mr_lock);
	dev->mdev->priv.reg_pages += npages;
	spin_unlock(&dev->mr_lock);
	mr->ibmr.lkey = mr->mmr.key;
	mr->ibmr.rkey = mr->mmr.key;

	return &mr->ibmr;

error:
	ib_umem_release(umem);
	return ERR_PTR(err);
}

static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct ib_send_wr wr, *bad;
	int err;

	memset(&wr, 0, sizeof(wr));
	wr.wr_id = (u64)(unsigned long)&umr_context;
	prep_umr_unreg_wqe(dev, &wr, mr->mmr.key);

	mlx5_ib_init_umr_context(&umr_context);
	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &wr, &bad);
	if (err) {
		up(&umrc->sem);
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto error;
	} else {
		wait_for_completion(&umr_context.done);
		up(&umrc->sem);
	}
	if (umr_context.status != IB_WC_SUCCESS) {
		mlx5_ib_warn(dev, "unreg umr failed\n");
		err = -EFAULT;
		goto error;
	}
	return 0;

error:
	return err;
}

int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct ib_umem *umem = mr->umem;
	int npages = mr->npages;
	int umred = mr->umred;
	int err;

	if (!umred) {
		err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
		if (err) {
			mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
				     mr->mmr.key, err);
			return err;
		}
	} else {
		err = unreg_umr(dev, mr);
		if (err) {
			mlx5_ib_warn(dev, "failed unregister\n");
			return err;
		}
		free_cached_mr(dev, mr);
	}

	if (umem) {
		ib_umem_release(umem);
		spin_lock(&dev->mr_lock);
		dev->mdev->priv.reg_pages -= npages;
		spin_unlock(&dev->mr_lock);
	}

	if (!umred)
		kfree(mr);

	return 0;
}

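/*
 * Allocate a free mkey for fast registration.  With IB_MR_SIGNATURE_EN
 * the mkey is set up in KLM access mode with a BSF and a pair of PSVs
 * (memory and wire) so it can carry signature-protection state.
 */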
struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
				struct ib_mr_init_attr *mr_init_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_ib_mr *mr;
	int access_mode, err;
	int ndescs = roundup(mr_init_attr->max_reg_descriptors, 4);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	in->seg.status = 1 << 6; /* free */
	in->seg.xlt_oct_size = cpu_to_be32(ndescs);
	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
	access_mode = MLX5_ACCESS_MODE_MTT;

	if (mr_init_attr->flags & IB_MR_SIGNATURE_EN) {
		u32 psv_index[2];

		in->seg.flags_pd = cpu_to_be32(be32_to_cpu(in->seg.flags_pd) |
					       MLX5_MKEY_BSF_EN);
		in->seg.bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
		mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
		if (!mr->sig) {
			err = -ENOMEM;
			goto err_free_in;
		}

		/* create mem & wire PSVs */
		err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
					   2, psv_index);
		if (err)
			goto err_free_sig;

		access_mode = MLX5_ACCESS_MODE_KLM;
		mr->sig->psv_memory.psv_idx = psv_index[0];
		mr->sig->psv_wire.psv_idx = psv_index[1];

		mr->sig->sig_status_checked = true;
		mr->sig->sig_err_exists = false;
		/* Next UMR, Arm SIGERR */
		++mr->sig->sigerr_count;
	}

	in->seg.flags = MLX5_PERM_UMR_EN | access_mode;
	err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, sizeof(*in),
				    NULL, NULL, NULL);
	if (err)
		goto err_destroy_psv;

	mr->ibmr.lkey = mr->mmr.key;
	mr->ibmr.rkey = mr->mmr.key;
	mr->umem = NULL;
	kfree(in);

	return &mr->ibmr;

err_destroy_psv:
	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
	}
err_free_sig:
	kfree(mr->sig);
err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}

int mlx5_ib_destroy_mr(struct ib_mr *ibmr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int err;

	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
		kfree(mr->sig);
	}

	err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
	if (err) {
		mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
			     mr->mmr.key, err);
		return err;
	}

	kfree(mr);

	return err;
}

struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
					int max_page_list_len)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_ib_mr *mr;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	in->seg.status = 1 << 6; /* free */
	in->seg.xlt_oct_size = cpu_to_be32((max_page_list_len + 1) / 2);
	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	in->seg.flags = MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_MTT;
	in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
	/* TBD not needed - issue 197292 */
	in->seg.log2_page_size = PAGE_SHIFT;

	err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, sizeof(*in), NULL,
				    NULL, NULL);
	kfree(in);
	if (err)
		goto err_free;

	mr->ibmr.lkey = mr->mmr.key;
	mr->ibmr.rkey = mr->mmr.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
							       int page_list_len)
{
	struct mlx5_ib_fast_reg_page_list *mfrpl;
	int size = page_list_len * sizeof(u64);

	mfrpl = kmalloc(sizeof(*mfrpl), GFP_KERNEL);
	if (!mfrpl)
		return ERR_PTR(-ENOMEM);

	mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL);
	if (!mfrpl->ibfrpl.page_list)
		goto err_free;

	mfrpl->mapped_page_list = dma_alloc_coherent(ibdev->dma_device,
						     size, &mfrpl->map,
						     GFP_KERNEL);
	if (!mfrpl->mapped_page_list)
		goto err_free;

	WARN_ON(mfrpl->map & 0x3f);

	return &mfrpl->ibfrpl;

err_free:
	kfree(mfrpl->ibfrpl.page_list);
	kfree(mfrpl);
	return ERR_PTR(-ENOMEM);
}

void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
	struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
	struct mlx5_ib_dev *dev = to_mdev(page_list->device);
	int size = page_list->max_page_list_len * sizeof(u64);

	dma_free_coherent(&dev->mdev->pdev->dev, size, mfrpl->mapped_page_list,
			  mfrpl->map);
	kfree(mfrpl->ibfrpl.page_list);
	kfree(mfrpl);
}

int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
	int ret = 0;

	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
		pr_err("Invalid status check mask\n");
		ret = -EINVAL;
		goto done;
	}

	mr_status->fail_status = 0;
	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
		if (!mmr->sig) {
			ret = -EINVAL;
			pr_err("signature status check requested on a non-signature enabled MR\n");
			goto done;
		}

		mmr->sig->sig_status_checked = true;
		if (!mmr->sig->sig_err_exists)
			goto done;

		if (ibmr->lkey == mmr->sig->err_item.key)
			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
			       sizeof(mr_status->sig_err));
		else {
			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
			mr_status->sig_err.sig_err_offset = 0;
			mr_status->sig_err.key = mmr->sig->err_item.key;
		}

		mmr->sig->sig_err_exists = false;
		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
	}

done:
	return ret;
}