/*
 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include "mlx5_ib.h"

enum {
        MAX_PENDING_REG_MR = 8,
};

enum {
        MLX5_UMR_ALIGN = 2048
};

static __be64 *mr_align(__be64 *ptr, int align)
{
        unsigned long mask = align - 1;

        return (__be64 *)(((unsigned long)ptr + mask) & ~mask);
}

static int order2idx(struct mlx5_ib_dev *dev, int order)
{
        struct mlx5_mr_cache *cache = &dev->cache;

        if (order < cache->ent[0].order)
                return 0;
        else
                return order - cache->ent[0].order;
}

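/*
 * Completion callback for the asynchronous mlx5_core_create_mkey()
 * calls issued from add_keys().  May run in interrupt context, hence
 * the irqsave locking.  On any failure the MR is freed and cache
 * filling is throttled for about a second via delay_timer; on success
 * the mkey gets a fresh 8-bit variant key, is linked into the cache
 * entry's free list, and is inserted into the device's mkey radix tree.
 */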
static void reg_mr_callback(int status, void *context)
{
        struct mlx5_ib_mr *mr = context;
        struct mlx5_ib_dev *dev = mr->dev;
        struct mlx5_mr_cache *cache = &dev->cache;
        int c = order2idx(dev, mr->order);
        struct mlx5_cache_ent *ent = &cache->ent[c];
        u8 key;
        unsigned long flags;
        struct mlx5_mr_table *table = &dev->mdev->priv.mr_table;
        int err;

        spin_lock_irqsave(&ent->lock, flags);
        ent->pending--;
        spin_unlock_irqrestore(&ent->lock, flags);
        if (status) {
                mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
                kfree(mr);
                dev->fill_delay = 1;
                mod_timer(&dev->delay_timer, jiffies + HZ);
                return;
        }

        if (mr->out.hdr.status) {
                mlx5_ib_warn(dev, "failed - status %d, syndrome 0x%x\n",
                             mr->out.hdr.status,
                             be32_to_cpu(mr->out.hdr.syndrome));
                kfree(mr);
                dev->fill_delay = 1;
                mod_timer(&dev->delay_timer, jiffies + HZ);
                return;
        }

        spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
        key = dev->mdev->priv.mkey_key++;
        spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
        mr->mmr.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;

        cache->last_add = jiffies;

        spin_lock_irqsave(&ent->lock, flags);
        list_add_tail(&mr->list, &ent->head);
        ent->cur++;
        ent->size++;
        spin_unlock_irqrestore(&ent->lock, flags);

        write_lock_irqsave(&table->lock, flags);
        err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmr.key),
                                &mr->mmr);
        if (err)
                pr_err("Error inserting to mr tree. 0x%x\n", -err);
        write_unlock_irqrestore(&table->lock, flags);
}

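/*
 * Try to add @num free MRs to cache entry @c.  Creation is
 * asynchronous (reg_mr_callback() completes it), and at most
 * MAX_PENDING_REG_MR creations may be in flight per entry; beyond
 * that -EAGAIN is returned so the caller can back off and retry.
 */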
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent = &cache->ent[c];
        struct mlx5_create_mkey_mbox_in *in;
        struct mlx5_ib_mr *mr;
        int npages = 1 << ent->order;
        int err = 0;
        int i;

        in = kzalloc(sizeof(*in), GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        for (i = 0; i < num; i++) {
                if (ent->pending >= MAX_PENDING_REG_MR) {
                        err = -EAGAIN;
                        break;
                }

                mr = kzalloc(sizeof(*mr), GFP_KERNEL);
                if (!mr) {
                        err = -ENOMEM;
                        break;
                }
                mr->order = ent->order;
                mr->umred = 1;
                mr->dev = dev;
                in->seg.status = 1 << 6;
                in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
                in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
                in->seg.flags = MLX5_ACCESS_MODE_MTT | MLX5_PERM_UMR_EN;
                in->seg.log2_page_size = 12;

                spin_lock_irq(&ent->lock);
                ent->pending++;
                spin_unlock_irq(&ent->lock);
                err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in,
                                            sizeof(*in), reg_mr_callback,
                                            mr, &mr->out);
                if (err) {
                        spin_lock_irq(&ent->lock);
                        ent->pending--;
                        spin_unlock_irq(&ent->lock);
                        mlx5_ib_warn(dev, "create mkey failed %d\n", err);
                        kfree(mr);
                        break;
                }
        }

        kfree(in);
        return err;
}

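/*
 * Destroy up to @num MRs from the free list of cache entry @c,
 * stopping early if the list runs empty.
 */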
static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent = &cache->ent[c];
        struct mlx5_ib_mr *mr;
        int err;
        int i;

        for (i = 0; i < num; i++) {
                spin_lock_irq(&ent->lock);
                if (list_empty(&ent->head)) {
                        spin_unlock_irq(&ent->lock);
                        return;
                }
                mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
                list_del(&mr->list);
                ent->cur--;
                ent->size--;
                spin_unlock_irq(&ent->lock);
                err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
                if (err)
                        mlx5_ib_warn(dev, "failed destroy mkey\n");
                else
                        kfree(mr);
        }
}

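/*
 * debugfs "size" attribute: writing a value grows the entry
 * (synchronously, retrying while add_keys() returns -EAGAIN) or
 * shrinks it via remove_keys().  Values below the entry's limit are
 * rejected.
 */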
static ssize_t size_write(struct file *filp, const char __user *buf,
                          size_t count, loff_t *pos)
{
        struct mlx5_cache_ent *ent = filp->private_data;
        struct mlx5_ib_dev *dev = ent->dev;
        char lbuf[20];
        u32 var;
        int err;
        int c;

        if (copy_from_user(lbuf, buf, sizeof(lbuf)))
                return -EFAULT;

        c = order2idx(dev, ent->order);
        lbuf[sizeof(lbuf) - 1] = 0;

        if (sscanf(lbuf, "%u", &var) != 1)
                return -EINVAL;

        if (var < ent->limit)
                return -EINVAL;

        if (var > ent->size) {
                do {
                        err = add_keys(dev, c, var - ent->size);
                        if (err && err != -EAGAIN)
                                return err;

                        usleep_range(3000, 5000);
                } while (err);
        } else if (var < ent->size) {
                remove_keys(dev, c, ent->size - var);
        }

        return count;
}

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
                         loff_t *pos)
{
        struct mlx5_cache_ent *ent = filp->private_data;
        char lbuf[20];
        int err;

        if (*pos)
                return 0;

        err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
        if (err < 0)
                return err;

        if (copy_to_user(buf, lbuf, err))
                return -EFAULT;

        *pos += err;

        return err;
}

static const struct file_operations size_fops = {
        .owner  = THIS_MODULE,
        .open   = simple_open,
        .write  = size_write,
        .read   = size_read,
};

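/*
 * debugfs "limit" attribute: sets the entry's low watermark and, if
 * the current population is below it, refills up to 2 * limit.
 */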
static ssize_t limit_write(struct file *filp, const char __user *buf,
                           size_t count, loff_t *pos)
{
        struct mlx5_cache_ent *ent = filp->private_data;
        struct mlx5_ib_dev *dev = ent->dev;
        char lbuf[20];
        u32 var;
        int err;
        int c;

        if (copy_from_user(lbuf, buf, sizeof(lbuf)))
                return -EFAULT;

        c = order2idx(dev, ent->order);
        lbuf[sizeof(lbuf) - 1] = 0;

        if (sscanf(lbuf, "%u", &var) != 1)
                return -EINVAL;

        if (var > ent->size)
                return -EINVAL;

        ent->limit = var;

        if (ent->cur < ent->limit) {
                err = add_keys(dev, c, 2 * ent->limit - ent->cur);
                if (err)
                        return err;
        }

        return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
                          loff_t *pos)
{
        struct mlx5_cache_ent *ent = filp->private_data;
        char lbuf[20];
        int err;

        if (*pos)
                return 0;

        err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
        if (err < 0)
                return err;

        if (copy_to_user(buf, lbuf, err))
                return -EFAULT;

        *pos += err;

        return err;
}

static const struct file_operations limit_fops = {
        .owner  = THIS_MODULE,
        .open   = simple_open,
        .write  = limit_write,
        .read   = limit_read,
};

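/*
 * True if any cache entry is still below its limit, i.e. background
 * filling is expected to be in progress somewhere.
 */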
static int someone_adding(struct mlx5_mr_cache *cache)
{
        int i;

        for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
                if (cache->ent[i].cur < cache->ent[i].limit)
                        return 1;
        }

        return 0;
}

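/*
 * Per-entry maintenance work.  The population is kept between limit
 * and 2 * limit: below 2 * limit, keys are added one at a time
 * (backing off briefly on -EAGAIN, for a second on other errors, and
 * entirely while fill_delay is set); above 2 * limit, a key is
 * removed only if nobody is refilling and the cache has been quiet
 * for 300 seconds, otherwise the shrink is retried later.
 */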
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
        struct mlx5_ib_dev *dev = ent->dev;
        struct mlx5_mr_cache *cache = &dev->cache;
        int i = order2idx(dev, ent->order);
        int err;

        if (cache->stopped)
                return;

        ent = &dev->cache.ent[i];
        if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
                err = add_keys(dev, i, 1);
                if (ent->cur < 2 * ent->limit) {
                        if (err == -EAGAIN) {
                                mlx5_ib_dbg(dev, "returned eagain, order %d\n",
                                            i + 2);
                                queue_delayed_work(cache->wq, &ent->dwork,
                                                   msecs_to_jiffies(3));
                        } else if (err) {
                                mlx5_ib_warn(dev, "command failed order %d, err %d\n",
                                             i + 2, err);
                                queue_delayed_work(cache->wq, &ent->dwork,
                                                   msecs_to_jiffies(1000));
                        } else {
                                queue_work(cache->wq, &ent->work);
                        }
                }
        } else if (ent->cur > 2 * ent->limit) {
                if (!someone_adding(cache) &&
                    time_after(jiffies, cache->last_add + 300 * HZ)) {
                        remove_keys(dev, i, 1);
                        if (ent->cur > ent->limit)
                                queue_work(cache->wq, &ent->work);
                } else {
                        queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
                }
        }
}

static void delayed_cache_work_func(struct work_struct *work)
{
        struct mlx5_cache_ent *ent;

        ent = container_of(work, struct mlx5_cache_ent, dwork.work);
        __cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
        struct mlx5_cache_ent *ent;

        ent = container_of(work, struct mlx5_cache_ent, work);
        __cache_work_func(ent);
}

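/*
 * Take an MR from the cache.  If the exact order is empty, larger
 * orders are tried next; every entry visited has its work queued so
 * it refills in the background, and a complete miss is accounted in
 * the originally requested entry.
 */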
static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_ib_mr *mr = NULL;
        struct mlx5_cache_ent *ent;
        int c;
        int i;

        c = order2idx(dev, order);
        if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
                mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
                return NULL;
        }

        for (i = c; i < MAX_MR_CACHE_ENTRIES; i++) {
                ent = &cache->ent[i];

                mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

                spin_lock_irq(&ent->lock);
                if (!list_empty(&ent->head)) {
                        mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
                                              list);
                        list_del(&mr->list);
                        ent->cur--;
                        spin_unlock_irq(&ent->lock);
                        if (ent->cur < ent->limit)
                                queue_work(cache->wq, &ent->work);
                        break;
                }
                spin_unlock_irq(&ent->lock);

                queue_work(cache->wq, &ent->work);

                if (mr)
                        break;
        }

        if (!mr)
                cache->ent[c].miss++;

        return mr;
}

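/*
 * Return an MR to its cache entry; if this pushes the population
 * above 2 * limit, kick the work function so it can shrink.
 */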
static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent;
        int shrink = 0;
        int c;

        c = order2idx(dev, mr->order);
        if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
                mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
                return;
        }
        ent = &cache->ent[c];
        spin_lock_irq(&ent->lock);
        list_add_tail(&mr->list, &ent->head);
        ent->cur++;
        if (ent->cur > 2 * ent->limit)
                shrink = 1;
        spin_unlock_irq(&ent->lock);

        if (shrink)
                queue_work(cache->wq, &ent->work);
}

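/*
 * Drain and destroy every MR in cache entry @c.  Only used on
 * teardown, after cache->stopped has been set.
 */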
static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent = &cache->ent[c];
        struct mlx5_ib_mr *mr;
        int err;

        cancel_delayed_work(&ent->dwork);
        while (1) {
                spin_lock_irq(&ent->lock);
                if (list_empty(&ent->head)) {
                        spin_unlock_irq(&ent->lock);
                        return;
                }
                mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
                list_del(&mr->list);
                ent->cur--;
                ent->size--;
                spin_unlock_irq(&ent->lock);
                err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
                if (err)
                        mlx5_ib_warn(dev, "failed destroy mkey\n");
                else
                        kfree(mr);
        }
}

static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent;
        int i;

        if (!mlx5_debugfs_root)
                return 0;

        cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
        if (!cache->root)
                return -ENOMEM;

        for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
                ent = &cache->ent[i];
                sprintf(ent->name, "%d", ent->order);
                ent->dir = debugfs_create_dir(ent->name, cache->root);
                if (!ent->dir)
                        return -ENOMEM;

                ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
                                                 &size_fops);
                if (!ent->fsize)
                        return -ENOMEM;

                ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
                                                  &limit_fops);
                if (!ent->flimit)
                        return -ENOMEM;

                ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
                                               &ent->cur);
                if (!ent->fcur)
                        return -ENOMEM;

                ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
                                                &ent->miss);
                if (!ent->fmiss)
                        return -ENOMEM;
        }

        return 0;
}

static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
        if (!mlx5_debugfs_root)
                return;

        debugfs_remove_recursive(dev->cache.root);
}

static void delay_time_func(unsigned long ctx)
{
        struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;

        dev->fill_delay = 0;
}

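/*
 * Create the cache workqueue and entries; entry i serves allocation
 * order i + 2.  Per-entry limits come from the core device profile
 * when MLX5_PROF_MASK_MR_CACHE is set, and each entry's work is
 * queued once so the initial fill happens asynchronously.  A debugfs
 * failure is reported but deliberately not fatal.
 */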
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent;
        int limit;
        int err;
        int i;

        cache->wq = create_singlethread_workqueue("mkey_cache");
        if (!cache->wq) {
                mlx5_ib_warn(dev, "failed to create work queue\n");
                return -ENOMEM;
        }

        setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
        for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
                INIT_LIST_HEAD(&cache->ent[i].head);
                spin_lock_init(&cache->ent[i].lock);

                ent = &cache->ent[i];
                INIT_LIST_HEAD(&ent->head);
                spin_lock_init(&ent->lock);
                ent->order = i + 2;
                ent->dev = dev;

                if (dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE)
                        limit = dev->mdev->profile->mr_cache[i].limit;
                else
                        limit = 0;

                INIT_WORK(&ent->work, cache_work_func);
                INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
                ent->limit = limit;
                queue_work(cache->wq, &ent->work);
        }

        err = mlx5_mr_cache_debugfs_init(dev);
        if (err)
                mlx5_ib_warn(dev, "cache debugfs failure\n");

        return 0;
}

int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
        int i;

        dev->cache.stopped = 1;
        flush_workqueue(dev->cache.wq);

        mlx5_mr_cache_debugfs_cleanup(dev);

        for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
                clean_keys(dev, i);

        destroy_workqueue(dev->cache.wq);
        del_timer_sync(&dev->delay_timer);

        return 0;
}

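/*
 * Allocate a DMA MR: a physical-address-mode mkey (MLX5_ACCESS_MODE_PA
 * with MLX5_MKEY_LEN64) covering the whole address space, created
 * synchronously without going through the cache.
 */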
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_core_dev *mdev = dev->mdev;
        struct mlx5_create_mkey_mbox_in *in;
        struct mlx5_mkey_seg *seg;
        struct mlx5_ib_mr *mr;
        int err;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        in = kzalloc(sizeof(*in), GFP_KERNEL);
        if (!in) {
                err = -ENOMEM;
                goto err_free;
        }

        seg = &in->seg;
        seg->flags = convert_access(acc) | MLX5_ACCESS_MODE_PA;
        seg->flags_pd = cpu_to_be32(to_mpd(pd)->pdn | MLX5_MKEY_LEN64);
        seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
        seg->start_addr = 0;

        err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in), NULL, NULL,
                                    NULL);
        if (err)
                goto err_in;

        kfree(in);
        mr->ibmr.lkey = mr->mmr.key;
        mr->ibmr.rkey = mr->mmr.key;
        mr->umem = NULL;

        return &mr->ibmr;

err_in:
        kfree(in);

err_free:
        kfree(mr);

        return ERR_PTR(err);
}

static int get_octo_len(u64 addr, u64 len, int page_size)
{
        u64 offset;
        int npages;

        offset = addr & (page_size - 1);
        npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
        return (npages + 1) / 2;
}

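/*
 * Registrations of up to 2^17 pages (512 MB with 4K pages) go through
 * the UMR/cache fast path; larger ones fall back to reg_create().
 */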
static int use_umr(int order)
{
        return order <= 17;
}

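/*
 * Build the UMR work request that points the target mkey at the
 * DMA-mapped page list.  The fast_reg fields are reused to carry the
 * UMR parameters, and the page_list pointer is (ab)used to carry the
 * PD, presumably so it can be recovered when the WQE is built in the
 * QP code.
 */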
static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
                             struct ib_sge *sg, u64 dma, int n, u32 key,
                             int page_shift, u64 virt_addr, u64 len,
                             int access_flags)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct ib_mr *mr = dev->umrc.mr;

        sg->addr = dma;
        sg->length = ALIGN(sizeof(u64) * n, 64);
        sg->lkey = mr->lkey;

        wr->next = NULL;
        wr->send_flags = 0;
        wr->sg_list = sg;
        if (n)
                wr->num_sge = 1;
        else
                wr->num_sge = 0;

        wr->opcode = MLX5_IB_WR_UMR;
        wr->wr.fast_reg.page_list_len = n;
        wr->wr.fast_reg.page_shift = page_shift;
        wr->wr.fast_reg.rkey = key;
        wr->wr.fast_reg.iova_start = virt_addr;
        wr->wr.fast_reg.length = len;
        wr->wr.fast_reg.access_flags = access_flags;
        wr->wr.fast_reg.page_list = (struct ib_fast_reg_page_list *)pd;
}

static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
                               struct ib_send_wr *wr, u32 key)
{
        wr->send_flags = MLX5_IB_SEND_UMR_UNREG;
        wr->opcode = MLX5_IB_WR_UMR;
        wr->wr.fast_reg.rkey = key;
}

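/*
 * Completion handler for the UMR CQ.  Each wr_id is the address of an
 * on-stack mlx5_ib_umr_context; record the work completion status and
 * wake the thread sleeping in reg_umr()/unreg_umr().
 */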
void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
{
        struct mlx5_ib_umr_context *context;
        struct ib_wc wc;
        int err;

        while (1) {
                err = ib_poll_cq(cq, 1, &wc);
                if (err < 0) {
                        pr_warn("poll cq error %d\n", err);
                        return;
                }
                if (err == 0)
                        break;

                context = (struct mlx5_ib_umr_context *) (unsigned long) wc.wr_id;
                context->status = wc.status;
                complete(&context->done);
        }
        ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
}

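/*
 * Fast-path registration: take a pre-created mkey from the cache,
 * write the page list into a MLX5_UMR_ALIGN-aligned buffer, DMA-map
 * it and post a UMR work request, then sleep on umr_context.done
 * until mlx5_umr_cq_handler() reports completion.  On a cache miss a
 * refill is kicked off and -EAGAIN is returned so the caller can fall
 * back to reg_create().
 */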
static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
                                  u64 virt_addr, u64 len, int npages,
                                  int page_shift, int order, int access_flags)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct device *ddev = dev->ib_dev.dma_device;
        struct umr_common *umrc = &dev->umrc;
        struct mlx5_ib_umr_context umr_context;
        struct ib_send_wr wr, *bad;
        struct mlx5_ib_mr *mr;
        struct ib_sge sg;
        int size = sizeof(u64) * npages;
        __be64 *mr_pas;
        dma_addr_t dma;
        int err = 0;
        int i;

        for (i = 0; i < 1; i++) {
                mr = alloc_cached_mr(dev, order);
                if (mr)
                        break;

                err = add_keys(dev, order2idx(dev, order), 1);
                if (err && err != -EAGAIN) {
                        mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
                        break;
                }
        }

        if (!mr)
                return ERR_PTR(-EAGAIN);

        mr_pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
        if (!mr_pas) {
                err = -ENOMEM;
                goto free_mr;
        }

        mlx5_ib_populate_pas(dev, umem, page_shift,
                             mr_align(mr_pas, MLX5_UMR_ALIGN), 1);

        dma = dma_map_single(ddev, mr_align(mr_pas, MLX5_UMR_ALIGN), size,
                             DMA_TO_DEVICE);
        if (dma_mapping_error(ddev, dma)) {
                err = -ENOMEM;
                goto free_pas;
        }

        memset(&wr, 0, sizeof(wr));
        wr.wr_id = (u64)(unsigned long)&umr_context;
        prep_umr_reg_wqe(pd, &wr, &sg, dma, npages, mr->mmr.key, page_shift,
                         virt_addr, len, access_flags);

        mlx5_ib_init_umr_context(&umr_context);
        down(&umrc->sem);
        err = ib_post_send(umrc->qp, &wr, &bad);
        if (err) {
                mlx5_ib_warn(dev, "post send failed, err %d\n", err);
                goto unmap_dma;
        } else {
                wait_for_completion(&umr_context.done);
                if (umr_context.status != IB_WC_SUCCESS) {
                        mlx5_ib_warn(dev, "reg umr failed\n");
                        err = -EFAULT;
                }
        }

        mr->mmr.iova = virt_addr;
        mr->mmr.size = len;
        mr->mmr.pd = to_mpd(pd)->pdn;

unmap_dma:
        up(&umrc->sem);
        dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_pas:
        kfree(mr_pas);

free_mr:
        if (err) {
                free_cached_mr(dev, mr);
                return ERR_PTR(err);
        }

        return mr;
}

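/*
 * Slow-path registration: build a full create_mkey mailbox with the
 * page list inline and register synchronously through firmware.  Used
 * when the region is too large for use_umr() or the cache is empty.
 */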
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
                                     u64 length, struct ib_umem *umem,
                                     int npages, int page_shift,
                                     int access_flags)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_create_mkey_mbox_in *in;
        struct mlx5_ib_mr *mr;
        int inlen;
        int err;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        inlen = sizeof(*in) + sizeof(*in->pas) * ((npages + 1) / 2) * 2;
        in = mlx5_vzalloc(inlen);
        if (!in) {
                err = -ENOMEM;
                goto err_1;
        }
        mlx5_ib_populate_pas(dev, umem, page_shift, in->pas, 0);

        in->seg.flags = convert_access(access_flags) |
                MLX5_ACCESS_MODE_MTT;
        in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
        in->seg.start_addr = cpu_to_be64(virt_addr);
        in->seg.len = cpu_to_be64(length);
        in->seg.bsfs_octo_size = 0;
        in->seg.xlt_oct_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift));
        in->seg.log2_page_size = page_shift;
        in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
        in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length,
                                                         1 << page_shift));
        err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, inlen, NULL,
                                    NULL, NULL);
        if (err) {
                mlx5_ib_warn(dev, "create mkey failed\n");
                goto err_2;
        }
        mr->umem = umem;
        kvfree(in);

        mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmr.key);

        return mr;

err_2:
        kvfree(in);

err_1:
        kfree(mr);

        return ERR_PTR(err);
}

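/*
 * ib_reg_user_mr entry point: pin the user memory, determine page
 * size and contiguity, then register through reg_umr() when the order
 * qualifies, falling back to reg_create() otherwise.  Pinned-page
 * accounting is kept in dev->mdev->priv.reg_pages.
 */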
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                  u64 virt_addr, int access_flags,
                                  struct ib_udata *udata)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_ib_mr *mr = NULL;
        struct ib_umem *umem;
        int page_shift;
        int npages;
        int ncont;
        int order;
        int err;

        mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
                    start, virt_addr, length, access_flags);
        umem = ib_umem_get(pd->uobject->context, start, length, access_flags,
                           0);
        if (IS_ERR(umem)) {
                mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
                return (void *)umem;
        }

        mlx5_ib_cont_pages(umem, start, &npages, &page_shift, &ncont, &order);
        if (!npages) {
                mlx5_ib_warn(dev, "avoid zero region\n");
                err = -EINVAL;
                goto error;
        }

        mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
                    npages, ncont, order, page_shift);

        if (use_umr(order)) {
                mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
                             order, access_flags);
                if (PTR_ERR(mr) == -EAGAIN) {
                        mlx5_ib_dbg(dev, "cache empty for order %d", order);
                        mr = NULL;
                }
        }

        if (!mr)
                mr = reg_create(pd, virt_addr, length, umem, ncont, page_shift,
                                access_flags);

        if (IS_ERR(mr)) {
                err = PTR_ERR(mr);
                goto error;
        }

        mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmr.key);

        mr->umem = umem;
        mr->npages = npages;
        spin_lock(&dev->mr_lock);
        dev->mdev->priv.reg_pages += npages;
        spin_unlock(&dev->mr_lock);
        mr->ibmr.lkey = mr->mmr.key;
        mr->ibmr.rkey = mr->mmr.key;

        return &mr->ibmr;

error:
        ib_umem_release(umem);
        return ERR_PTR(err);
}

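/*
 * Post a UMR work request that invalidates the mkey (so the MR can be
 * recycled into the cache) and wait for its completion.
 */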
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
        struct umr_common *umrc = &dev->umrc;
        struct mlx5_ib_umr_context umr_context;
        struct ib_send_wr wr, *bad;
        int err;

        memset(&wr, 0, sizeof(wr));
        wr.wr_id = (u64)(unsigned long)&umr_context;
        prep_umr_unreg_wqe(dev, &wr, mr->mmr.key);

        mlx5_ib_init_umr_context(&umr_context);
        down(&umrc->sem);
        err = ib_post_send(umrc->qp, &wr, &bad);
        if (err) {
                up(&umrc->sem);
                mlx5_ib_dbg(dev, "err %d\n", err);
                goto error;
        } else {
                wait_for_completion(&umr_context.done);
                up(&umrc->sem);
        }
        if (umr_context.status != IB_WC_SUCCESS) {
                mlx5_ib_warn(dev, "unreg umr failed\n");
                err = -EFAULT;
                goto error;
        }
        return 0;

error:
        return err;
}

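/*
 * Destroy an MR.  Cache-backed MRs (mr->umred) are invalidated with
 * unreg_umr() and returned to the cache; all others have their mkey
 * destroyed directly and are freed here.
 */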
int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
{
        struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
        struct mlx5_ib_mr *mr = to_mmr(ibmr);
        struct ib_umem *umem = mr->umem;
        int npages = mr->npages;
        int umred = mr->umred;
        int err;

        if (!umred) {
                err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
                if (err) {
                        mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
                                     mr->mmr.key, err);
                        return err;
                }
        } else {
                err = unreg_umr(dev, mr);
                if (err) {
                        mlx5_ib_warn(dev, "failed unregister\n");
                        return err;
                }
                free_cached_mr(dev, mr);
        }

        if (umem) {
                ib_umem_release(umem);
                spin_lock(&dev->mr_lock);
                dev->mdev->priv.reg_pages -= npages;
                spin_unlock(&dev->mr_lock);
        }

        if (!umred)
                kfree(mr);

        return 0;
}

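/*
 * Create an mkey for the ib_create_mr interface.  When
 * IB_MR_SIGNATURE_EN is requested, a memory/wire pair of PSVs is
 * created, BSF is enabled on the mkey and the access mode is switched
 * to KLM so the MR can carry signature state.
 */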
struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
                                struct ib_mr_init_attr *mr_init_attr)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_create_mkey_mbox_in *in;
        struct mlx5_ib_mr *mr;
        int access_mode, err;
        int ndescs = roundup(mr_init_attr->max_reg_descriptors, 4);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        in = kzalloc(sizeof(*in), GFP_KERNEL);
        if (!in) {
                err = -ENOMEM;
                goto err_free;
        }

        in->seg.status = 1 << 6; /* free */
        in->seg.xlt_oct_size = cpu_to_be32(ndescs);
        in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
        in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
        access_mode = MLX5_ACCESS_MODE_MTT;

        if (mr_init_attr->flags & IB_MR_SIGNATURE_EN) {
                u32 psv_index[2];

                in->seg.flags_pd = cpu_to_be32(be32_to_cpu(in->seg.flags_pd) |
                                               MLX5_MKEY_BSF_EN);
                in->seg.bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
                mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
                if (!mr->sig) {
                        err = -ENOMEM;
                        goto err_free_in;
                }

                /* create mem & wire PSVs */
                err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
                                           2, psv_index);
                if (err)
                        goto err_free_sig;

                access_mode = MLX5_ACCESS_MODE_KLM;
                mr->sig->psv_memory.psv_idx = psv_index[0];
                mr->sig->psv_wire.psv_idx = psv_index[1];

                mr->sig->sig_status_checked = true;
                mr->sig->sig_err_exists = false;
                /* Next UMR, Arm SIGERR */
                ++mr->sig->sigerr_count;
        }

        in->seg.flags = MLX5_PERM_UMR_EN | access_mode;
        err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, sizeof(*in),
                                    NULL, NULL, NULL);
        if (err)
                goto err_destroy_psv;

        mr->ibmr.lkey = mr->mmr.key;
        mr->ibmr.rkey = mr->mmr.key;
        mr->umem = NULL;
        kfree(in);

        return &mr->ibmr;

err_destroy_psv:
        if (mr->sig) {
                if (mlx5_core_destroy_psv(dev->mdev,
                                          mr->sig->psv_memory.psv_idx))
                        mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
                                     mr->sig->psv_memory.psv_idx);
                if (mlx5_core_destroy_psv(dev->mdev,
                                          mr->sig->psv_wire.psv_idx))
                        mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
                                     mr->sig->psv_wire.psv_idx);
        }
err_free_sig:
        kfree(mr->sig);
err_free_in:
        kfree(in);
err_free:
        kfree(mr);
        return ERR_PTR(err);
}

int mlx5_ib_destroy_mr(struct ib_mr *ibmr)
{
        struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
        struct mlx5_ib_mr *mr = to_mmr(ibmr);
        int err;

        if (mr->sig) {
                if (mlx5_core_destroy_psv(dev->mdev,
                                          mr->sig->psv_memory.psv_idx))
                        mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
                                     mr->sig->psv_memory.psv_idx);
                if (mlx5_core_destroy_psv(dev->mdev,
                                          mr->sig->psv_wire.psv_idx))
                        mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
                                     mr->sig->psv_wire.psv_idx);
                kfree(mr->sig);
        }

        err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
        if (err) {
                mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
                             mr->mmr.key, err);
                return err;
        }

        kfree(mr);

        return err;
}

struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
                                        int max_page_list_len)
{
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_create_mkey_mbox_in *in;
        struct mlx5_ib_mr *mr;
        int err;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        in = kzalloc(sizeof(*in), GFP_KERNEL);
        if (!in) {
                err = -ENOMEM;
                goto err_free;
        }

        in->seg.status = 1 << 6; /* free */
        in->seg.xlt_oct_size = cpu_to_be32((max_page_list_len + 1) / 2);
        in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
        in->seg.flags = MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_MTT;
        in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
        /* TBD not needed - issue 197292 */
        in->seg.log2_page_size = PAGE_SHIFT;

        err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, sizeof(*in), NULL,
                                    NULL, NULL);
        kfree(in);
        if (err)
                goto err_free;

        mr->ibmr.lkey = mr->mmr.key;
        mr->ibmr.rkey = mr->mmr.key;
        mr->umem = NULL;

        return &mr->ibmr;

err_free:
        kfree(mr);
        return ERR_PTR(err);
}

struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
                                                               int page_list_len)
{
        struct mlx5_ib_fast_reg_page_list *mfrpl;
        int size = page_list_len * sizeof(u64);

        mfrpl = kmalloc(sizeof(*mfrpl), GFP_KERNEL);
        if (!mfrpl)
                return ERR_PTR(-ENOMEM);

        mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL);
        if (!mfrpl->ibfrpl.page_list)
                goto err_free;

        mfrpl->mapped_page_list = dma_alloc_coherent(ibdev->dma_device,
                                                     size, &mfrpl->map,
                                                     GFP_KERNEL);
        if (!mfrpl->mapped_page_list)
                goto err_free;

        WARN_ON(mfrpl->map & 0x3f);

        return &mfrpl->ibfrpl;

err_free:
        kfree(mfrpl->ibfrpl.page_list);
        kfree(mfrpl);
        return ERR_PTR(-ENOMEM);
}

void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
        struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
        struct mlx5_ib_dev *dev = to_mdev(page_list->device);
        int size = page_list->max_page_list_len * sizeof(u64);

        dma_free_coherent(&dev->mdev->pdev->dev, size, mfrpl->mapped_page_list,
                          mfrpl->map);
        kfree(mfrpl->ibfrpl.page_list);
        kfree(mfrpl);
}

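/*
 * Report (and clear) a pending signature error on a signature-enabled
 * MR.  If the recorded error key no longer matches this MR's lkey, a
 * synthesized IB_SIG_BAD_GUARD error carrying the recorded key is
 * returned instead.
 */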
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
                            struct ib_mr_status *mr_status)
{
        struct mlx5_ib_mr *mmr = to_mmr(ibmr);
        int ret = 0;

        if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
                pr_err("Invalid status check mask\n");
                ret = -EINVAL;
                goto done;
        }

        mr_status->fail_status = 0;
        if (check_mask & IB_MR_CHECK_SIG_STATUS) {
                if (!mmr->sig) {
                        ret = -EINVAL;
                        pr_err("signature status check requested on a non-signature enabled MR\n");
                        goto done;
                }

                mmr->sig->sig_status_checked = true;
                if (!mmr->sig->sig_err_exists)
                        goto done;

                if (ibmr->lkey == mmr->sig->err_item.key)
                        memcpy(&mr_status->sig_err, &mmr->sig->err_item,
                               sizeof(mr_status->sig_err));
                else {
                        mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
                        mr_status->sig_err.sig_err_offset = 0;
                        mr_status->sig_err.key = mmr->sig->err_item.key;
                }

                mmr->sig->sig_err_exists = false;
                mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
        }

done:
        return ret;
}