/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include "rds.h"
#include "ib.h"

/*
 * This is stored as mr->r_trans_private.
 */
struct rds_ib_mr {
	struct rds_ib_device	*device;
	struct rds_ib_mr_pool	*pool;
	struct ib_fmr		*fmr;
	struct list_head	list;
	unsigned int		remap_count;

	struct scatterlist	*sg;
	unsigned int		sg_len;
	u64			*dma;
	int			sg_dma_len;
};

/*
 * Our own little FMR pool
 */
struct rds_ib_mr_pool {
	struct mutex		flush_lock;	/* serialize fmr invalidate */
	struct work_struct	flush_worker;	/* flush worker */

	spinlock_t		list_lock;	/* protect variables below */
	atomic_t		item_count;	/* total # of MRs */
	atomic_t		dirty_count;	/* # of dirty MRs */
	struct list_head	drop_list;	/* MRs that have reached their max_maps limit */
	struct list_head	free_list;	/* unused MRs */
	struct list_head	clean_list;	/* unused & unmapped MRs */
	atomic_t		free_pinned;	/* memory pinned by free MRs */
	unsigned long		max_items;
	unsigned long		max_items_soft;
	unsigned long		max_free_pinned;
	struct ib_fmr_attr	fmr_attr;
};

static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all);
static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
static void rds_ib_mr_pool_flush_worker(struct work_struct *work);

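/*
 * Find the IB device bound to @ipaddr and take a reference on it.
 * The caller is responsible for dropping it with rds_ib_dev_put().
 */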
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_ipaddr *i_ipaddr;

	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		rcu_read_lock();
		list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
			if (i_ipaddr->ipaddr == ipaddr) {
				atomic_inc(&rds_ibdev->refcount);
				rcu_read_unlock();
				return rds_ibdev;
			}
		}
		rcu_read_unlock();
	}

	return NULL;
}

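/*
 * Add @ipaddr to the device's RCU-managed list of bound addresses.
 */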
static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;

	i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL);
	if (!i_ipaddr)
		return -ENOMEM;

	i_ipaddr->ipaddr = ipaddr;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
	spin_unlock_irq(&rds_ibdev->spinlock);

	return 0;
}

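/*
 * Unlink @ipaddr under the writer lock, then wait out an RCU grace
 * period before freeing it so readers in rds_ib_get_device() can't
 * touch freed memory.
 */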
static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;
	struct rds_ib_ipaddr *to_free = NULL;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
		if (i_ipaddr->ipaddr == ipaddr) {
			list_del_rcu(&i_ipaddr->list);
			to_free = i_ipaddr;
			break;
		}
	}
	spin_unlock_irq(&rds_ibdev->spinlock);

	if (to_free) {
		synchronize_rcu();
		kfree(to_free);
	}
}

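/*
 * Move @ipaddr to @rds_ibdev: remove it from whichever device currently
 * claims it, then bind it to this one.
 */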
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev_old;

	rds_ibdev_old = rds_ib_get_device(ipaddr);
	if (rds_ibdev_old) {
		rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);
		rds_ib_dev_put(rds_ibdev_old);
	}

	return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
}

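/*
 * Move a connection off the global nodev list and onto its device's
 * conn_list, taking a device reference on its behalf.
 */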
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* conn was previously on the nodev_conns_list */
	spin_lock_irq(&ib_nodev_conns_lock);
	BUG_ON(list_empty(&ib_nodev_conns));
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);

	/* Interrupts are already off thanks to the outer lock; a plain
	 * spin_lock here avoids re-enabling them while we still hold
	 * ib_nodev_conns_lock. */
	spin_lock(&rds_ibdev->spinlock);
	list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
	spin_unlock(&rds_ibdev->spinlock);
	spin_unlock_irq(&ib_nodev_conns_lock);

	ic->rds_ibdev = rds_ibdev;
	atomic_inc(&rds_ibdev->refcount);
}

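/*
 * Detach a connection from its device and park it back on the global
 * nodev list, dropping the reference taken in rds_ib_add_conn().
 */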
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* place conn on nodev_conns_list */
	spin_lock(&ib_nodev_conns_lock);

	spin_lock_irq(&rds_ibdev->spinlock);
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);
	spin_unlock_irq(&rds_ibdev->spinlock);

	list_add_tail(&ic->ib_node, &ib_nodev_conns);

	spin_unlock(&ib_nodev_conns_lock);

	ic->rds_ibdev = NULL;
	rds_ib_dev_put(rds_ibdev);
}

void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock)
{
	struct rds_ib_connection *ic, *_ic;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(list_lock);
	list_splice(list, &tmp_list);
	INIT_LIST_HEAD(list);
	spin_unlock_irq(list_lock);

	list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
		rds_conn_destroy(ic->conn);
}

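/*
 * Create a device's FMR pool.  The limits all derive from max_fmrs:
 * a hard cap of max_fmrs MRs, a soft cap at 3/4 of that, and at most
 * a quarter of the maximum pinnable pages parked on the free lists.
 */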
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->drop_list);
	INIT_LIST_HEAD(&pool->clean_list);
	mutex_init(&pool->flush_lock);
	spin_lock_init(&pool->list_lock);
	INIT_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

	pool->fmr_attr.max_pages = fmr_message_size;
	pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
	pool->fmr_attr.page_shift = PAGE_SHIFT;
	pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4;

	/* We never allow more than max_items MRs to be allocated.
	 * Once we exceed max_items_soft, we start freeing
	 * items more aggressively.
	 * Make sure that max_items > max_items_soft > max_items / 2
	 */
	pool->max_items_soft = rds_ibdev->max_fmrs * 3 / 4;
	pool->max_items = rds_ibdev->max_fmrs;

	return pool;
}

void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

	iinfo->rdma_mr_max = pool->max_items;
	iinfo->rdma_mr_size = pool->fmr_attr.max_pages;
}

void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
	cancel_work_sync(&pool->flush_worker);
	rds_ib_flush_mr_pool(pool, 1);
	WARN_ON(atomic_read(&pool->item_count));
	WARN_ON(atomic_read(&pool->free_pinned));
	kfree(pool);
}

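/*
 * Grab an unused, already unmapped MR off the clean list, if one is
 * available.
 */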
static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->list_lock, flags);
	if (!list_empty(&pool->clean_list)) {
		ibmr = list_entry(pool->clean_list.next, struct rds_ib_mr, list);
		list_del_init(&ibmr->list);
	}
	spin_unlock_irqrestore(&pool->list_lock, flags);

	return ibmr;
}

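/*
 * Get an MR to hand to a socket: reuse a clean one if we can,
 * otherwise allocate a fresh FMR, flushing dirty MRs and retrying a
 * couple of times if the pool is at its hard limit.
 */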
static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
	struct rds_ib_mr *ibmr = NULL;
	int err = 0, iter = 0;

	while (1) {
		ibmr = rds_ib_reuse_fmr(pool);
		if (ibmr)
			return ibmr;

		/* No clean MRs - now we have the choice of either
		 * allocating a fresh MR up to the limit imposed by the
		 * driver, or flushing any dirty unused MRs.
		 * We try to avoid stalling in the send path if possible,
		 * so we allocate as long as we're allowed to.
		 *
		 * We're fussy with enforcing the FMR limit, though. If the driver
		 * tells us we can't use more than N fmrs, we shouldn't start
		 * arguing with it */
		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			rds_ib_stats_inc(s_ib_rdma_mr_pool_depleted);
			return ERR_PTR(-EAGAIN);
		}

		/* We do have some empty MRs. Flush them out. */
		rds_ib_stats_inc(s_ib_rdma_mr_pool_wait);
		rds_ib_flush_mr_pool(pool, 0);
	}

	ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL, rdsibdev_to_node(rds_ibdev));
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
			(IB_ACCESS_LOCAL_WRITE |
			 IB_ACCESS_REMOTE_READ |
			 IB_ACCESS_REMOTE_WRITE |
			 IB_ACCESS_REMOTE_ATOMIC),
			&pool->fmr_attr);
	if (IS_ERR(ibmr->fmr)) {
		err = PTR_ERR(ibmr->fmr);
		ibmr->fmr = NULL;
		printk(KERN_WARNING "RDS/IB: ib_alloc_fmr failed (err=%d)\n", err);
		goto out_no_cigar;
	}

	rds_ib_stats_inc(s_ib_rdma_mr_alloc);
	return ibmr;

out_no_cigar:
	if (ibmr) {
		if (ibmr->fmr)
			ib_dealloc_fmr(ibmr->fmr);
		kfree(ibmr);
	}
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}

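/*
 * Map a scatterlist into the FMR.  ib_map_phys_fmr() takes a flat page
 * list, so only the first segment may start off a page boundary and
 * only the last may end off one; anything else is rejected.
 */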
static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
	   struct scatterlist *sg, unsigned int nents)
{
	struct ib_device *dev = rds_ibdev->dev;
	struct scatterlist *scat = sg;
	u64 io_addr = 0;
	u64 *dma_pages = NULL;
	u32 len;
	int page_cnt, sg_dma_len;
	int i, j;
	int ret;

	sg_dma_len = ib_dma_map_sg(dev, sg, nents,
				   DMA_BIDIRECTIONAL);
	if (unlikely(!sg_dma_len)) {
		printk(KERN_WARNING "RDS/IB: dma_map_sg failed!\n");
		return -EBUSY;
	}

	len = 0;
	page_cnt = 0;

	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		if (dma_addr & ~PAGE_MASK) {
			if (i > 0) {
				ret = -EINVAL;
				goto out_unmap;
			}
			++page_cnt;
		}
		if ((dma_addr + dma_len) & ~PAGE_MASK) {
			if (i < sg_dma_len - 1) {
				ret = -EINVAL;
				goto out_unmap;
			}
			++page_cnt;
		}

		len += dma_len;
	}

	page_cnt += len >> PAGE_SHIFT;
	if (page_cnt > fmr_message_size) {
		ret = -EINVAL;
		goto out_unmap;
	}

	dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
				 rdsibdev_to_node(rds_ibdev));
	if (!dma_pages) {
		ret = -ENOMEM;
		goto out_unmap;
	}

	page_cnt = 0;
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		for (j = 0; j < dma_len; j += PAGE_SIZE)
			dma_pages[page_cnt++] =
				(dma_addr & PAGE_MASK) + j;
	}

	ret = ib_map_phys_fmr(ibmr->fmr,
			      dma_pages, page_cnt, io_addr);
	if (ret)
		goto out_unmap;

	/* Success - we successfully remapped the MR, so we can
	 * safely tear down the old mapping. */
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = scat;
	ibmr->sg_len = nents;
	ibmr->sg_dma_len = sg_dma_len;
	ibmr->remap_count++;

	rds_ib_stats_inc(s_ib_rdma_mr_used);
	kfree(dma_pages);

	return 0;

out_unmap:
	/* Don't leak the DMA mapping we just set up: the MR still refers
	 * to its old scatterlist, so nobody else will unmap this one. */
	ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
	kfree(dma_pages);

	return ret;
}

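/*
 * Sync the MR's pages for the CPU or the device.  The underlying
 * mapping is bidirectional, so the sync calls are too.
 */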
void rds_ib_sync_mr(void *trans_private, int direction)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	switch (direction) {
	case DMA_FROM_DEVICE:
		ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}

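/*
 * Unmap the MR's scatterlist and unpin its pages, dirtying each page
 * since the remote side may have written to it.
 */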
static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_device *rds_ibdev = ibmr->device;

	if (ibmr->sg_dma_len) {
		ib_dma_unmap_sg(rds_ibdev->dev,
				ibmr->sg, ibmr->sg_len,
				DMA_BIDIRECTIONAL);
		ibmr->sg_dma_len = 0;
	}

	/* Release the s/g list */
	if (ibmr->sg_len) {
		unsigned int i;

		for (i = 0; i < ibmr->sg_len; ++i) {
			struct page *page = sg_page(&ibmr->sg[i]);

			/* FIXME we need a way to tell a r/w MR
			 * from a r/o MR */
			BUG_ON(irqs_disabled());
			set_page_dirty(page);
			put_page(page);
		}
		kfree(ibmr->sg);

		ibmr->sg = NULL;
		ibmr->sg_len = 0;
	}
}

static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	unsigned int pinned = ibmr->sg_len;

	__rds_ib_teardown_mr(ibmr);
	if (pinned) {
		struct rds_ib_device *rds_ibdev = ibmr->device;
		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

		atomic_sub(pinned, &pool->free_pinned);
	}
}

static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
	unsigned int item_count;

	item_count = atomic_read(&pool->item_count);
	if (free_all)
		return item_count;

	return 0;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all)
{
	struct rds_ib_mr *ibmr, *next;
	LIST_HEAD(unmap_list);
	LIST_HEAD(fmr_list);
	unsigned long unpinned = 0;
	unsigned long flags;
	unsigned int nfreed = 0, ncleaned = 0, free_goal;
	int ret = 0;

	rds_ib_stats_inc(s_ib_rdma_mr_pool_flush);

	mutex_lock(&pool->flush_lock);

	spin_lock_irqsave(&pool->list_lock, flags);
	/* Get the list of all MRs to be dropped. Ordering matters -
	 * we want to put drop_list ahead of free_list. */
	list_splice_init(&pool->free_list, &unmap_list);
	list_splice_init(&pool->drop_list, &unmap_list);
	if (free_all)
		list_splice_init(&pool->clean_list, &unmap_list);
	spin_unlock_irqrestore(&pool->list_lock, flags);

	free_goal = rds_ib_flush_goal(pool, free_all);

	if (list_empty(&unmap_list))
		goto out;

	/* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
	list_for_each_entry(ibmr, &unmap_list, list)
		list_add(&ibmr->fmr->list, &fmr_list);
	ret = ib_unmap_fmr(&fmr_list);
	if (ret)
		printk(KERN_WARNING "RDS/IB: ib_unmap_fmr failed (err=%d)\n", ret);

	/* Now we can destroy the DMA mapping and unpin any pages */
	list_for_each_entry_safe(ibmr, next, &unmap_list, list) {
		unpinned += ibmr->sg_len;
		__rds_ib_teardown_mr(ibmr);
		if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) {
			rds_ib_stats_inc(s_ib_rdma_mr_free);
			list_del(&ibmr->list);
			ib_dealloc_fmr(ibmr->fmr);
			kfree(ibmr);
			nfreed++;
		}
		ncleaned++;
	}

	spin_lock_irqsave(&pool->list_lock, flags);
	list_splice(&unmap_list, &pool->clean_list);
	spin_unlock_irqrestore(&pool->list_lock, flags);

	atomic_sub(unpinned, &pool->free_pinned);
	atomic_sub(ncleaned, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

out:
	mutex_unlock(&pool->flush_lock);
	return ret;
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker);

	rds_ib_flush_mr_pool(pool, 0);
}

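/*
 * Return an MR to the pool: onto the drop list once it has used up its
 * remaps, otherwise onto the free list.  A flush is kicked off when too
 * many pages sit pinned or too many MRs are dirty, and run
 * synchronously when the caller asks to invalidate from process
 * context.
 */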
void rds_ib_free_mr(void *trans_private, int invalidate)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
	unsigned long flags;

	rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

	/* Return it to the pool's free list */
	spin_lock_irqsave(&pool->list_lock, flags);
	if (ibmr->remap_count >= pool->fmr_attr.max_maps)
		list_add(&ibmr->list, &pool->drop_list);
	else
		list_add(&ibmr->list, &pool->free_list);

	atomic_add(ibmr->sg_len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);
	spin_unlock_irqrestore(&pool->list_lock, flags);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		queue_work(rds_wq, &pool->flush_worker);

	if (invalidate) {
		if (likely(!in_interrupt())) {
			rds_ib_flush_mr_pool(pool, 0);
		} else {
			/* We get here if the user created an MR marked
			 * as use_once and invalidate at the same time. */
			queue_work(rds_wq, &pool->flush_worker);
		}
	}

	rds_ib_dev_put(rds_ibdev);
}

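/*
 * Flush the dirty MRs in every device's pool.
 */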
void rds_ib_flush_mrs(void)
{
	struct rds_ib_device *rds_ibdev;

	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

		if (pool)
			rds_ib_flush_mr_pool(pool, 0);
	}
}

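/*
 * Register the pages in @sg with the device the socket is bound to.
 * On success, the rkey to hand to the peer is returned in @key_ret.
 */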
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_mr *ibmr = NULL;
	int ret;

	rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
	if (!rds_ibdev) {
		ret = -ENODEV;
		goto out;
	}

	if (!rds_ibdev->mr_pool) {
		ret = -ENODEV;
		goto out;
	}

	ibmr = rds_ib_alloc_fmr(rds_ibdev);
	if (IS_ERR(ibmr)) {
		/* Don't leak the device reference taken above. */
		rds_ib_dev_put(rds_ibdev);
		return ibmr;
	}

	ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
	if (ret == 0)
		*key_ret = ibmr->fmr->rkey;
	else
		printk(KERN_WARNING "RDS/IB: map_fmr failed (errno=%d)\n", ret);

	ibmr->device = rds_ibdev;
	rds_ibdev = NULL;

out:
	if (ret) {
		if (ibmr)
			rds_ib_free_mr(ibmr, 0);
		ibmr = ERR_PTR(ret);
	}
	if (rds_ibdev)
		rds_ib_dev_put(rds_ibdev);
	return ibmr;
}