/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include "rds.h"
#include "ib.h"
#include "xlist.h"

static DEFINE_PER_CPU(unsigned long, clean_list_grace);
#define CLEAN_LIST_BUSY_BIT 0

/*
 * This is stored as mr->r_trans_private.
 */
struct rds_ib_mr {
        struct rds_ib_device *device;
        struct rds_ib_mr_pool *pool;
        struct ib_fmr *fmr;

        struct xlist_head xlist;

        /* unmap_list is for freeing */
        struct list_head unmap_list;
        unsigned int remap_count;

        struct scatterlist *sg;
        unsigned int sg_len;
        u64 *dma;
        int sg_dma_len;
};

/*
 * Our own little FMR pool
 */
struct rds_ib_mr_pool {
        struct mutex flush_lock;                /* serialize fmr invalidate */
        struct delayed_work flush_worker;       /* flush worker */

        atomic_t item_count;                    /* total # of MRs */
        atomic_t dirty_count;                   /* # of dirty MRs */

        struct xlist_head drop_list;            /* MRs that have reached their max_maps limit */
        struct xlist_head free_list;            /* unused MRs */
        struct xlist_head clean_list;           /* global unused & unmapped MRs */
        wait_queue_head_t flush_wait;

        atomic_t free_pinned;                   /* memory pinned by free MRs */
        unsigned long max_items;
        unsigned long max_items_soft;
        unsigned long max_free_pinned;
        struct ib_fmr_attr fmr_attr;
};

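/*
 * A sketch of the MR lifecycle through these lists, as implemented
 * below (illustrative summary, not a normative spec):
 *
 *   rds_ib_alloc_fmr() pulls reusable MRs off clean_list, or allocates
 *   a fresh FMR.  rds_ib_free_mr() parks a dirty MR on free_list, or on
 *   drop_list once remap_count reaches fmr_attr.max_maps.  The flush
 *   worker unmaps everything on drop_list and free_list and splices the
 *   survivors back onto clean_list for reuse.
 */
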
static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all, struct rds_ib_mr **);
static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
static void rds_ib_mr_pool_flush_worker(struct work_struct *work);

static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
        struct rds_ib_device *rds_ibdev;
        struct rds_ib_ipaddr *i_ipaddr;

        list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
                rcu_read_lock();
                list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
                        if (i_ipaddr->ipaddr == ipaddr) {
                                atomic_inc(&rds_ibdev->refcount);
                                rcu_read_unlock();
                                return rds_ibdev;
                        }
                }
                rcu_read_unlock();
        }

        return NULL;
}

static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
        struct rds_ib_ipaddr *i_ipaddr;

        i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL);
        if (!i_ipaddr)
                return -ENOMEM;

        i_ipaddr->ipaddr = ipaddr;

        spin_lock_irq(&rds_ibdev->spinlock);
        list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
        spin_unlock_irq(&rds_ibdev->spinlock);

        return 0;
}

static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
        struct rds_ib_ipaddr *i_ipaddr;
        struct rds_ib_ipaddr *to_free = NULL;

        spin_lock_irq(&rds_ibdev->spinlock);
        list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
                if (i_ipaddr->ipaddr == ipaddr) {
                        list_del_rcu(&i_ipaddr->list);
                        to_free = i_ipaddr;
                        break;
                }
        }
        spin_unlock_irq(&rds_ibdev->spinlock);

        if (to_free) {
                synchronize_rcu();
                kfree(to_free);
        }
}

int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
        struct rds_ib_device *rds_ibdev_old;

        rds_ibdev_old = rds_ib_get_device(ipaddr);
        if (rds_ibdev_old) {
                rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);
                rds_ib_dev_put(rds_ibdev_old);
        }

        return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
}

void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;

        /* conn was previously on the nodev_conns_list */
        spin_lock_irq(&ib_nodev_conns_lock);
        BUG_ON(list_empty(&ib_nodev_conns));
        BUG_ON(list_empty(&ic->ib_node));
        list_del(&ic->ib_node);

        spin_lock_irq(&rds_ibdev->spinlock);
        list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
        spin_unlock_irq(&rds_ibdev->spinlock);
        spin_unlock_irq(&ib_nodev_conns_lock);

        ic->rds_ibdev = rds_ibdev;
        atomic_inc(&rds_ibdev->refcount);
}

void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;

        /* place conn on nodev_conns_list */
        spin_lock(&ib_nodev_conns_lock);

        spin_lock_irq(&rds_ibdev->spinlock);
        BUG_ON(list_empty(&ic->ib_node));
        list_del(&ic->ib_node);
        spin_unlock_irq(&rds_ibdev->spinlock);

        list_add_tail(&ic->ib_node, &ib_nodev_conns);

        spin_unlock(&ib_nodev_conns_lock);

        ic->rds_ibdev = NULL;
        rds_ib_dev_put(rds_ibdev);
}

void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock)
{
        struct rds_ib_connection *ic, *_ic;
        LIST_HEAD(tmp_list);

        /* avoid calling conn_destroy with irqs off */
        spin_lock_irq(list_lock);
        list_splice(list, &tmp_list);
        INIT_LIST_HEAD(list);
        spin_unlock_irq(list_lock);

        list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
                rds_conn_destroy(ic->conn);
}

struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
{
        struct rds_ib_mr_pool *pool;

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                return ERR_PTR(-ENOMEM);

        INIT_XLIST_HEAD(&pool->free_list);
        INIT_XLIST_HEAD(&pool->drop_list);
        INIT_XLIST_HEAD(&pool->clean_list);
        mutex_init(&pool->flush_lock);
        init_waitqueue_head(&pool->flush_wait);
        INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

        pool->fmr_attr.max_pages = fmr_message_size;
        pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
        pool->fmr_attr.page_shift = PAGE_SHIFT;
        pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4;

        /* We never allow more than max_items MRs to be allocated.
         * Once we exceed max_items_soft, we start freeing
         * items more aggressively.
         * Make sure that max_items > max_items_soft > max_items / 2.
         */
        pool->max_items_soft = rds_ibdev->max_fmrs * 3 / 4;
        pool->max_items = rds_ibdev->max_fmrs;

        return pool;
}
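
/*
 * Minimal caller sketch.  The device-add path (rds_ib_add_one() in
 * ib.c, which is not part of this file) is assumed to hang the pool
 * off the device roughly like this:
 *
 *        rds_ibdev->mr_pool = rds_ib_create_mr_pool(rds_ibdev);
 *        if (IS_ERR(rds_ibdev->mr_pool)) {
 *                rds_ibdev->mr_pool = NULL;
 *                return;
 *        }
 */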

void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
        struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

        iinfo->rdma_mr_max = pool->max_items;
        iinfo->rdma_mr_size = pool->fmr_attr.max_pages;
}

void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
        cancel_delayed_work_sync(&pool->flush_worker);
        rds_ib_flush_mr_pool(pool, 1, NULL);
        WARN_ON(atomic_read(&pool->item_count));
        WARN_ON(atomic_read(&pool->free_pinned));
        kfree(pool);
}

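/*
 * Hand one MR from an xlist straight to a caller waiting in
 * rds_ib_flush_mr_pool().  Only called with pool->flush_lock held, so
 * the unsynchronized xlist_del_head_fast() variant is assumed safe here.
 */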
static void refill_local(struct rds_ib_mr_pool *pool, struct xlist_head *xl,
                         struct rds_ib_mr **ibmr_ret)
{
        struct xlist_head *ibmr_xl;

        ibmr_xl = xlist_del_head_fast(xl);
        *ibmr_ret = list_entry(ibmr_xl, struct rds_ib_mr, xlist);
}

static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
{
        struct rds_ib_mr *ibmr = NULL;
        struct xlist_head *ret;
        unsigned long *flag;

        preempt_disable();
        flag = &__get_cpu_var(clean_list_grace);
        set_bit(CLEAN_LIST_BUSY_BIT, flag);
        ret = xlist_del_head(&pool->clean_list);
        if (ret)
                ibmr = list_entry(ret, struct rds_ib_mr, xlist);

        clear_bit(CLEAN_LIST_BUSY_BIT, flag);
        preempt_enable();
        return ibmr;
}

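/*
 * Spin until no CPU is inside the clean_list critical section above.
 * rds_ib_flush_mr_pool() calls this before splicing recycled MRs back
 * onto clean_list; see the longer explanation there.
 */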
static inline void wait_clean_list_grace(void)
{
        int cpu;
        unsigned long *flag;

        for_each_online_cpu(cpu) {
                flag = &per_cpu(clean_list_grace, cpu);
                while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
                        cpu_relax();
        }
}

static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
{
        struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
        struct rds_ib_mr *ibmr = NULL;
        int err = 0, iter = 0;

        while (1) {
                ibmr = rds_ib_reuse_fmr(pool);
                if (ibmr)
                        return ibmr;

                /* No clean MRs - now we have the choice of either
                 * allocating a fresh MR up to the limit imposed by the
                 * driver, or flushing any dirty unused MRs.
                 * We try to avoid stalling in the send path if possible,
                 * so we allocate as long as we're allowed to.
                 *
                 * We're fussy with enforcing the FMR limit, though. If the driver
                 * tells us we can't use more than N fmrs, we shouldn't start
                 * arguing with it */
                if (atomic_inc_return(&pool->item_count) <= pool->max_items)
                        break;

                atomic_dec(&pool->item_count);

                if (++iter > 2) {
                        rds_ib_stats_inc(s_ib_rdma_mr_pool_depleted);
                        return ERR_PTR(-EAGAIN);
                }

                /* We do have some empty MRs. Flush them out. */
                rds_ib_stats_inc(s_ib_rdma_mr_pool_wait);
                rds_ib_flush_mr_pool(pool, 0, &ibmr);
                if (ibmr)
                        return ibmr;
        }

        ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL, rdsibdev_to_node(rds_ibdev));
        if (!ibmr) {
                err = -ENOMEM;
                goto out_no_cigar;
        }

        memset(ibmr, 0, sizeof(*ibmr));

        ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
                        (IB_ACCESS_LOCAL_WRITE |
                         IB_ACCESS_REMOTE_READ |
                         IB_ACCESS_REMOTE_WRITE |
                         IB_ACCESS_REMOTE_ATOMIC),
                        &pool->fmr_attr);
        if (IS_ERR(ibmr->fmr)) {
                err = PTR_ERR(ibmr->fmr);
                ibmr->fmr = NULL;
                printk(KERN_WARNING "RDS/IB: ib_alloc_fmr failed (err=%d)\n", err);
                goto out_no_cigar;
        }

        rds_ib_stats_inc(s_ib_rdma_mr_alloc);
        return ibmr;

out_no_cigar:
        if (ibmr) {
                if (ibmr->fmr)
                        ib_dealloc_fmr(ibmr->fmr);
                kfree(ibmr);
        }
        atomic_dec(&pool->item_count);
        return ERR_PTR(err);
}

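/*
 * DMA-map a scatterlist and feed the resulting page list to
 * ib_map_phys_fmr().  An FMR maps a dense run of pages, so every
 * element except the first must start on a page boundary and every
 * element except the last must end on one; the checks below reject
 * anything else with -EINVAL.
 */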
static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
                          struct scatterlist *sg, unsigned int nents)
{
        struct ib_device *dev = rds_ibdev->dev;
        struct scatterlist *scat = sg;
        u64 io_addr = 0;
        u64 *dma_pages;
        u32 len;
        int page_cnt, sg_dma_len;
        int i, j;
        int ret;

        sg_dma_len = ib_dma_map_sg(dev, sg, nents,
                                   DMA_BIDIRECTIONAL);
        if (unlikely(!sg_dma_len)) {
                printk(KERN_WARNING "RDS/IB: dma_map_sg failed!\n");
                return -EBUSY;
        }

        len = 0;
        page_cnt = 0;

        for (i = 0; i < sg_dma_len; ++i) {
                unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
                u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

                if (dma_addr & ~PAGE_MASK) {
                        if (i > 0)
                                return -EINVAL;
                        else
                                ++page_cnt;
                }
                if ((dma_addr + dma_len) & ~PAGE_MASK) {
                        if (i < sg_dma_len - 1)
                                return -EINVAL;
                        else
                                ++page_cnt;
                }

                len += dma_len;
        }

        page_cnt += len >> PAGE_SHIFT;
        if (page_cnt > fmr_message_size)
                return -EINVAL;

        dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
                                 rdsibdev_to_node(rds_ibdev));
        if (!dma_pages)
                return -ENOMEM;

        page_cnt = 0;
        for (i = 0; i < sg_dma_len; ++i) {
                unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
                u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

                for (j = 0; j < dma_len; j += PAGE_SIZE)
                        dma_pages[page_cnt++] =
                                (dma_addr & PAGE_MASK) + j;
        }

        ret = ib_map_phys_fmr(ibmr->fmr,
                        dma_pages, page_cnt, io_addr);
        if (ret)
                goto out;

        /* Success - we successfully remapped the MR, so we can
         * safely tear down the old mapping. */
        rds_ib_teardown_mr(ibmr);

        ibmr->sg = scat;
        ibmr->sg_len = nents;
        ibmr->sg_dma_len = sg_dma_len;
        ibmr->remap_count++;

        rds_ib_stats_inc(s_ib_rdma_mr_used);
        ret = 0;

out:
        kfree(dma_pages);

        return ret;
}

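/*
 * Exposed to the rds core as the transport's sync_mr operation
 * (rds_ib_transport in ib.c is assumed to point here).  direction is a
 * DMA_* constant describing who touched the memory last.
 */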
void rds_ib_sync_mr(void *trans_private, int direction)
{
        struct rds_ib_mr *ibmr = trans_private;
        struct rds_ib_device *rds_ibdev = ibmr->device;

        switch (direction) {
        case DMA_FROM_DEVICE:
                ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
                        ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
                break;
        case DMA_TO_DEVICE:
                ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
                        ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
                break;
        }
}

static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
        struct rds_ib_device *rds_ibdev = ibmr->device;

        if (ibmr->sg_dma_len) {
                ib_dma_unmap_sg(rds_ibdev->dev,
                        ibmr->sg, ibmr->sg_len,
                        DMA_BIDIRECTIONAL);
                ibmr->sg_dma_len = 0;
        }

        /* Release the s/g list */
        if (ibmr->sg_len) {
                unsigned int i;

                for (i = 0; i < ibmr->sg_len; ++i) {
                        struct page *page = sg_page(&ibmr->sg[i]);

                        /* FIXME we need a way to tell a r/w MR
                         * from a r/o MR */
                        BUG_ON(irqs_disabled());
                        set_page_dirty(page);
                        put_page(page);
                }
                kfree(ibmr->sg);

                ibmr->sg = NULL;
                ibmr->sg_len = 0;
        }
}

static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
        unsigned int pinned = ibmr->sg_len;

        __rds_ib_teardown_mr(ibmr);
        if (pinned) {
                struct rds_ib_device *rds_ibdev = ibmr->device;
                struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

                atomic_sub(pinned, &pool->free_pinned);
        }
}

static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
        unsigned int item_count;

        item_count = atomic_read(&pool->item_count);
        if (free_all)
                return item_count;

        return 0;
}

/*
 * given an xlist of mrs, put them all into the list_head for more processing
 */
static void xlist_append_to_list(struct xlist_head *xlist, struct list_head *list)
{
        struct rds_ib_mr *ibmr;
        struct xlist_head splice;
        struct xlist_head *cur;
        struct xlist_head *next;

        splice.next = NULL;
        xlist_splice(xlist, &splice);
        cur = splice.next;
        while (cur) {
                next = cur->next;
                ibmr = list_entry(cur, struct rds_ib_mr, xlist);
                list_add_tail(&ibmr->unmap_list, list);
                cur = next;
        }
}

/*
 * this takes a list head of mrs and chains them into an xlist, ready to
 * be spliced onto the clean list in a single operation.  The tail of
 * the resulting chain is returned through *tail_ret.
 */
static void list_append_to_xlist(struct rds_ib_mr_pool *pool,
                                struct list_head *list, struct xlist_head *xlist,
                                struct xlist_head **tail_ret)
{
        struct rds_ib_mr *ibmr;
        struct xlist_head *cur_mr = xlist;
        struct xlist_head *tail_mr = NULL;

        list_for_each_entry(ibmr, list, unmap_list) {
                tail_mr = &ibmr->xlist;
                tail_mr->next = NULL;
                cur_mr->next = tail_mr;
                cur_mr = tail_mr;
        }
        *tail_ret = tail_mr;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
                                int free_all, struct rds_ib_mr **ibmr_ret)
{
        struct rds_ib_mr *ibmr, *next;
        struct xlist_head clean_xlist;
        struct xlist_head *clean_tail;
        LIST_HEAD(unmap_list);
        LIST_HEAD(fmr_list);
        unsigned long unpinned = 0;
        unsigned int nfreed = 0, ncleaned = 0, free_goal;
        int ret = 0;

        rds_ib_stats_inc(s_ib_rdma_mr_pool_flush);

        if (ibmr_ret) {
                DEFINE_WAIT(wait);
                while (!mutex_trylock(&pool->flush_lock)) {
                        ibmr = rds_ib_reuse_fmr(pool);
                        if (ibmr) {
                                *ibmr_ret = ibmr;
                                finish_wait(&pool->flush_wait, &wait);
                                goto out_nolock;
                        }

                        prepare_to_wait(&pool->flush_wait, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (xlist_empty(&pool->clean_list))
                                schedule();

                        ibmr = rds_ib_reuse_fmr(pool);
                        if (ibmr) {
                                *ibmr_ret = ibmr;
                                finish_wait(&pool->flush_wait, &wait);
                                goto out_nolock;
                        }
                }
                finish_wait(&pool->flush_wait, &wait);
        } else
                mutex_lock(&pool->flush_lock);

        if (ibmr_ret) {
                ibmr = rds_ib_reuse_fmr(pool);
                if (ibmr) {
                        *ibmr_ret = ibmr;
                        goto out;
                }
        }

        /* Get the list of all MRs to be dropped. Ordering matters -
         * we want to put drop_list ahead of free_list.
         */
        xlist_append_to_list(&pool->drop_list, &unmap_list);
        xlist_append_to_list(&pool->free_list, &unmap_list);
        if (free_all)
                xlist_append_to_list(&pool->clean_list, &unmap_list);

        free_goal = rds_ib_flush_goal(pool, free_all);

        if (list_empty(&unmap_list))
                goto out;

        /* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
        list_for_each_entry(ibmr, &unmap_list, unmap_list)
                list_add(&ibmr->fmr->list, &fmr_list);

        ret = ib_unmap_fmr(&fmr_list);
        if (ret)
                printk(KERN_WARNING "RDS/IB: ib_unmap_fmr failed (err=%d)\n", ret);

        /* Now we can destroy the DMA mapping and unpin any pages */
        list_for_each_entry_safe(ibmr, next, &unmap_list, unmap_list) {
                unpinned += ibmr->sg_len;
                __rds_ib_teardown_mr(ibmr);
                if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) {
                        rds_ib_stats_inc(s_ib_rdma_mr_free);
                        list_del(&ibmr->unmap_list);
                        ib_dealloc_fmr(ibmr->fmr);
                        kfree(ibmr);
                        nfreed++;
                }
                ncleaned++;
        }

        if (!list_empty(&unmap_list)) {
                /* we have to make sure that none of the things we're about
                 * to put on the clean list would race with other cpus trying
                 * to pull items off.  The xlist would explode if we managed to
                 * remove something from the clean list and then add it back again
                 * while another CPU was spinning on that same item in xlist_del_head.
                 *
                 * This is pretty unlikely, but just in case wait for an xlist grace period
                 * here before adding anything back into the clean list.
                 */
                wait_clean_list_grace();

                list_append_to_xlist(pool, &unmap_list, &clean_xlist, &clean_tail);
                if (ibmr_ret)
                        refill_local(pool, &clean_xlist, ibmr_ret);

                /* refill_local may have emptied our list */
                if (!xlist_empty(&clean_xlist))
                        xlist_add(clean_xlist.next, clean_tail, &pool->clean_list);
        }

        atomic_sub(unpinned, &pool->free_pinned);
        atomic_sub(ncleaned, &pool->dirty_count);
        atomic_sub(nfreed, &pool->item_count);

out:
        mutex_unlock(&pool->flush_lock);
        if (waitqueue_active(&pool->flush_wait))
                wake_up(&pool->flush_wait);
out_nolock:
        return ret;
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
        struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);

        rds_ib_flush_mr_pool(pool, 0, NULL);
}

void rds_ib_free_mr(void *trans_private, int invalidate)
{
        struct rds_ib_mr *ibmr = trans_private;
        struct rds_ib_device *rds_ibdev = ibmr->device;
        struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

        rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

        /* Return it to the pool's free list */
        if (ibmr->remap_count >= pool->fmr_attr.max_maps)
                xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->drop_list);
        else
                xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->free_list);

        atomic_add(ibmr->sg_len, &pool->free_pinned);
        atomic_inc(&pool->dirty_count);

        /* If we've pinned too many pages, request a flush */
        if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
            atomic_read(&pool->dirty_count) >= pool->max_items / 10)
                queue_delayed_work(rds_wq, &pool->flush_worker, 10);

        if (invalidate) {
                if (likely(!in_interrupt())) {
                        rds_ib_flush_mr_pool(pool, 0, NULL);
                } else {
                        /* We get here if the user created an MR marked
                         * as use_once and invalidate at the same time. */
                        queue_delayed_work(rds_wq, &pool->flush_worker, 10);
                }
        }

        rds_ib_dev_put(rds_ibdev);
}

void rds_ib_flush_mrs(void)
{
        struct rds_ib_device *rds_ibdev;

        list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
                struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

                if (pool)
                        rds_ib_flush_mr_pool(pool, 0, NULL);
        }
}

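/*
 * Entry point for userspace MR registration.  The rds core is assumed
 * to reach this through the transport ops, roughly:
 *
 *        trans_private = rs->rs_transport->get_mr(sg, nents, rs, &mr->r_key);
 *
 * with the returned pointer later handed back to rds_ib_sync_mr(),
 * rds_ib_free_mr() etc. as trans_private.
 */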
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
                    struct rds_sock *rs, u32 *key_ret)
{
        struct rds_ib_device *rds_ibdev;
        struct rds_ib_mr *ibmr = NULL;
        int ret;

        rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
        if (!rds_ibdev) {
                ret = -ENODEV;
                goto out;
        }

        if (!rds_ibdev->mr_pool) {
                ret = -ENODEV;
                goto out;
        }

        ibmr = rds_ib_alloc_fmr(rds_ibdev);
        if (IS_ERR(ibmr)) {
                /* drop the rds_ib_get_device() reference before bailing */
                rds_ib_dev_put(rds_ibdev);
                return ibmr;
        }

        ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
        if (ret == 0)
                *key_ret = ibmr->fmr->rkey;
        else
                printk(KERN_WARNING "RDS/IB: map_fmr failed (errno=%d)\n", ret);

        ibmr->device = rds_ibdev;
        rds_ibdev = NULL;

 out:
        if (ret) {
                if (ibmr)
                        rds_ib_free_mr(ibmr, 0);
                ibmr = ERR_PTR(ret);
        }
        if (rds_ibdev)
                rds_ib_dev_put(rds_ibdev);
        return ibmr;
}