/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include "rds.h"
#include "ib.h"
#include "xlist.h"

struct workqueue_struct *rds_ib_fmr_wq;

static DEFINE_PER_CPU(unsigned long, clean_list_grace);
#define CLEAN_LIST_BUSY_BIT 0

/*
 * This is stored as mr->r_trans_private.
 */
struct rds_ib_mr {
	struct rds_ib_device *device;
	struct rds_ib_mr_pool *pool;
	struct ib_fmr *fmr;

	struct xlist_head xlist;

	/* unmap_list is for freeing */
	struct list_head unmap_list;
	unsigned int remap_count;

	struct scatterlist *sg;
	unsigned int sg_len;
	u64 *dma;
	int sg_dma_len;
};

/*
 * Our own little FMR pool
 */
struct rds_ib_mr_pool {
	struct mutex flush_lock;		/* serialize fmr invalidate */
	struct delayed_work flush_worker;	/* flush worker */

	atomic_t item_count;			/* total # of MRs */
	atomic_t dirty_count;			/* # of dirty MRs */

	struct xlist_head drop_list;		/* MRs that have reached their max_maps limit */
	struct xlist_head free_list;		/* unused MRs */
	struct xlist_head clean_list;		/* global unused & unmapped MRs */
	wait_queue_head_t flush_wait;

	atomic_t free_pinned;			/* memory pinned by free MRs */
	unsigned long max_items;
	unsigned long max_items_soft;
	unsigned long max_free_pinned;
	struct ib_fmr_attr fmr_attr;
};

static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all, struct rds_ib_mr **);
static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
static void rds_ib_mr_pool_flush_worker(struct work_struct *work);

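/*
 * Look up the rds_ib_device that has ipaddr bound to it. On success the
 * device's refcount has been bumped; the caller must drop it with
 * rds_ib_dev_put() when done.
 */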
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_ipaddr *i_ipaddr;

	rcu_read_lock();
	list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
		list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
			if (i_ipaddr->ipaddr == ipaddr) {
				atomic_inc(&rds_ibdev->refcount);
				rcu_read_unlock();
				return rds_ibdev;
			}
		}
	}
	rcu_read_unlock();

	return NULL;
}

static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;

	i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL);
	if (!i_ipaddr)
		return -ENOMEM;

	i_ipaddr->ipaddr = ipaddr;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
	spin_unlock_irq(&rds_ibdev->spinlock);

	return 0;
}

static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;
	struct rds_ib_ipaddr *to_free = NULL;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
		if (i_ipaddr->ipaddr == ipaddr) {
			list_del_rcu(&i_ipaddr->list);
			to_free = i_ipaddr;
			break;
		}
	}
	spin_unlock_irq(&rds_ibdev->spinlock);

	if (to_free) {
		synchronize_rcu();
		kfree(to_free);
	}
}

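/*
 * Rebind ipaddr to rds_ibdev: drop it from whichever device currently
 * holds it (if any) and then add it to this one.
 */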
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev_old;

	rds_ibdev_old = rds_ib_get_device(ipaddr);
	if (rds_ibdev_old) {
		rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);
		rds_ib_dev_put(rds_ibdev_old);
	}

	return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
}

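/*
 * Move a connection from the global nodev list onto its device's
 * conn_list, taking a reference on the device for it.
 */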
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* conn was previously on the nodev_conns_list */
	spin_lock_irq(&ib_nodev_conns_lock);
	BUG_ON(list_empty(&ib_nodev_conns));
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);

	spin_lock(&rds_ibdev->spinlock);
	list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
	spin_unlock(&rds_ibdev->spinlock);
	spin_unlock_irq(&ib_nodev_conns_lock);

	ic->rds_ibdev = rds_ibdev;
	atomic_inc(&rds_ibdev->refcount);
}

void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* place conn on nodev_conns_list */
	spin_lock(&ib_nodev_conns_lock);

	spin_lock_irq(&rds_ibdev->spinlock);
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);
	spin_unlock_irq(&rds_ibdev->spinlock);

	list_add_tail(&ic->ib_node, &ib_nodev_conns);

	spin_unlock(&ib_nodev_conns_lock);

	ic->rds_ibdev = NULL;
	rds_ib_dev_put(rds_ibdev);
}

void rds_ib_destroy_nodev_conns(void)
{
	struct rds_ib_connection *ic, *_ic;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(&ib_nodev_conns_lock);
	list_splice(&ib_nodev_conns, &tmp_list);
	spin_unlock_irq(&ib_nodev_conns_lock);

	list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
		rds_conn_destroy(ic->conn);
}

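/*
 * Allocate and initialize an FMR pool for one IB device. The pool limits
 * are derived from the device's max_fmrs and fmr_max_remaps values.
 */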
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	INIT_XLIST_HEAD(&pool->free_list);
	INIT_XLIST_HEAD(&pool->drop_list);
	INIT_XLIST_HEAD(&pool->clean_list);
	mutex_init(&pool->flush_lock);
	init_waitqueue_head(&pool->flush_wait);
	INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

	pool->fmr_attr.max_pages = fmr_message_size;
	pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
	pool->fmr_attr.page_shift = PAGE_SHIFT;
	pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4;

	/* We never allow more than max_items MRs to be allocated.
	 * When we exceed max_items_soft, we start freeing
	 * items more aggressively.
	 * Make sure that max_items > max_items_soft > max_items / 2
	 */
	pool->max_items_soft = rds_ibdev->max_fmrs * 3 / 4;
	pool->max_items = rds_ibdev->max_fmrs;

	return pool;
}

void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

	iinfo->rdma_mr_max = pool->max_items;
	iinfo->rdma_mr_size = pool->fmr_attr.max_pages;
}

void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
	cancel_delayed_work_sync(&pool->flush_worker);
	rds_ib_flush_mr_pool(pool, 1, NULL);
	WARN_ON(atomic_read(&pool->item_count));
	WARN_ON(atomic_read(&pool->free_pinned));
	kfree(pool);
}

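/*
 * Pop one MR off an xlist and hand it straight to a waiting allocator,
 * so the flush path can satisfy an allocation without going back through
 * the clean list.
 */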
static void refill_local(struct rds_ib_mr_pool *pool, struct xlist_head *xl,
			 struct rds_ib_mr **ibmr_ret)
{
	struct xlist_head *ibmr_xl;
	ibmr_xl = xlist_del_head_fast(xl);
	*ibmr_ret = list_entry(ibmr_xl, struct rds_ib_mr, xlist);
}

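/*
 * Grab an unused, already-unmapped MR from the clean list. The per-cpu
 * clean_list_grace busy bit is held across the xlist_del_head() so that
 * wait_clean_list_grace() can tell when it is safe to put entries back.
 */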
static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	struct xlist_head *ret;
	unsigned long *flag;

	preempt_disable();
	flag = &__get_cpu_var(clean_list_grace);
	set_bit(CLEAN_LIST_BUSY_BIT, flag);
	ret = xlist_del_head(&pool->clean_list);
	if (ret)
		ibmr = list_entry(ret, struct rds_ib_mr, xlist);

	clear_bit(CLEAN_LIST_BUSY_BIT, flag);
	preempt_enable();
	return ibmr;
}

static inline void wait_clean_list_grace(void)
{
	int cpu;
	unsigned long *flag;

	for_each_online_cpu(cpu) {
		flag = &per_cpu(clean_list_grace, cpu);
		while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
			cpu_relax();
	}
}

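/*
 * Allocate an MR for a new mapping. Reuse a clean MR when one is
 * available; otherwise allocate a fresh FMR, flushing the pool and
 * retrying a couple of times if the item limit has been reached.
 */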
static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
	struct rds_ib_mr *ibmr = NULL;
	int err = 0, iter = 0;

	if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);

	while (1) {
		ibmr = rds_ib_reuse_fmr(pool);
		if (ibmr)
			return ibmr;

		/* No clean MRs - now we have the choice of either
		 * allocating a fresh MR up to the limit imposed by the
		 * driver, or flushing any dirty unused MRs.
		 * We try to avoid stalling in the send path if possible,
		 * so we allocate as long as we're allowed to.
		 *
		 * We're fussy with enforcing the FMR limit, though. If the driver
		 * tells us we can't use more than N fmrs, we shouldn't start
		 * arguing with it */
		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			rds_ib_stats_inc(s_ib_rdma_mr_pool_depleted);
			return ERR_PTR(-EAGAIN);
		}

		/* We do have some empty MRs. Flush them out. */
		rds_ib_stats_inc(s_ib_rdma_mr_pool_wait);
		rds_ib_flush_mr_pool(pool, 0, &ibmr);
		if (ibmr)
			return ibmr;
	}

	ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL, rdsibdev_to_node(rds_ibdev));
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	memset(ibmr, 0, sizeof(*ibmr));

	ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
			(IB_ACCESS_LOCAL_WRITE |
			 IB_ACCESS_REMOTE_READ |
			 IB_ACCESS_REMOTE_WRITE |
			 IB_ACCESS_REMOTE_ATOMIC),
			&pool->fmr_attr);
	if (IS_ERR(ibmr->fmr)) {
		err = PTR_ERR(ibmr->fmr);
		ibmr->fmr = NULL;
		printk(KERN_WARNING "RDS/IB: ib_alloc_fmr failed (err=%d)\n", err);
		goto out_no_cigar;
	}

	rds_ib_stats_inc(s_ib_rdma_mr_alloc);
	return ibmr;

out_no_cigar:
	if (ibmr) {
		if (ibmr->fmr)
			ib_dealloc_fmr(ibmr->fmr);
		kfree(ibmr);
	}
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}

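/*
 * Build the page list for an FMR mapping. Every s/g element except the
 * first must start on a page boundary and every element except the last
 * must end on one, otherwise the region can't be expressed as a simple
 * page list and we return -EINVAL.
 */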
static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
			  struct scatterlist *sg, unsigned int nents)
{
	struct ib_device *dev = rds_ibdev->dev;
	struct scatterlist *scat = sg;
	u64 io_addr = 0;
	u64 *dma_pages;
	u32 len;
	int page_cnt, sg_dma_len;
	int i, j;
	int ret;

	sg_dma_len = ib_dma_map_sg(dev, sg, nents,
				   DMA_BIDIRECTIONAL);
	if (unlikely(!sg_dma_len)) {
		printk(KERN_WARNING "RDS/IB: dma_map_sg failed!\n");
		return -EBUSY;
	}

	len = 0;
	page_cnt = 0;

	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		if (dma_addr & ~PAGE_MASK) {
			if (i > 0)
				return -EINVAL;
			else
				++page_cnt;
		}
		if ((dma_addr + dma_len) & ~PAGE_MASK) {
			if (i < sg_dma_len - 1)
				return -EINVAL;
			else
				++page_cnt;
		}

		len += dma_len;
	}

	page_cnt += len >> PAGE_SHIFT;
	if (page_cnt > fmr_message_size)
		return -EINVAL;

	dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
				 rdsibdev_to_node(rds_ibdev));
	if (!dma_pages)
		return -ENOMEM;

	page_cnt = 0;
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		for (j = 0; j < dma_len; j += PAGE_SIZE)
			dma_pages[page_cnt++] =
				(dma_addr & PAGE_MASK) + j;
	}

	ret = ib_map_phys_fmr(ibmr->fmr,
			dma_pages, page_cnt, io_addr);
	if (ret)
		goto out;

	/* Success - the MR was remapped, so it is now safe to tear
	 * down the old mapping. */
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = scat;
	ibmr->sg_len = nents;
	ibmr->sg_dma_len = sg_dma_len;
	ibmr->remap_count++;

	rds_ib_stats_inc(s_ib_rdma_mr_used);
	ret = 0;

out:
	kfree(dma_pages);

	return ret;
}

void rds_ib_sync_mr(void *trans_private, int direction)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	switch (direction) {
	case DMA_FROM_DEVICE:
		ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}

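/*
 * Drop the DMA mapping for an MR and release (dirty and unpin) the pages
 * behind it. The free_pinned accounting is left to the caller.
 */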
static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_device *rds_ibdev = ibmr->device;

	if (ibmr->sg_dma_len) {
		ib_dma_unmap_sg(rds_ibdev->dev,
				ibmr->sg, ibmr->sg_len,
				DMA_BIDIRECTIONAL);
		ibmr->sg_dma_len = 0;
	}

	/* Release the s/g list */
	if (ibmr->sg_len) {
		unsigned int i;

		for (i = 0; i < ibmr->sg_len; ++i) {
			struct page *page = sg_page(&ibmr->sg[i]);

			/* FIXME we need a way to tell a r/w MR
			 * from a r/o MR */
			BUG_ON(irqs_disabled());
			set_page_dirty(page);
			put_page(page);
		}
		kfree(ibmr->sg);

		ibmr->sg = NULL;
		ibmr->sg_len = 0;
	}
}

static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	unsigned int pinned = ibmr->sg_len;

	__rds_ib_teardown_mr(ibmr);
	if (pinned) {
		struct rds_ib_device *rds_ibdev = ibmr->device;
		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

		atomic_sub(pinned, &pool->free_pinned);
	}
}

static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
	unsigned int item_count;

	item_count = atomic_read(&pool->item_count);
	if (free_all)
		return item_count;

	return 0;
}

/*
 * Given an xlist of mrs, put them all into the list_head for more processing
 */
static void xlist_append_to_list(struct xlist_head *xlist, struct list_head *list)
{
	struct rds_ib_mr *ibmr;
	struct xlist_head splice;
	struct xlist_head *cur;
	struct xlist_head *next;

	splice.next = NULL;
	xlist_splice(xlist, &splice);
	cur = splice.next;
	while (cur) {
		next = cur->next;
		ibmr = list_entry(cur, struct rds_ib_mr, xlist);
		list_add_tail(&ibmr->unmap_list, list);
		cur = next;
	}
}

/*
 * This takes a list_head of mrs and chains them onto an xlist of mrs that
 * are ready for reuse, handing back the tail entry so the whole chain can
 * be spliced onto the clean list in one operation.
 */
static void list_append_to_xlist(struct rds_ib_mr_pool *pool,
				struct list_head *list, struct xlist_head *xlist,
				struct xlist_head **tail_ret)
{
	struct rds_ib_mr *ibmr;
	struct xlist_head *cur_mr = xlist;
	struct xlist_head *tail_mr = NULL;

	list_for_each_entry(ibmr, list, unmap_list) {
		tail_mr = &ibmr->xlist;
		tail_mr->next = NULL;
		cur_mr->next = tail_mr;
		cur_mr = tail_mr;
	}
	*tail_ret = tail_mr;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
				int free_all, struct rds_ib_mr **ibmr_ret)
{
	struct rds_ib_mr *ibmr, *next;
	struct xlist_head clean_xlist;
	struct xlist_head *clean_tail;
	LIST_HEAD(unmap_list);
	LIST_HEAD(fmr_list);
	unsigned long unpinned = 0;
	unsigned int nfreed = 0, ncleaned = 0, free_goal;
	int ret = 0;

	rds_ib_stats_inc(s_ib_rdma_mr_pool_flush);

	if (ibmr_ret) {
		DEFINE_WAIT(wait);
		while (!mutex_trylock(&pool->flush_lock)) {
			ibmr = rds_ib_reuse_fmr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}

			prepare_to_wait(&pool->flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (xlist_empty(&pool->clean_list))
				schedule();

			ibmr = rds_ib_reuse_fmr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}
		}
		finish_wait(&pool->flush_wait, &wait);
	} else
		mutex_lock(&pool->flush_lock);

	if (ibmr_ret) {
		ibmr = rds_ib_reuse_fmr(pool);
		if (ibmr) {
			*ibmr_ret = ibmr;
			goto out;
		}
	}

	/* Get the list of all MRs to be dropped. Ordering matters -
	 * we want to put drop_list ahead of free_list.
	 */
	xlist_append_to_list(&pool->drop_list, &unmap_list);
	xlist_append_to_list(&pool->free_list, &unmap_list);
	if (free_all)
		xlist_append_to_list(&pool->clean_list, &unmap_list);

	free_goal = rds_ib_flush_goal(pool, free_all);

	if (list_empty(&unmap_list))
		goto out;

	/* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
	list_for_each_entry(ibmr, &unmap_list, unmap_list)
		list_add(&ibmr->fmr->list, &fmr_list);

	ret = ib_unmap_fmr(&fmr_list);
	if (ret)
		printk(KERN_WARNING "RDS/IB: ib_unmap_fmr failed (err=%d)\n", ret);

	/* Now we can destroy the DMA mapping and unpin any pages */
	list_for_each_entry_safe(ibmr, next, &unmap_list, unmap_list) {
		unpinned += ibmr->sg_len;
		__rds_ib_teardown_mr(ibmr);
		if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) {
			rds_ib_stats_inc(s_ib_rdma_mr_free);
			list_del(&ibmr->unmap_list);
			ib_dealloc_fmr(ibmr->fmr);
			kfree(ibmr);
			nfreed++;
		}
		ncleaned++;
	}

	if (!list_empty(&unmap_list)) {
		/* we have to make sure that none of the things we're about
		 * to put on the clean list would race with other cpus trying
		 * to pull items off. The xlist would explode if we managed to
		 * remove something from the clean list and then add it back again
		 * while another CPU was spinning on that same item in xlist_del_head.
		 *
		 * This is pretty unlikely, but just in case wait for an xlist grace period
		 * here before adding anything back into the clean list.
		 */
		wait_clean_list_grace();

		list_append_to_xlist(pool, &unmap_list, &clean_xlist, &clean_tail);
		if (ibmr_ret)
			refill_local(pool, &clean_xlist, ibmr_ret);

		/* refill_local may have emptied our list */
		if (!xlist_empty(&clean_xlist))
			xlist_add(clean_xlist.next, clean_tail, &pool->clean_list);

	}

	atomic_sub(unpinned, &pool->free_pinned);
	atomic_sub(ncleaned, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

out:
	mutex_unlock(&pool->flush_lock);
	if (waitqueue_active(&pool->flush_wait))
		wake_up(&pool->flush_wait);
out_nolock:
	return ret;
}

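/*
 * Create the workqueue that runs the per-pool flush work.
 */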
int rds_ib_fmr_init(void)
{
	rds_ib_fmr_wq = create_workqueue("rds_fmr_flushd");
	if (!rds_ib_fmr_wq)
		return -ENOMEM;
	return 0;
}

/*
 * By the time this is called all the IB devices should have been torn down and
 * had their pools freed. As each pool is freed its work struct is waited on,
 * so the pool flushing work queue should be idle by the time we get here.
 */
void rds_ib_fmr_exit(void)
{
	destroy_workqueue(rds_ib_fmr_wq);
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);

	rds_ib_flush_mr_pool(pool, 0, NULL);
}

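/*
 * Called when an MR is no longer in use. The MR isn't torn down here;
 * it is parked on the pool's drop or free list and a pool flush is kicked
 * off, or run synchronously when invalidate is set and we aren't in
 * interrupt context.
 */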
void rds_ib_free_mr(void *trans_private, int invalidate)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

	rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

	/* Return it to the pool's free list */
	if (ibmr->remap_count >= pool->fmr_attr.max_maps)
		xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->drop_list);
	else
		xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->free_list);

	atomic_add(ibmr->sg_len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);

	if (invalidate) {
		if (likely(!in_interrupt())) {
			rds_ib_flush_mr_pool(pool, 0, NULL);
		} else {
			/* We get here if the user created an MR marked
			 * as use_once and invalidate at the same time. */
			queue_delayed_work(rds_ib_fmr_wq,
					   &pool->flush_worker, 10);
		}
	}

	rds_ib_dev_put(rds_ibdev);
}

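/*
 * Flush the MR pool of every IB device we know about.
 */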
void rds_ib_flush_mrs(void)
{
	struct rds_ib_device *rds_ibdev;

	down_read(&rds_ib_devices_lock);
	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

		if (pool)
			rds_ib_flush_mr_pool(pool, 0, NULL);
	}
	up_read(&rds_ib_devices_lock);
}

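/*
 * Register the pages behind sg with the device bound to rs, returning the
 * rkey through key_ret. The rds_ib_mr handed back here is what ends up in
 * mr->r_trans_private.
 */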
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_mr *ibmr = NULL;
	int ret;

	rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
	if (!rds_ibdev) {
		ret = -ENODEV;
		goto out;
	}

	if (!rds_ibdev->mr_pool) {
		ret = -ENODEV;
		goto out;
	}

	ibmr = rds_ib_alloc_fmr(rds_ibdev);
	if (IS_ERR(ibmr))
		return ibmr;

	ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
	if (ret == 0)
		*key_ret = ibmr->fmr->rkey;
	else
		printk(KERN_WARNING "RDS/IB: map_fmr failed (errno=%d)\n", ret);

	ibmr->device = rds_ibdev;
	rds_ibdev = NULL;

 out:
	if (ret) {
		if (ibmr)
			rds_ib_free_mr(ibmr, 0);
		ibmr = ERR_PTR(ret);
	}
	if (rds_ibdev)
		rds_ib_dev_put(rds_ibdev);
	return ibmr;
}