/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>

#include "rds.h"
#include "iw.h"


/*
 * This is stored as mr->r_trans_private.
 */
struct rds_iw_mr {
	struct rds_iw_device	*device;
	struct rds_iw_mr_pool	*pool;
	struct rdma_cm_id	*cm_id;

	struct ib_mr		*mr;
	struct ib_fast_reg_page_list *page_list;

	struct rds_iw_mapping	mapping;
	unsigned char		remap_count;
};

/*
 * Our own little MR pool
 */
struct rds_iw_mr_pool {
	struct rds_iw_device	*device;		/* back ptr to the device that owns us */

	struct mutex		flush_lock;		/* serialize fmr invalidate */
	struct work_struct	flush_worker;		/* flush worker */

	spinlock_t		list_lock;		/* protect variables below */
	atomic_t		item_count;		/* total # of MRs */
	atomic_t		dirty_count;		/* # of dirty MRs */
	struct list_head	dirty_list;		/* dirty mappings */
	struct list_head	clean_list;		/* unused & unmapped MRs */
	atomic_t		free_pinned;		/* memory pinned by free MRs */
	unsigned long		max_message_size;	/* in pages */
	unsigned long		max_items;
	unsigned long		max_items_soft;
	unsigned long		max_free_pinned;
	int			max_pages;
};

static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all);
static void rds_iw_mr_pool_flush_worker(struct work_struct *work);
static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
			  struct rds_iw_mr *ibmr,
			  struct scatterlist *sg, unsigned int nents);
static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
			struct list_head *unmap_list,
			struct list_head *kill_list,
			int *unpinned);
static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);

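/*
 * Look up the device and cm_id associated with a socket's bound address.
 * Returns 0 and fills in *rds_iwdev and *cm_id on a match, 1 otherwise.
 */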
static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id)
{
	struct rds_iw_device *iwdev;
	struct rds_iw_cm_id *i_cm_id;

	*rds_iwdev = NULL;
	*cm_id = NULL;

	list_for_each_entry(iwdev, &rds_iw_devices, list) {
		spin_lock_irq(&iwdev->spinlock);
		list_for_each_entry(i_cm_id, &iwdev->cm_id_list, list) {
			struct sockaddr_in *src_addr, *dst_addr;

			src_addr = (struct sockaddr_in *)&i_cm_id->cm_id->route.addr.src_addr;
			dst_addr = (struct sockaddr_in *)&i_cm_id->cm_id->route.addr.dst_addr;

			rdsdebug("local ipaddr = %x port %d, "
				 "remote ipaddr = %x port %d"
				 "..looking for %x port %d, "
				 "remote ipaddr = %x port %d\n",
				src_addr->sin_addr.s_addr,
				src_addr->sin_port,
				dst_addr->sin_addr.s_addr,
				dst_addr->sin_port,
				rs->rs_bound_addr,
				rs->rs_bound_port,
				rs->rs_conn_addr,
				rs->rs_conn_port);
#ifdef WORKING_TUPLE_DETECTION
			if (src_addr->sin_addr.s_addr == rs->rs_bound_addr &&
			    src_addr->sin_port == rs->rs_bound_port &&
			    dst_addr->sin_addr.s_addr == rs->rs_conn_addr &&
			    dst_addr->sin_port == rs->rs_conn_port) {
#else
			/* FIXME - needs to compare the local and remote
			 * ipaddr/port tuple, but the ipaddr is the only
			 * available information in the rds_sock (as the rest
			 * are zeroed). It doesn't appear to be properly
			 * populated during connection setup...
			 */
			if (src_addr->sin_addr.s_addr == rs->rs_bound_addr) {
#endif
				spin_unlock_irq(&iwdev->spinlock);
				*rds_iwdev = iwdev;
				*cm_id = i_cm_id->cm_id;
				return 0;
			}
		}
		spin_unlock_irq(&iwdev->spinlock);
	}

	return 1;
}

static int rds_iw_add_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id)
{
	struct rds_iw_cm_id *i_cm_id;

	i_cm_id = kmalloc(sizeof *i_cm_id, GFP_KERNEL);
	if (!i_cm_id)
		return -ENOMEM;

	i_cm_id->cm_id = cm_id;

	spin_lock_irq(&rds_iwdev->spinlock);
	list_add_tail(&i_cm_id->list, &rds_iwdev->cm_id_list);
	spin_unlock_irq(&rds_iwdev->spinlock);

	return 0;
}

static void rds_iw_remove_cm_id(struct rds_iw_device *rds_iwdev,
				struct rdma_cm_id *cm_id)
{
	struct rds_iw_cm_id *i_cm_id;

	spin_lock_irq(&rds_iwdev->spinlock);
	list_for_each_entry(i_cm_id, &rds_iwdev->cm_id_list, list) {
		if (i_cm_id->cm_id == cm_id) {
			list_del(&i_cm_id->list);
			kfree(i_cm_id);
			break;
		}
	}
	spin_unlock_irq(&rds_iwdev->spinlock);
}


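/*
 * (Re)associate a cm_id with this device: if its address tuple can no
 * longer be matched to a device, the stale list entry for the cm_id is
 * dropped before the cm_id is added back to the device's list.
 */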
int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id)
{
	struct sockaddr_in *src_addr, *dst_addr;
	struct rds_iw_device *rds_iwdev_old;
	struct rds_sock rs;
	struct rdma_cm_id *pcm_id;
	int rc;

	src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
	dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;

	rs.rs_bound_addr = src_addr->sin_addr.s_addr;
	rs.rs_bound_port = src_addr->sin_port;
	rs.rs_conn_addr = dst_addr->sin_addr.s_addr;
	rs.rs_conn_port = dst_addr->sin_port;

	rc = rds_iw_get_device(&rs, &rds_iwdev_old, &pcm_id);
	if (rc)
		rds_iw_remove_cm_id(rds_iwdev, cm_id);

	return rds_iw_add_cm_id(rds_iwdev, cm_id);
}

void rds_iw_add_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn)
{
	struct rds_iw_connection *ic = conn->c_transport_data;

	/* conn was previously on the nodev_conns_list */
	spin_lock_irq(&iw_nodev_conns_lock);
	BUG_ON(list_empty(&iw_nodev_conns));
	BUG_ON(list_empty(&ic->iw_node));
	list_del(&ic->iw_node);

	spin_lock(&rds_iwdev->spinlock);
	list_add_tail(&ic->iw_node, &rds_iwdev->conn_list);
	spin_unlock(&rds_iwdev->spinlock);
	spin_unlock_irq(&iw_nodev_conns_lock);

	ic->rds_iwdev = rds_iwdev;
}

void rds_iw_remove_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn)
{
	struct rds_iw_connection *ic = conn->c_transport_data;

	/* place conn on nodev_conns_list */
	spin_lock(&iw_nodev_conns_lock);

	spin_lock_irq(&rds_iwdev->spinlock);
	BUG_ON(list_empty(&ic->iw_node));
	list_del(&ic->iw_node);
	spin_unlock_irq(&rds_iwdev->spinlock);

	list_add_tail(&ic->iw_node, &iw_nodev_conns);

	spin_unlock(&iw_nodev_conns_lock);

	rds_iw_remove_cm_id(ic->rds_iwdev, ic->i_cm_id);
	ic->rds_iwdev = NULL;
}

void __rds_iw_destroy_conns(struct list_head *list, spinlock_t *list_lock)
{
	struct rds_iw_connection *ic, *_ic;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(list_lock);
	list_splice(list, &tmp_list);
	INIT_LIST_HEAD(list);
	spin_unlock_irq(list_lock);

	list_for_each_entry_safe(ic, _ic, &tmp_list, iw_node)
		rds_conn_destroy(ic->conn);
}

static void rds_iw_set_scatterlist(struct rds_iw_scatterlist *sg,
		struct scatterlist *list, unsigned int sg_len)
{
	sg->list = list;
	sg->len = sg_len;
	sg->dma_len = 0;
	sg->dma_npages = 0;
	sg->bytes = 0;
}

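/*
 * DMA-map an rds_iw_scatterlist, checking that only the first entry may
 * start off a page boundary and only the last may end off one. On success
 * this returns a kmalloc'ed array holding the DMA address of each page
 * (the caller must kfree it); on failure the scatterlist is unmapped and
 * an ERR_PTR is returned.
 */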
static u64 *rds_iw_map_scatterlist(struct rds_iw_device *rds_iwdev,
			struct rds_iw_scatterlist *sg)
{
	struct ib_device *dev = rds_iwdev->dev;
	u64 *dma_pages = NULL;
	int i, j, ret;

	WARN_ON(sg->dma_len);

	sg->dma_len = ib_dma_map_sg(dev, sg->list, sg->len, DMA_BIDIRECTIONAL);
	if (unlikely(!sg->dma_len)) {
		printk(KERN_WARNING "RDS/IW: dma_map_sg failed!\n");
		return ERR_PTR(-EBUSY);
	}

	sg->bytes = 0;
	sg->dma_npages = 0;

	ret = -EINVAL;
	for (i = 0; i < sg->dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &sg->list[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &sg->list[i]);
		u64 end_addr;

		sg->bytes += dma_len;

		end_addr = dma_addr + dma_len;
		if (dma_addr & PAGE_MASK) {
			if (i > 0)
				goto out_unmap;
			dma_addr &= ~PAGE_MASK;
		}
		if (end_addr & PAGE_MASK) {
			if (i < sg->dma_len - 1)
				goto out_unmap;
			end_addr = (end_addr + PAGE_MASK) & ~PAGE_MASK;
		}

		sg->dma_npages += (end_addr - dma_addr) >> PAGE_SHIFT;
	}

	/* Now gather the dma addrs into one list */
	if (sg->dma_npages > fastreg_message_size)
		goto out_unmap;

	dma_pages = kmalloc(sizeof(u64) * sg->dma_npages, GFP_ATOMIC);
	if (!dma_pages) {
		ret = -ENOMEM;
		goto out_unmap;
	}

	for (i = j = 0; i < sg->dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &sg->list[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &sg->list[i]);
		u64 end_addr;

		end_addr = dma_addr + dma_len;
		dma_addr &= ~PAGE_MASK;
		for (; dma_addr < end_addr; dma_addr += PAGE_SIZE)
			dma_pages[j++] = dma_addr;
		BUG_ON(j > sg->dma_npages);
	}

	return dma_pages;

out_unmap:
	ib_dma_unmap_sg(rds_iwdev->dev, sg->list, sg->len, DMA_BIDIRECTIONAL);
	sg->dma_len = 0;
	kfree(dma_pages);
	return ERR_PTR(ret);
}


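/*
 * Create the per-device MR pool, sized from fastreg_message_size and
 * fastreg_pool_size.
 */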
struct rds_iw_mr_pool *rds_iw_create_mr_pool(struct rds_iw_device *rds_iwdev)
{
	struct rds_iw_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		printk(KERN_WARNING "RDS/IW: rds_iw_create_mr_pool alloc error\n");
		return ERR_PTR(-ENOMEM);
	}

	pool->device = rds_iwdev;
	INIT_LIST_HEAD(&pool->dirty_list);
	INIT_LIST_HEAD(&pool->clean_list);
	mutex_init(&pool->flush_lock);
	spin_lock_init(&pool->list_lock);
	INIT_WORK(&pool->flush_worker, rds_iw_mr_pool_flush_worker);

	pool->max_message_size = fastreg_message_size;
	pool->max_items = fastreg_pool_size;
	pool->max_free_pinned = pool->max_items * pool->max_message_size / 4;
	pool->max_pages = fastreg_message_size;

	/* We never allow more than max_items MRs to be allocated.
	 * When we exceed more than max_items_soft, we start freeing
	 * items more aggressively.
	 * Make sure that max_items > max_items_soft > max_items / 2
	 */
	pool->max_items_soft = pool->max_items * 3 / 4;

	return pool;
}

void rds_iw_get_mr_info(struct rds_iw_device *rds_iwdev, struct rds_info_rdma_connection *iinfo)
{
	struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;

	iinfo->rdma_mr_max = pool->max_items;
	iinfo->rdma_mr_size = pool->max_pages;
}

void rds_iw_destroy_mr_pool(struct rds_iw_mr_pool *pool)
{
	flush_workqueue(rds_wq);
	rds_iw_flush_mr_pool(pool, 1);
	BUG_ON(atomic_read(&pool->item_count));
	BUG_ON(atomic_read(&pool->free_pinned));
	kfree(pool);
}

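/* Grab an unused MR off the pool's clean list, if one is available. */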
static inline struct rds_iw_mr *rds_iw_reuse_fmr(struct rds_iw_mr_pool *pool)
{
	struct rds_iw_mr *ibmr = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->list_lock, flags);
	if (!list_empty(&pool->clean_list)) {
		ibmr = list_entry(pool->clean_list.next, struct rds_iw_mr, mapping.m_list);
		list_del_init(&ibmr->mapping.m_list);
	}
	spin_unlock_irqrestore(&pool->list_lock, flags);

	return ibmr;
}

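/*
 * Allocate an MR for a socket: reuse a clean one if possible, otherwise
 * allocate a fresh one up to the pool limit, flushing dirty MRs and
 * retrying when the pool is depleted.
 */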
static struct rds_iw_mr *rds_iw_alloc_mr(struct rds_iw_device *rds_iwdev)
{
	struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;
	struct rds_iw_mr *ibmr = NULL;
	int err = 0, iter = 0;

	while (1) {
		ibmr = rds_iw_reuse_fmr(pool);
		if (ibmr)
			return ibmr;

		/* No clean MRs - now we have the choice of either
		 * allocating a fresh MR up to the limit imposed by the
		 * driver, or flush any dirty unused MRs.
		 * We try to avoid stalling in the send path if possible,
		 * so we allocate as long as we're allowed to.
		 *
		 * We're fussy with enforcing the FMR limit, though. If the driver
		 * tells us we can't use more than N fmrs, we shouldn't start
		 * arguing with it */
		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			rds_iw_stats_inc(s_iw_rdma_mr_pool_depleted);
			return ERR_PTR(-EAGAIN);
		}

		/* We do have some empty MRs. Flush them out. */
		rds_iw_stats_inc(s_iw_rdma_mr_pool_wait);
		rds_iw_flush_mr_pool(pool, 0);
	}

	ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	spin_lock_init(&ibmr->mapping.m_lock);
	INIT_LIST_HEAD(&ibmr->mapping.m_list);
	ibmr->mapping.m_mr = ibmr;

	err = rds_iw_init_fastreg(pool, ibmr);
	if (err)
		goto out_no_cigar;

	rds_iw_stats_inc(s_iw_rdma_mr_alloc);
	return ibmr;

out_no_cigar:
	if (ibmr) {
		rds_iw_destroy_fastreg(pool, ibmr);
		kfree(ibmr);
	}
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}

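/* DMA-sync an MR's pages for CPU or device access, depending on direction. */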
void rds_iw_sync_mr(void *trans_private, int direction)
{
	struct rds_iw_mr *ibmr = trans_private;
	struct rds_iw_device *rds_iwdev = ibmr->device;

	switch (direction) {
	case DMA_FROM_DEVICE:
		ib_dma_sync_sg_for_cpu(rds_iwdev->dev, ibmr->mapping.m_sg.list,
			ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		ib_dma_sync_sg_for_device(rds_iwdev->dev, ibmr->mapping.m_sg.list,
			ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}

static inline unsigned int rds_iw_flush_goal(struct rds_iw_mr_pool *pool, int free_all)
{
	unsigned int item_count;

	item_count = atomic_read(&pool->item_count);
	if (free_all)
		return item_count;

	return 0;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
{
	struct rds_iw_mr *ibmr, *next;
	LIST_HEAD(unmap_list);
	LIST_HEAD(kill_list);
	unsigned long flags;
	unsigned int nfreed = 0, ncleaned = 0, unpinned = 0, free_goal;
	int ret = 0;

	rds_iw_stats_inc(s_iw_rdma_mr_pool_flush);

	mutex_lock(&pool->flush_lock);

	spin_lock_irqsave(&pool->list_lock, flags);
	/* Get the list of all mappings to be destroyed */
	list_splice_init(&pool->dirty_list, &unmap_list);
	if (free_all)
		list_splice_init(&pool->clean_list, &kill_list);
	spin_unlock_irqrestore(&pool->list_lock, flags);

	free_goal = rds_iw_flush_goal(pool, free_all);

	/* Batched invalidate of dirty MRs.
	 * For FMR based MRs, the mappings on the unmap list are
	 * actually members of an ibmr (ibmr->mapping). They either
	 * migrate to the kill_list, or have been cleaned and should be
	 * moved to the clean_list.
	 * For fastregs, they will be dynamically allocated, and
	 * will be destroyed by the unmap function.
	 */
	if (!list_empty(&unmap_list)) {
		ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list,
						     &kill_list, &unpinned);
		/* If we've been asked to destroy all MRs, move those
		 * that were simply cleaned to the kill list */
		if (free_all)
			list_splice_init(&unmap_list, &kill_list);
	}

	/* Destroy any MRs that are past their best before date */
	list_for_each_entry_safe(ibmr, next, &kill_list, mapping.m_list) {
		rds_iw_stats_inc(s_iw_rdma_mr_free);
		list_del(&ibmr->mapping.m_list);
		rds_iw_destroy_fastreg(pool, ibmr);
		kfree(ibmr);
		nfreed++;
	}

	/* Anything that remains are laundered ibmrs, which we can add
	 * back to the clean list. */
	if (!list_empty(&unmap_list)) {
		spin_lock_irqsave(&pool->list_lock, flags);
		list_splice(&unmap_list, &pool->clean_list);
		spin_unlock_irqrestore(&pool->list_lock, flags);
	}

	atomic_sub(unpinned, &pool->free_pinned);
	atomic_sub(ncleaned, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

	mutex_unlock(&pool->flush_lock);
	return ret;
}

static void rds_iw_mr_pool_flush_worker(struct work_struct *work)
{
	struct rds_iw_mr_pool *pool = container_of(work, struct rds_iw_mr_pool, flush_worker);

	rds_iw_flush_mr_pool(pool, 0);
}

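/*
 * Return an MR to the pool's dirty list. A pool flush is scheduled when
 * too many pages are pinned, and is run directly (or queued, from atomic
 * context) when the caller requests invalidation.
 */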
void rds_iw_free_mr(void *trans_private, int invalidate)
{
	struct rds_iw_mr *ibmr = trans_private;
	struct rds_iw_mr_pool *pool = ibmr->device->mr_pool;

	rdsdebug("RDS/IW: free_mr nents %u\n", ibmr->mapping.m_sg.len);
	if (!pool)
		return;

	/* Return it to the pool's free list */
	rds_iw_free_fastreg(pool, ibmr);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		queue_work(rds_wq, &pool->flush_worker);

	if (invalidate) {
		if (likely(!in_interrupt())) {
			rds_iw_flush_mr_pool(pool, 0);
		} else {
			/* We get here if the user created a MR marked
			 * as use_once and invalidate at the same time. */
			queue_work(rds_wq, &pool->flush_worker);
		}
	}
}

void rds_iw_flush_mrs(void)
{
	struct rds_iw_device *rds_iwdev;

	list_for_each_entry(rds_iwdev, &rds_iw_devices, list) {
		struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;

		if (pool)
			rds_iw_flush_mr_pool(pool, 0);
	}
}

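/*
 * Register a scatterlist for RDMA on behalf of a socket: find the device
 * and cm_id for the socket, take an MR from the pool, map the pages into
 * it and return the MR handle, with its rkey passed back via *key_ret.
 */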
void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret)
{
	struct rds_iw_device *rds_iwdev;
	struct rds_iw_mr *ibmr = NULL;
	struct rdma_cm_id *cm_id;
	int ret;

	ret = rds_iw_get_device(rs, &rds_iwdev, &cm_id);
	if (ret || !cm_id) {
		ret = -ENODEV;
		goto out;
	}

	if (!rds_iwdev->mr_pool) {
		ret = -ENODEV;
		goto out;
	}

	ibmr = rds_iw_alloc_mr(rds_iwdev);
	if (IS_ERR(ibmr))
		return ibmr;

	ibmr->cm_id = cm_id;
	ibmr->device = rds_iwdev;

	ret = rds_iw_map_fastreg(rds_iwdev->mr_pool, ibmr, sg, nents);
	if (ret == 0)
		*key_ret = ibmr->mr->rkey;
	else
		printk(KERN_WARNING "RDS/IW: failed to map mr (errno=%d)\n", ret);

out:
	if (ret) {
		if (ibmr)
			rds_iw_free_mr(ibmr, 0);
		ibmr = ERR_PTR(ret);
	}
	return ibmr;
}

/*
 * iWARP fastreg handling
 *
 * The life cycle of a fastreg registration is a bit different from
 * FMRs.
 * The idea behind fastreg is to have one MR, to which we bind different
 * mappings over time. To avoid stalling on the expensive map and invalidate
 * operations, these operations are pipelined on the same send queue on
 * which we want to send the message containing the r_key.
 *
 * This creates a bit of a problem for us, as we do not have the destination
 * IP in GET_MR, so the connection must be set up prior to the GET_MR call
 * for RDMA to work correctly. If a fastreg request is present, rds_iw_xmit
 * will try to queue a LOCAL_INV (if needed) and a FAST_REG_MR work request
 * before queuing the SEND. When completions for these arrive, they are
 * dispatched to the MR, and a bit is set showing that RDMA can be performed.
 *
 * There is another interesting aspect that's related to invalidation.
 * The application can request that a mapping is invalidated in FREE_MR.
 * The expectation there is that this invalidation step includes ALL
 * PREVIOUSLY FREED MRs.
 */
static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool,
			struct rds_iw_mr *ibmr)
{
	struct rds_iw_device *rds_iwdev = pool->device;
	struct ib_fast_reg_page_list *page_list = NULL;
	struct ib_mr *mr;
	int err;

	mr = ib_alloc_fast_reg_mr(rds_iwdev->pd, pool->max_message_size);
	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);

		printk(KERN_WARNING "RDS/IW: ib_alloc_fast_reg_mr failed (err=%d)\n", err);
		return err;
	}

	/* FIXME - this is overkill, but mapping->m_sg.dma_len/mapping->m_sg.dma_npages
	 * is not filled in.
	 */
	page_list = ib_alloc_fast_reg_page_list(rds_iwdev->dev, pool->max_message_size);
	if (IS_ERR(page_list)) {
		err = PTR_ERR(page_list);

		printk(KERN_WARNING "RDS/IW: ib_alloc_fast_reg_page_list failed (err=%d)\n", err);
		ib_dereg_mr(mr);
		return err;
	}

	ibmr->page_list = page_list;
	ibmr->mr = mr;
	return 0;
}

static int rds_iw_rdma_build_fastreg(struct rds_iw_mapping *mapping)
{
	struct rds_iw_mr *ibmr = mapping->m_mr;
	struct ib_send_wr f_wr, *failed_wr;
	int ret;

	/*
	 * Perform a WR for the fast_reg_mr. Each individual page
	 * in the sg list is added to the fast reg page list and placed
	 * inside the fast_reg_mr WR. The key used is a rolling 8bit
	 * counter, which should guarantee uniqueness.
	 */
	ib_update_fast_reg_key(ibmr->mr, ibmr->remap_count++);
	mapping->m_rkey = ibmr->mr->rkey;

	memset(&f_wr, 0, sizeof(f_wr));
	f_wr.wr_id = RDS_IW_FAST_REG_WR_ID;
	f_wr.opcode = IB_WR_FAST_REG_MR;
	f_wr.wr.fast_reg.length = mapping->m_sg.bytes;
	f_wr.wr.fast_reg.rkey = mapping->m_rkey;
	f_wr.wr.fast_reg.page_list = ibmr->page_list;
	f_wr.wr.fast_reg.page_list_len = mapping->m_sg.dma_len;
	f_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	f_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE |
				IB_ACCESS_REMOTE_READ |
				IB_ACCESS_REMOTE_WRITE;
	f_wr.wr.fast_reg.iova_start = 0;
	f_wr.send_flags = IB_SEND_SIGNALED;

	failed_wr = &f_wr;
	ret = ib_post_send(ibmr->cm_id->qp, &f_wr, &failed_wr);
	BUG_ON(failed_wr != &f_wr);
	if (ret)
		printk_ratelimited(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n",
			__func__, __LINE__, ret);
	return ret;
}

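/* Post a LOCAL_INV work request to invalidate the MR's current rkey. */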
static int rds_iw_rdma_fastreg_inv(struct rds_iw_mr *ibmr)
{
	struct ib_send_wr s_wr, *failed_wr;
	int ret = 0;

	if (!ibmr->cm_id->qp || !ibmr->mr)
		goto out;

	memset(&s_wr, 0, sizeof(s_wr));
	s_wr.wr_id = RDS_IW_LOCAL_INV_WR_ID;
	s_wr.opcode = IB_WR_LOCAL_INV;
	s_wr.ex.invalidate_rkey = ibmr->mr->rkey;
	s_wr.send_flags = IB_SEND_SIGNALED;

	failed_wr = &s_wr;
	ret = ib_post_send(ibmr->cm_id->qp, &s_wr, &failed_wr);
	if (ret) {
		printk_ratelimited(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n",
			__func__, __LINE__, ret);
		goto out;
	}
out:
	return ret;
}

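/*
 * Map a scatterlist into an MR: DMA-map the pages, copy their DMA
 * addresses into the MR's fast_reg page list and post the FAST_REG_MR
 * work request.
 */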
static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
			struct rds_iw_mr *ibmr,
			struct scatterlist *sg,
			unsigned int sg_len)
{
	struct rds_iw_device *rds_iwdev = pool->device;
	struct rds_iw_mapping *mapping = &ibmr->mapping;
	u64 *dma_pages;
	int i, ret = 0;

	rds_iw_set_scatterlist(&mapping->m_sg, sg, sg_len);

	dma_pages = rds_iw_map_scatterlist(rds_iwdev, &mapping->m_sg);
	if (IS_ERR(dma_pages)) {
		ret = PTR_ERR(dma_pages);
		dma_pages = NULL;
		goto out;
	}

	if (mapping->m_sg.dma_len > pool->max_message_size) {
		ret = -EMSGSIZE;
		goto out;
	}

	for (i = 0; i < mapping->m_sg.dma_npages; ++i)
		ibmr->page_list->page_list[i] = dma_pages[i];

	ret = rds_iw_rdma_build_fastreg(mapping);
	if (ret)
		goto out;

	rds_iw_stats_inc(s_iw_rdma_mr_used);

out:
	kfree(dma_pages);

	return ret;
}

/*
 * "Free" a fastreg MR.
 */
static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool,
		struct rds_iw_mr *ibmr)
{
	unsigned long flags;
	int ret;

	if (!ibmr->mapping.m_sg.dma_len)
		return;

	ret = rds_iw_rdma_fastreg_inv(ibmr);
	if (ret)
		return;

	/* Try to post the LOCAL_INV WR to the queue. */
	spin_lock_irqsave(&pool->list_lock, flags);

	list_add_tail(&ibmr->mapping.m_list, &pool->dirty_list);
	atomic_add(ibmr->mapping.m_sg.len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);

	spin_unlock_irqrestore(&pool->list_lock, flags);
}

static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
				struct list_head *unmap_list,
				struct list_head *kill_list,
				int *unpinned)
{
	struct rds_iw_mapping *mapping, *next;
	unsigned int ncleaned = 0;
	LIST_HEAD(laundered);

	/* Batched invalidation of fastreg MRs.
	 * Why do we do it this way, even though we could pipeline unmap
	 * and remap? The reason is the application semantics - when the
	 * application requests an invalidation of MRs, it expects all
	 * previously released R_Keys to become invalid.
	 *
	 * If we implement MR reuse naively, we risk memory corruption
	 * (this has actually been observed). So the default behavior
	 * requires that a MR goes through an explicit unmap operation before
	 * we can reuse it again.
	 *
	 * We could probably improve on this a little, by allowing immediate
	 * reuse of a MR on the same socket (e.g. you could add a small
	 * cache of unused MRs to struct rds_sock - GET_MR could grab one
	 * of these without requiring an explicit invalidate).
	 */
	while (!list_empty(unmap_list)) {
		unsigned long flags;

		spin_lock_irqsave(&pool->list_lock, flags);
		list_for_each_entry_safe(mapping, next, unmap_list, m_list) {
			*unpinned += mapping->m_sg.len;
			list_move(&mapping->m_list, &laundered);
			ncleaned++;
		}
		spin_unlock_irqrestore(&pool->list_lock, flags);
	}

	/* Move all laundered mappings back to the unmap list.
	 * We do not kill any WRs right now - it doesn't seem the
	 * fastreg API has a max_remap limit. */
	list_splice_init(&laundered, unmap_list);

	return ncleaned;
}

static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool,
		struct rds_iw_mr *ibmr)
{
	if (ibmr->page_list)
		ib_free_fast_reg_page_list(ibmr->page_list);
	if (ibmr->mr)
		ib_dereg_mr(ibmr->mr);
}