/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <rdma/rdma_cm.h>

#include "rds.h"
#include "ib.h"

static struct kmem_cache *rds_ib_incoming_slab;
static struct kmem_cache *rds_ib_frag_slab;
static atomic_t rds_ib_allocation = ATOMIC_INIT(0);

void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_recv_work *recv;
	u32 i;

	for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
		struct ib_sge *sge;

		recv->r_ibinc = NULL;
		recv->r_frag = NULL;

		recv->r_wr.next = NULL;
		recv->r_wr.wr_id = i;
		recv->r_wr.sg_list = recv->r_sge;
		recv->r_wr.num_sge = RDS_IB_RECV_SGE;

		sge = &recv->r_sge[0];
		sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
		sge->length = sizeof(struct rds_header);
		sge->lkey = ic->i_pd->local_dma_lkey;

		sge = &recv->r_sge[1];
		sge->addr = 0;
		sge->length = RDS_FRAG_SIZE;
		sge->lkey = ic->i_pd->local_dma_lkey;
	}
}

/*
 * The entire 'from' list, including the from element itself, is put on
 * to the tail of the 'to' list.
 */
static void list_splice_entire_tail(struct list_head *from,
				    struct list_head *to)
{
	struct list_head *from_last = from->prev;

	list_splice_tail(from_last, to);
	list_add_tail(from_last, to);
}
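
/*
 * A minimal sketch of the splice semantics above; the function and
 * variable names here are hypothetical and not part of this file.
 * The anchor travels with its entries, which is what lets the caches
 * below hand around a bare list_head pointer as "the whole batch".
 */
static void __maybe_unused rds_ib_splice_example(void)
{
	LIST_HEAD(to);
	struct list_head from, a, b, c;

	INIT_LIST_HEAD(&from);
	list_add_tail(&a, &from);	/* 'from' anchors a, b, c */
	list_add_tail(&b, &from);
	list_add_tail(&c, &from);

	/* afterwards 'to' iterates as: from, a, b, c - anchor included */
	list_splice_entire_tail(&from, &to);
}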

static void rds_ib_cache_xfer_to_ready(struct rds_ib_refill_cache *cache)
{
	struct list_head *tmp;

	tmp = xchg(&cache->xfer, NULL);
	if (tmp) {
		if (cache->ready)
			list_splice_entire_tail(tmp, cache->ready);
		else
			cache->ready = tmp;
	}
}

static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache)
{
	struct rds_ib_cache_head *head;
	int cpu;

	cache->percpu = alloc_percpu(struct rds_ib_cache_head);
	if (!cache->percpu)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		head = per_cpu_ptr(cache->percpu, cpu);
		head->first = NULL;
		head->count = 0;
	}
	cache->xfer = NULL;
	cache->ready = NULL;

	return 0;
}

int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic)
{
	int ret;

	ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs);
	if (!ret) {
		ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags);
		if (ret)
			free_percpu(ic->i_cache_incs.percpu);
	}

	return ret;
}

static void rds_ib_cache_splice_all_lists(struct rds_ib_refill_cache *cache,
					  struct list_head *caller_list)
{
	struct rds_ib_cache_head *head;
	int cpu;

	for_each_possible_cpu(cpu) {
		head = per_cpu_ptr(cache->percpu, cpu);
		if (head->first) {
			list_splice_entire_tail(head->first, caller_list);
			head->first = NULL;
		}
	}

	if (cache->ready) {
		list_splice_entire_tail(cache->ready, caller_list);
		cache->ready = NULL;
	}
}

void rds_ib_recv_free_caches(struct rds_ib_connection *ic)
{
	struct rds_ib_incoming *inc;
	struct rds_ib_incoming *inc_tmp;
	struct rds_page_frag *frag;
	struct rds_page_frag *frag_tmp;
	LIST_HEAD(list);

	rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
	rds_ib_cache_splice_all_lists(&ic->i_cache_incs, &list);
	free_percpu(ic->i_cache_incs.percpu);

	list_for_each_entry_safe(inc, inc_tmp, &list, ii_cache_entry) {
		list_del(&inc->ii_cache_entry);
		WARN_ON(!list_empty(&inc->ii_frags));
		kmem_cache_free(rds_ib_incoming_slab, inc);
	}

	rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
	rds_ib_cache_splice_all_lists(&ic->i_cache_frags, &list);
	free_percpu(ic->i_cache_frags.percpu);

	list_for_each_entry_safe(frag, frag_tmp, &list, f_cache_entry) {
		list_del(&frag->f_cache_entry);
		WARN_ON(!list_empty(&frag->f_item));
		kmem_cache_free(rds_ib_frag_slab, frag);
	}
}

/* fwd decl */
static void rds_ib_recv_cache_put(struct list_head *new_item,
				  struct rds_ib_refill_cache *cache);
static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache);

/* Recycle frag and attached recv buffer f_sg */
static void rds_ib_frag_free(struct rds_ib_connection *ic,
			     struct rds_page_frag *frag)
{
	rdsdebug("frag %p page %p\n", frag, sg_page(&frag->f_sg));

	rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags);
}

/* Recycle inc after freeing attached frags */
void rds_ib_inc_free(struct rds_incoming *inc)
{
	struct rds_ib_incoming *ibinc;
	struct rds_page_frag *frag;
	struct rds_page_frag *pos;
	struct rds_ib_connection *ic = inc->i_conn->c_transport_data;

	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);

	/* Free attached frags */
	list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) {
		list_del_init(&frag->f_item);
		rds_ib_frag_free(ic, frag);
	}
	BUG_ON(!list_empty(&ibinc->ii_frags));

	rdsdebug("freeing ibinc %p inc %p\n", ibinc, inc);
	rds_ib_recv_cache_put(&ibinc->ii_cache_entry, &ic->i_cache_incs);
}

static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
				  struct rds_ib_recv_work *recv)
{
	if (recv->r_ibinc) {
		rds_inc_put(&recv->r_ibinc->ii_inc);
		recv->r_ibinc = NULL;
	}
	if (recv->r_frag) {
		ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
		rds_ib_frag_free(ic, recv->r_frag);
		recv->r_frag = NULL;
	}
}

void rds_ib_recv_clear_ring(struct rds_ib_connection *ic)
{
	u32 i;

	for (i = 0; i < ic->i_recv_ring.w_nr; i++)
		rds_ib_recv_clear_one(ic, &ic->i_recvs[i]);
}

static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *ic,
						     gfp_t slab_mask)
{
	struct rds_ib_incoming *ibinc;
	struct list_head *cache_item;
	int avail_allocs;

	cache_item = rds_ib_recv_cache_get(&ic->i_cache_incs);
	if (cache_item) {
		ibinc = container_of(cache_item, struct rds_ib_incoming, ii_cache_entry);
	} else {
		avail_allocs = atomic_add_unless(&rds_ib_allocation,
						 1, rds_ib_sysctl_max_recv_allocation);
		if (!avail_allocs) {
			rds_ib_stats_inc(s_ib_rx_alloc_limit);
			return NULL;
		}
		ibinc = kmem_cache_alloc(rds_ib_incoming_slab, slab_mask);
		if (!ibinc) {
			atomic_dec(&rds_ib_allocation);
			return NULL;
		}
	}
	INIT_LIST_HEAD(&ibinc->ii_frags);
	rds_inc_init(&ibinc->ii_inc, ic->conn, ic->conn->c_faddr);

	return ibinc;
}

static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic,
						    gfp_t slab_mask, gfp_t page_mask)
{
	struct rds_page_frag *frag;
	struct list_head *cache_item;
	int ret;

	cache_item = rds_ib_recv_cache_get(&ic->i_cache_frags);
	if (cache_item) {
		frag = container_of(cache_item, struct rds_page_frag, f_cache_entry);
	} else {
		frag = kmem_cache_alloc(rds_ib_frag_slab, slab_mask);
		if (!frag)
			return NULL;

		sg_init_table(&frag->f_sg, 1);
		ret = rds_page_remainder_alloc(&frag->f_sg,
					       RDS_FRAG_SIZE, page_mask);
		if (ret) {
			kmem_cache_free(rds_ib_frag_slab, frag);
			return NULL;
		}
	}

	INIT_LIST_HEAD(&frag->f_item);

	return frag;
}

static int rds_ib_recv_refill_one(struct rds_connection *conn,
				  struct rds_ib_recv_work *recv, gfp_t gfp)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_sge *sge;
	int ret = -ENOMEM;
	gfp_t slab_mask = GFP_NOWAIT;
	gfp_t page_mask = GFP_NOWAIT;

	if (gfp & __GFP_WAIT) {
		slab_mask = GFP_KERNEL;
		page_mask = GFP_HIGHUSER;
	}

	if (!ic->i_cache_incs.ready)
		rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
	if (!ic->i_cache_frags.ready)
		rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);

	/*
	 * ibinc was taken from recv if recv contained the start of a message.
	 * recvs that were continuations will still have this allocated.
	 */
	if (!recv->r_ibinc) {
		recv->r_ibinc = rds_ib_refill_one_inc(ic, slab_mask);
		if (!recv->r_ibinc)
			goto out;
	}

	WARN_ON(recv->r_frag); /* leak! */
	recv->r_frag = rds_ib_refill_one_frag(ic, slab_mask, page_mask);
	if (!recv->r_frag)
		goto out;

	ret = ib_dma_map_sg(ic->i_cm_id->device, &recv->r_frag->f_sg,
			    1, DMA_FROM_DEVICE);
	WARN_ON(ret != 1);

	sge = &recv->r_sge[0];
	sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header);
	sge->length = sizeof(struct rds_header);

	sge = &recv->r_sge[1];
	sge->addr = ib_sg_dma_address(ic->i_cm_id->device, &recv->r_frag->f_sg);
	sge->length = ib_sg_dma_len(ic->i_cm_id->device, &recv->r_frag->f_sg);

	ret = 0;
out:
	return ret;
}

static int acquire_refill(struct rds_connection *conn)
{
	return test_and_set_bit(RDS_RECV_REFILL, &conn->c_flags) == 0;
}

static void release_refill(struct rds_connection *conn)
{
	clear_bit(RDS_RECV_REFILL, &conn->c_flags);

	/* We don't use wait_on_bit()/wake_up_bit() because our waking is in a
	 * hot path and finding waiters is very rare.  We don't want to walk
	 * the system-wide hashed waitqueue buckets in the fast path only to
	 * almost never find waiters.
	 */
	if (waitqueue_active(&conn->c_waitq))
		wake_up_all(&conn->c_waitq);
}
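
/*
 * For reference, a hedged sketch of the waiter that pairs with
 * release_refill(); the real wait lives in the connection shutdown
 * path, not in this file.  Because the fast path only checks
 * waitqueue_active(), the waiter must re-test the bit after queueing
 * itself, which wait_event() does on every wakeup.
 */
static void __maybe_unused example_wait_for_refill(struct rds_connection *conn)
{
	wait_event(conn->c_waitq, !test_bit(RDS_RECV_REFILL, &conn->c_flags));
}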

/*
 * This tries to allocate and post unused work requests after making sure that
 * they have all the allocations they need to queue received fragments into
 * sockets.
 *
 * If posting fails due to temporary resource exhaustion, the ring entry is
 * unallocated again and the refill is retried from the worker thread.
 */
void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_recv_work *recv;
	struct ib_recv_wr *failed_wr;
	unsigned int posted = 0;
	int ret = 0;
	bool can_wait = !!(gfp & __GFP_WAIT);
	u32 pos;

	/* the goal here is to just make sure that someone, somewhere
	 * is posting buffers.  If we can't get the refill lock,
	 * let them do their thing
	 */
	if (!acquire_refill(conn))
		return;

	while ((prefill || rds_conn_up(conn)) &&
	       rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
		if (pos >= ic->i_recv_ring.w_nr) {
			printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
			       pos);
			break;
		}

		recv = &ic->i_recvs[pos];
		ret = rds_ib_recv_refill_one(conn, recv, gfp);
		if (ret)
			break;

		/* XXX when can this fail? */
		ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
		rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv,
			 recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
			 (long) ib_sg_dma_address(
				ic->i_cm_id->device,
				&recv->r_frag->f_sg),
			 ret);
		if (ret) {
			rds_ib_conn_error(conn, "recv post on "
			       "%pI4 returned %d, disconnecting and "
			       "reconnecting\n", &conn->c_faddr,
			       ret);
			break;
		}

		posted++;
	}

	/* We're doing flow control - update the window. */
	if (ic->i_flowctl && posted)
		rds_ib_advertise_credits(conn, posted);

	if (ret)
		rds_ib_ring_unalloc(&ic->i_recv_ring, 1);

	release_refill(conn);

	/* if we're called from the softirq handler, we'll be GFP_NOWAIT.
	 * in this case the ring being low is going to lead to more interrupts
	 * and we can safely let the softirq code take care of it unless the
	 * ring is completely empty.
	 *
	 * if we're called from krdsd, we'll be GFP_KERNEL.  In this case
	 * we might have raced with the softirq code while we had the refill
	 * lock held.  Use rds_ib_ring_low() instead of ring_empty to decide
	 * if we should requeue.
	 */
	if (rds_conn_up(conn) &&
	    ((can_wait && rds_ib_ring_low(&ic->i_recv_ring)) ||
	    rds_ib_ring_empty(&ic->i_recv_ring))) {
		queue_delayed_work(rds_wq, &conn->c_recv_w, 1);
	}
}

/*
 * We want to recycle several types of recv allocations, like incs and frags.
 * To use this, the *_free() function passes in the ptr to a list_head within
 * the recyclee, as well as the cache to put it on.
 *
 * First, we put the memory on a percpu list.  When this reaches a certain size,
 * we move it to an intermediate non-percpu list in a lockless manner, with some
 * xchg/cmpxchg wizardry.
 *
 * N.B. Instead of a list_head as the anchor, we use a single pointer, which can
 * be NULL and xchg'd.  The list is actually empty when the pointer is NULL, and
 * list_empty() will return true even when one element is actually present.
 */
static void rds_ib_recv_cache_put(struct list_head *new_item,
				  struct rds_ib_refill_cache *cache)
{
	unsigned long flags;
	struct list_head *old, *chpfirst;

	local_irq_save(flags);

	chpfirst = __this_cpu_read(cache->percpu->first);
	if (!chpfirst)
		INIT_LIST_HEAD(new_item);
	else /* put on front */
		list_add_tail(new_item, chpfirst);

	__this_cpu_write(cache->percpu->first, new_item);
	__this_cpu_inc(cache->percpu->count);

	if (__this_cpu_read(cache->percpu->count) < RDS_IB_RECYCLE_BATCH_COUNT)
		goto end;

	/*
	 * Return our per-cpu first list to the cache's xfer by atomically
	 * grabbing the current xfer list, appending it to our per-cpu list,
	 * and then atomically returning that entire list back to the
	 * cache's xfer list as long as it's still empty.
	 */
	do {
		old = xchg(&cache->xfer, NULL);
		if (old)
			list_splice_entire_tail(old, chpfirst);
		old = cmpxchg(&cache->xfer, NULL, chpfirst);
	} while (old);

	__this_cpu_write(cache->percpu->first, NULL);
	__this_cpu_write(cache->percpu->count, 0);
end:
	local_irq_restore(flags);
}
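
/*
 * The xchg/cmpxchg handoff above, shown in isolation as a hedged sketch
 * over a hypothetical 'xfer' slot (not the cache struct itself).  The
 * loop is needed because another cpu can repopulate the slot between
 * our xchg() and cmpxchg(); when that happens we absorb its batch and
 * retry.  Unlike the real path, this sketch does not disable IRQs.
 */
static void __maybe_unused example_xfer_push(struct list_head **xfer,
					     struct list_head *batch)
{
	struct list_head *old;

	do {
		old = xchg(xfer, NULL);		/* steal whatever is queued */
		if (old)
			list_splice_entire_tail(old, batch);
		old = cmpxchg(xfer, NULL, batch); /* publish if still empty */
	} while (old);
}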

static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache)
{
	struct list_head *head = cache->ready;

	if (head) {
		if (!list_empty(head)) {
			cache->ready = head->next;
			list_del_init(head);
		} else
			cache->ready = NULL;
	}

	return head;
}

int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
{
	struct rds_ib_incoming *ibinc;
	struct rds_page_frag *frag;
	unsigned long to_copy;
	unsigned long frag_off = 0;
	int copied = 0;
	int ret;
	u32 len;

	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
	frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
	len = be32_to_cpu(inc->i_hdr.h_len);

	while (iov_iter_count(to) && copied < len) {
		if (frag_off == RDS_FRAG_SIZE) {
			frag = list_entry(frag->f_item.next,
					  struct rds_page_frag, f_item);
			frag_off = 0;
		}
		to_copy = min_t(unsigned long, iov_iter_count(to),
				RDS_FRAG_SIZE - frag_off);
		to_copy = min_t(unsigned long, to_copy, len - copied);

		/* XXX needs + offset for multiple recvs per page */
		rds_stats_add(s_copy_to_user, to_copy);
		ret = copy_page_to_iter(sg_page(&frag->f_sg),
					frag->f_sg.offset + frag_off,
					to_copy,
					to);
		if (ret != to_copy)
			return -EFAULT;

		frag_off += to_copy;
		copied += to_copy;
	}

	return copied;
}

/* ic starts out kzalloc()ed */
void rds_ib_recv_init_ack(struct rds_ib_connection *ic)
{
	struct ib_send_wr *wr = &ic->i_ack_wr;
	struct ib_sge *sge = &ic->i_ack_sge;

	sge->addr = ic->i_ack_dma;
	sge->length = sizeof(struct rds_header);
	sge->lkey = ic->i_pd->local_dma_lkey;

	wr->sg_list = sge;
	wr->num_sge = 1;
	wr->opcode = IB_WR_SEND;
	wr->wr_id = RDS_IB_ACK_WR_ID;
	wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
}

/*
 * You'd think that with reliable IB connections you wouldn't need to ack
 * messages that have been received.  The problem is that IB hardware generates
 * an ack message before it has DMAed the message into memory.  This creates a
 * potential message loss if the HCA is disabled for any reason between when it
 * sends the ack and before the message is DMAed and processed.  This is only a
 * potential issue if another HCA is available for fail-over.
 *
 * When the remote host receives our ack they'll free the sent message from
 * their send queue.  To decrease the latency of this we always send an ack
 * immediately after we've received messages.
 *
 * For simplicity, we only have one ack in flight at a time.  This puts
 * pressure on senders to have deep enough send queues to absorb the latency of
 * a single ack frame being in flight.  This might not be good enough.
 *
 * This is implemented by having a long-lived send_wr and sge which point to a
 * statically allocated ack frame.  This ack wr does not fall under the ring
 * accounting that the tx and rx wrs do.  The QP attribute specifically makes
 * room for it beyond the ring size.  Send completion notices its special
 * wr_id and avoids working with the ring in that case.
 */
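
/*
 * A hedged sketch of the completion-side check described above; the
 * real handler lives in the send path (ib_send.c), not here.  The ack
 * WR is recognized purely by its reserved wr_id and never touches the
 * ring accounting.
 */
static void __maybe_unused example_ack_send_completion(struct rds_ib_connection *ic,
						       struct ib_wc *wc)
{
	if (wc->wr_id == RDS_IB_ACK_WR_ID) {
		rds_ib_ack_send_complete(ic);	/* not ring-accounted */
		return;
	}
	/* ... ring-accounted data send completions handled here ... */
}
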
#ifndef KERNEL_HAS_ATOMIC64
static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
			   int ack_required)
{
	unsigned long flags;

	spin_lock_irqsave(&ic->i_ack_lock, flags);
	ic->i_ack_next = seq;
	if (ack_required)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	spin_unlock_irqrestore(&ic->i_ack_lock, flags);
}

static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
	unsigned long flags;
	u64 seq;

	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

	spin_lock_irqsave(&ic->i_ack_lock, flags);
	seq = ic->i_ack_next;
	spin_unlock_irqrestore(&ic->i_ack_lock, flags);

	return seq;
}
#else
static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
			   int ack_required)
{
	atomic64_set(&ic->i_ack_next, seq);
	if (ack_required) {
		smp_mb__before_atomic();
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	}
}

static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	smp_mb__after_atomic();

	return atomic64_read(&ic->i_ack_next);
}
#endif

static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credits)
{
	struct rds_header *hdr = ic->i_ack;
	struct ib_send_wr *failed_wr;
	u64 seq;
	int ret;

	seq = rds_ib_get_ack(ic);

	rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
	rds_message_populate_header(hdr, 0, 0, 0);
	hdr->h_ack = cpu_to_be64(seq);
	hdr->h_credit = adv_credits;
	rds_message_make_checksum(hdr);
	ic->i_ack_queued = jiffies;

	ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, &failed_wr);
	if (unlikely(ret)) {
		/* Failed to send. Release the WR, and
		 * force another ACK.
		 */
		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

		rds_ib_stats_inc(s_ib_ack_send_failure);

		rds_ib_conn_error(ic->conn, "sending ack failed\n");
	} else
		rds_ib_stats_inc(s_ib_ack_sent);
}

/*
 * There are 3 ways of getting acknowledgements to the peer:
 *  1.	We call rds_ib_attempt_ack from the recv completion handler
 *	to send an ACK-only frame.
 *	However, there can be only one such frame in the send queue
 *	at any time, so we may have to postpone it.
 *  2.	When another (data) packet is transmitted while there's
 *	an ACK in the queue, we piggyback the ACK sequence number
 *	on the data packet.
 *  3.	If the ACK WR is done sending, we get called from the
 *	send queue completion handler, and check whether there's
 *	another ACK pending (postponed because the WR was on the
 *	queue).  If so, we transmit it.
 *
 * We maintain 2 variables:
 *  -	i_ack_flags, which keeps track of whether the ACK WR
 *	is currently in the send queue or not (IB_ACK_IN_FLIGHT)
 *  -	i_ack_next, which is the last sequence number we received
 *
 * Potentially, send queue and receive queue handlers can run concurrently.
 * It would be nice to not have to use a spinlock to synchronize things,
 * but the one problem that rules this out is that 64bit updates are
 * not atomic on all platforms.  Things would be a lot simpler if
 * we had atomic64 or maybe cmpxchg64 everywhere.
 *
 * Reconnecting complicates this picture just slightly.  When we
 * reconnect, we may be seeing duplicate packets.  The peer
 * is retransmitting them, because it hasn't seen an ACK for
 * them.  It is important that we ACK these.
 *
 * ACK mitigation adds a header flag "ACK_REQUIRED"; any packet with
 * this flag set *MUST* be acknowledged immediately.
 */

/*
 * When we get here, we're called from the recv queue handler.
 * Check whether we ought to transmit an ACK.
 */
void rds_ib_attempt_ack(struct rds_ib_connection *ic)
{
	unsigned int adv_credits;

	if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
		return;

	if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) {
		rds_ib_stats_inc(s_ib_ack_send_delayed);
		return;
	}

	/* Can we get a send credit? */
	if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
		rds_ib_stats_inc(s_ib_tx_throttle);
		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
		return;
	}

	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	rds_ib_send_ack(ic, adv_credits);
}

/*
 * We get here from the send completion handler, when the
 * adapter tells us the ACK frame was sent.
 */
void rds_ib_ack_send_complete(struct rds_ib_connection *ic)
{
	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
	rds_ib_attempt_ack(ic);
}

/*
 * This is called by the regular xmit code when it wants to piggyback
 * an ACK on an outgoing frame.
 */
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
{
	if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
		rds_ib_stats_inc(s_ib_ack_send_piggybacked);
	return rds_ib_get_ack(ic);
}

/*
 * It's kind of lame that we're copying from the posted receive pages into
 * long-lived bitmaps.  We could have posted the bitmaps and rdma written into
 * them.  But receiving new congestion bitmaps should be a *rare* event, so
 * hopefully we won't need to invest that complexity in making it more
 * efficient.  By copying we can share a simpler core with TCP which has to
 * copy.
 */
static void rds_ib_cong_recv(struct rds_connection *conn,
			     struct rds_ib_incoming *ibinc)
{
	struct rds_cong_map *map;
	unsigned int map_off;
	unsigned int map_page;
	struct rds_page_frag *frag;
	unsigned long frag_off;
	unsigned long to_copy;
	unsigned long copied;
	uint64_t uncongested = 0;
	void *addr;

	/* catch completely corrupt packets */
	if (be32_to_cpu(ibinc->ii_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
		return;

	map = conn->c_fcong;
	map_page = 0;
	map_off = 0;

	frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
	frag_off = 0;

	copied = 0;

	while (copied < RDS_CONG_MAP_BYTES) {
		uint64_t *src, *dst;
		unsigned int k;

		to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
		BUG_ON(to_copy & 7); /* Must be 64bit aligned. */

		addr = kmap_atomic(sg_page(&frag->f_sg));

		src = addr + frag_off;
		dst = (void *)map->m_page_addrs[map_page] + map_off;
		for (k = 0; k < to_copy; k += 8) {
			/* Record ports that became uncongested, i.e.
			 * bits that were set in our stored map but are
			 * clear in the incoming one. */
			uncongested |= ~(*src) & *dst;
			*dst++ = *src++;
		}
		kunmap_atomic(addr);

		copied += to_copy;

		map_off += to_copy;
		if (map_off == PAGE_SIZE) {
			map_off = 0;
			map_page++;
		}

		frag_off += to_copy;
		if (frag_off == RDS_FRAG_SIZE) {
			frag = list_entry(frag->f_item.next,
					  struct rds_page_frag, f_item);
			frag_off = 0;
		}
	}

	/* the congestion map is in little endian order */
	uncongested = le64_to_cpu(uncongested);

	rds_cong_map_updated(map, uncongested);
}
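
/*
 * A worked example of the bit trick above on hypothetical values:
 * ~(*src) & *dst selects exactly the bits that are set in the stored
 * word but clear in the incoming one, i.e. ports whose congestion bit
 * was just dropped.
 */
static void __maybe_unused example_uncongested_bits(void)
{
	uint64_t dst = 0x00f0;	/* stored map word: ports 4-7 congested */
	uint64_t src = 0x0011;	/* incoming word: ports 0 and 4 congested */
	uint64_t uncongested = ~src & dst;

	/* ports 5, 6 and 7 went from set to clear */
	WARN_ON(uncongested != 0x00e0);
}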

/*
 * Rings are posted with all the allocations they'll need to queue the
 * incoming message to the receiving socket so this can't fail.
 * All fragments start with a header, so we can make sure we're not receiving
 * garbage, and we can tell a small 8 byte fragment from an ACK frame.
 */
struct rds_ib_ack_state {
	u64 ack_next;
	u64 ack_recv;
	unsigned int ack_required:1;
	unsigned int ack_next_valid:1;
	unsigned int ack_recv_valid:1;
};

static void rds_ib_process_recv(struct rds_connection *conn,
				struct rds_ib_recv_work *recv, u32 data_len,
				struct rds_ib_ack_state *state)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_incoming *ibinc = ic->i_ibinc;
	struct rds_header *ihdr, *hdr;

	/* XXX shut down the connection if port 0,0 are seen? */

	rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv,
		 data_len);

	if (data_len < sizeof(struct rds_header)) {
		rds_ib_conn_error(conn, "incoming message "
		       "from %pI4 didn't include a "
		       "header, disconnecting and "
		       "reconnecting\n",
		       &conn->c_faddr);
		return;
	}
	data_len -= sizeof(struct rds_header);

	ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];

	/* Validate the checksum. */
	if (!rds_message_verify_checksum(ihdr)) {
		rds_ib_conn_error(conn, "incoming message "
		       "from %pI4 has corrupted header - "
		       "forcing a reconnect\n",
		       &conn->c_faddr);
		rds_stats_inc(s_recv_drop_bad_checksum);
		return;
	}

	/* Process the ACK sequence which comes with every packet */
	state->ack_recv = be64_to_cpu(ihdr->h_ack);
	state->ack_recv_valid = 1;

	/* Process the credits update if there was one */
	if (ihdr->h_credit)
		rds_ib_send_add_credits(conn, ihdr->h_credit);

	if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && data_len == 0) {
		/* This is an ACK-only packet.  It gets special treatment
		 * here because historically, ACKs were rather special
		 * beasts.
		 */
		rds_ib_stats_inc(s_ib_ack_received);

		/*
		 * Usually the frags make their way on to incs and are then freed as
		 * the inc is freed.  We don't go that route, so we have to drop the
		 * page ref ourselves.  We can't just leave the page on the recv
		 * because that confuses the dma mapping of pages and each recv's use
		 * of a partial page.
		 *
		 * FIXME: Fold this into the code path below.
		 */
		rds_ib_frag_free(ic, recv->r_frag);
		recv->r_frag = NULL;
		return;
	}

	/*
	 * If we don't already have an inc on the connection then this
	 * fragment has a header and starts a message.  Copy its header
	 * into the inc and save the inc so we can hang upcoming fragments
	 * off its list.
	 */
	if (!ibinc) {
		ibinc = recv->r_ibinc;
		recv->r_ibinc = NULL;
		ic->i_ibinc = ibinc;

		hdr = &ibinc->ii_inc.i_hdr;
		memcpy(hdr, ihdr, sizeof(*hdr));
		ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);

		rdsdebug("ic %p ibinc %p rem %u flag 0x%x\n", ic, ibinc,
			 ic->i_recv_data_rem, hdr->h_flags);
	} else {
		hdr = &ibinc->ii_inc.i_hdr;
		/* We can't just use memcmp here; fragments of a
		 * single message may carry different ACKs */
		if (hdr->h_sequence != ihdr->h_sequence ||
		    hdr->h_len != ihdr->h_len ||
		    hdr->h_sport != ihdr->h_sport ||
		    hdr->h_dport != ihdr->h_dport) {
			rds_ib_conn_error(conn,
				"fragment header mismatch; forcing reconnect\n");
			return;
		}
	}

	list_add_tail(&recv->r_frag->f_item, &ibinc->ii_frags);
	recv->r_frag = NULL;

	if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
		ic->i_recv_data_rem -= RDS_FRAG_SIZE;
	else {
		ic->i_recv_data_rem = 0;
		ic->i_ibinc = NULL;

		if (ibinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
			rds_ib_cong_recv(conn, ibinc);
		else {
			rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
					  &ibinc->ii_inc, GFP_ATOMIC);
			state->ack_next = be64_to_cpu(hdr->h_sequence);
			state->ack_next_valid = 1;
		}

		/* Evaluate the ACK_REQUIRED flag *after* we received
		 * the complete frame, and after bumping the next_rx
		 * sequence. */
		if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) {
			rds_stats_inc(s_recv_ack_required);
			state->ack_required = 1;
		}

		rds_inc_put(&ibinc->ii_inc);
	}
}

/*
 * Plucking the oldest entry from the ring can be done concurrently with
 * the thread refilling the ring.  Each ring operation is protected by
 * spinlocks and the transient state of refilling doesn't change the
 * recording of which entry is oldest.
 *
 * This relies on IB only calling one cq comp_handler for each cq so that
 * there will only be one caller of rds_recv_incoming() per RDS connection.
 */
void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p cq %p\n", conn, cq);

	rds_ib_stats_inc(s_ib_rx_cq_call);

	tasklet_schedule(&ic->i_recv_tasklet);
}

static inline void rds_poll_cq(struct rds_ib_connection *ic,
			       struct rds_ib_ack_state *state)
{
	struct rds_connection *conn = ic->conn;
	struct ib_wc wc;
	struct rds_ib_recv_work *recv;

	while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
		rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
			 (unsigned long long)wc.wr_id, wc.status,
			 ib_wc_status_msg(wc.status), wc.byte_len,
			 be32_to_cpu(wc.ex.imm_data));
		rds_ib_stats_inc(s_ib_rx_cq_event);

		recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];

		ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);

		/*
		 * Also process recvs in connecting state because it is possible
		 * to get a recv completion _before_ the rdmacm ESTABLISHED
		 * event is processed.
		 */
		if (wc.status == IB_WC_SUCCESS) {
			rds_ib_process_recv(conn, recv, wc.byte_len, state);
		} else {
			/* We expect errors as the qp is drained during shutdown */
			if (rds_conn_up(conn) || rds_conn_connecting(conn))
				rds_ib_conn_error(conn, "recv completion on %pI4 had "
						  "status %u (%s), disconnecting and "
						  "reconnecting\n", &conn->c_faddr,
						  wc.status,
						  ib_wc_status_msg(wc.status));
		}

		/*
		 * rds_ib_process_recv() doesn't always consume the frag, and
		 * we might not have called it at all if the wc didn't indicate
		 * success.  We already unmapped the frag's pages, though, and
		 * the following rds_ib_ring_free() call tells the refill path
		 * that it will not find an allocated frag here.  Make sure we
		 * keep that promise by freeing a frag that's still on the ring.
		 */
		if (recv->r_frag) {
			rds_ib_frag_free(ic, recv->r_frag);
			recv->r_frag = NULL;
		}
		rds_ib_ring_free(&ic->i_recv_ring, 1);
	}
}

void rds_ib_recv_tasklet_fn(unsigned long data)
{
	struct rds_ib_connection *ic = (struct rds_ib_connection *) data;
	struct rds_connection *conn = ic->conn;
	struct rds_ib_ack_state state = { 0, };

	rds_poll_cq(ic, &state);
	ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
	rds_poll_cq(ic, &state);

	if (state.ack_next_valid)
		rds_ib_set_ack(ic, state.ack_next, state.ack_required);
	if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
		rds_send_drop_acked(conn, state.ack_recv, NULL);
		ic->i_ack_recv = state.ack_recv;
	}
	if (rds_conn_up(conn))
		rds_ib_attempt_ack(ic);

	/* If we ever end up with a really empty receive ring, we're
	 * in deep trouble, as the sender will definitely see RNR
	 * timeouts. */
	if (rds_ib_ring_empty(&ic->i_recv_ring))
		rds_ib_stats_inc(s_ib_rx_ring_empty);

	if (rds_ib_ring_low(&ic->i_recv_ring))
		rds_ib_recv_refill(conn, 0, GFP_NOWAIT);
}
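
/*
 * Why rds_poll_cq() runs twice in the tasklet above: a completion that
 * arrives between the first drain and ib_req_notify_cq() would otherwise
 * sit in the CQ with no event armed to report it.  A hedged, generic
 * sketch of the pattern (the function name is hypothetical):
 */
static void __maybe_unused example_drain_cq(struct ib_cq *cq, struct ib_wc *wc)
{
	while (ib_poll_cq(cq, 1, wc) > 0)
		;				/* drain what's already there */
	ib_req_notify_cq(cq, IB_CQ_SOLICITED);	/* re-arm the event */
	while (ib_poll_cq(cq, 1, wc) > 0)
		;				/* catch entries that raced the re-arm */
}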

int rds_ib_recv(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	int ret = 0;

	rdsdebug("conn %p\n", conn);
	if (rds_conn_up(conn)) {
		rds_ib_attempt_ack(ic);
		rds_ib_recv_refill(conn, 0, GFP_KERNEL);
	}

	return ret;
}

int rds_ib_recv_init(void)
{
	struct sysinfo si;
	int ret = -ENOMEM;

	/* Default to a third of all available RAM for recv memory */
	si_meminfo(&si);
	rds_ib_sysctl_max_recv_allocation = si.totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;

	rds_ib_incoming_slab = kmem_cache_create("rds_ib_incoming",
					sizeof(struct rds_ib_incoming),
					0, SLAB_HWCACHE_ALIGN, NULL);
	if (!rds_ib_incoming_slab)
		goto out;

	rds_ib_frag_slab = kmem_cache_create("rds_ib_frag",
					sizeof(struct rds_page_frag),
					0, SLAB_HWCACHE_ALIGN, NULL);
	if (!rds_ib_frag_slab) {
		kmem_cache_destroy(rds_ib_incoming_slab);
		rds_ib_incoming_slab = NULL;
	} else
		ret = 0;
out:
	return ret;
}

void rds_ib_recv_exit(void)
{
	kmem_cache_destroy(rds_ib_incoming_slab);
	kmem_cache_destroy(rds_ib_frag_slab);
}