/*
 * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <rdma/rdma_cm.h>

#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"

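/* Slab caches for the receive-side objects we recycle (incs and frags), and a
 * global counter of slab-allocated incs that rds_ib_refill_one_inc() checks
 * against the rds_ib_sysctl_max_recv_allocation limit.
 */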
static struct kmem_cache *rds_ib_incoming_slab;
static struct kmem_cache *rds_ib_frag_slab;
static atomic_t rds_ib_allocation = ATOMIC_INIT(0);

void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
{
        struct rds_ib_recv_work *recv;
        u32 i;

        for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
                struct ib_sge *sge;

                recv->r_ibinc = NULL;
                recv->r_frag = NULL;

                recv->r_wr.next = NULL;
                recv->r_wr.wr_id = i;
                recv->r_wr.sg_list = recv->r_sge;
                recv->r_wr.num_sge = RDS_IB_RECV_SGE;

                sge = &recv->r_sge[0];
                sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
                sge->length = sizeof(struct rds_header);
                sge->lkey = ic->i_pd->local_dma_lkey;

                sge = &recv->r_sge[1];
                sge->addr = 0;
                sge->length = RDS_FRAG_SIZE;
                sge->lkey = ic->i_pd->local_dma_lkey;
        }
}

/*
 * The entire 'from' list, including the 'from' element itself, is appended
 * to the tail of the 'to' list.
 */
static void list_splice_entire_tail(struct list_head *from,
                                    struct list_head *to)
{
        struct list_head *from_last = from->prev;

        list_splice_tail(from_last, to);
        list_add_tail(from_last, to);
}

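/* Atomically claim whatever the free path has batched onto the cache's xfer
 * list and append it to the ready list that the refill path consumes.
 */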
static void rds_ib_cache_xfer_to_ready(struct rds_ib_refill_cache *cache)
{
        struct list_head *tmp;

        tmp = xchg(&cache->xfer, NULL);
        if (tmp) {
                if (cache->ready)
                        list_splice_entire_tail(tmp, cache->ready);
                else
                        cache->ready = tmp;
        }
}

static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache)
{
        struct rds_ib_cache_head *head;
        int cpu;

        cache->percpu = alloc_percpu(struct rds_ib_cache_head);
        if (!cache->percpu)
                return -ENOMEM;

        for_each_possible_cpu(cpu) {
                head = per_cpu_ptr(cache->percpu, cpu);
                head->first = NULL;
                head->count = 0;
        }
        cache->xfer = NULL;
        cache->ready = NULL;

        return 0;
}

int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic)
{
        int ret;

        ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs);
        if (!ret) {
                ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags);
                if (ret)
                        free_percpu(ic->i_cache_incs.percpu);
        }

        return ret;
}

static void rds_ib_cache_splice_all_lists(struct rds_ib_refill_cache *cache,
                                          struct list_head *caller_list)
{
        struct rds_ib_cache_head *head;
        int cpu;

        for_each_possible_cpu(cpu) {
                head = per_cpu_ptr(cache->percpu, cpu);
                if (head->first) {
                        list_splice_entire_tail(head->first, caller_list);
                        head->first = NULL;
                }
        }

        if (cache->ready) {
                list_splice_entire_tail(cache->ready, caller_list);
                cache->ready = NULL;
        }
}

void rds_ib_recv_free_caches(struct rds_ib_connection *ic)
{
        struct rds_ib_incoming *inc;
        struct rds_ib_incoming *inc_tmp;
        struct rds_page_frag *frag;
        struct rds_page_frag *frag_tmp;
        LIST_HEAD(list);

        rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
        rds_ib_cache_splice_all_lists(&ic->i_cache_incs, &list);
        free_percpu(ic->i_cache_incs.percpu);

        list_for_each_entry_safe(inc, inc_tmp, &list, ii_cache_entry) {
                list_del(&inc->ii_cache_entry);
                WARN_ON(!list_empty(&inc->ii_frags));
                kmem_cache_free(rds_ib_incoming_slab, inc);
        }

        rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
        rds_ib_cache_splice_all_lists(&ic->i_cache_frags, &list);
        free_percpu(ic->i_cache_frags.percpu);

        list_for_each_entry_safe(frag, frag_tmp, &list, f_cache_entry) {
                list_del(&frag->f_cache_entry);
                WARN_ON(!list_empty(&frag->f_item));
                kmem_cache_free(rds_ib_frag_slab, frag);
        }
}

/* fwd decl */
static void rds_ib_recv_cache_put(struct list_head *new_item,
                                  struct rds_ib_refill_cache *cache);
static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache);

/* Recycle frag and attached recv buffer f_sg */
static void rds_ib_frag_free(struct rds_ib_connection *ic,
                             struct rds_page_frag *frag)
{
        rdsdebug("frag %p page %p\n", frag, sg_page(&frag->f_sg));

        rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags);
        atomic_add(RDS_FRAG_SIZE / SZ_1K, &ic->i_cache_allocs);
        rds_ib_stats_add(s_ib_recv_added_to_cache, RDS_FRAG_SIZE);
}

/* Recycle inc after freeing attached frags */
void rds_ib_inc_free(struct rds_incoming *inc)
{
        struct rds_ib_incoming *ibinc;
        struct rds_page_frag *frag;
        struct rds_page_frag *pos;
        struct rds_ib_connection *ic = inc->i_conn->c_transport_data;

        ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);

        /* Free attached frags */
        list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) {
                list_del_init(&frag->f_item);
                rds_ib_frag_free(ic, frag);
        }
        BUG_ON(!list_empty(&ibinc->ii_frags));

        rdsdebug("freeing ibinc %p inc %p\n", ibinc, inc);
        rds_ib_recv_cache_put(&ibinc->ii_cache_entry, &ic->i_cache_incs);
}

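/* Release whatever is still attached to one ring entry: drop the inc
 * reference and unmap and recycle the data frag, if present.
 */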
static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
                                  struct rds_ib_recv_work *recv)
{
        if (recv->r_ibinc) {
                rds_inc_put(&recv->r_ibinc->ii_inc);
                recv->r_ibinc = NULL;
        }
        if (recv->r_frag) {
                ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
                rds_ib_frag_free(ic, recv->r_frag);
                recv->r_frag = NULL;
        }
}

void rds_ib_recv_clear_ring(struct rds_ib_connection *ic)
{
        u32 i;

        for (i = 0; i < ic->i_recv_ring.w_nr; i++)
                rds_ib_recv_clear_one(ic, &ic->i_recvs[i]);
}

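/* Get an inc for a new message: reuse one from the per-connection cache when
 * possible, otherwise allocate from the slab, subject to the global
 * rds_ib_allocation / rds_ib_sysctl_max_recv_allocation cap.
 */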
static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *ic,
                                                     gfp_t slab_mask)
{
        struct rds_ib_incoming *ibinc;
        struct list_head *cache_item;
        int avail_allocs;

        cache_item = rds_ib_recv_cache_get(&ic->i_cache_incs);
        if (cache_item) {
                ibinc = container_of(cache_item, struct rds_ib_incoming, ii_cache_entry);
        } else {
                avail_allocs = atomic_add_unless(&rds_ib_allocation,
                                                 1, rds_ib_sysctl_max_recv_allocation);
                if (!avail_allocs) {
                        rds_ib_stats_inc(s_ib_rx_alloc_limit);
                        return NULL;
                }
                ibinc = kmem_cache_alloc(rds_ib_incoming_slab, slab_mask);
                if (!ibinc) {
                        atomic_dec(&rds_ib_allocation);
                        return NULL;
                }
                rds_ib_stats_inc(s_ib_rx_total_incs);
        }
        INIT_LIST_HEAD(&ibinc->ii_frags);
        rds_inc_init(&ibinc->ii_inc, ic->conn, &ic->conn->c_faddr);

        return ibinc;
}

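/* Get a receive frag: reuse one from the per-connection frag cache when
 * possible, otherwise allocate one from the slab and back it with
 * RDS_FRAG_SIZE bytes from the page-remainder allocator.
 */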
static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic,
                                                    gfp_t slab_mask, gfp_t page_mask)
{
        struct rds_page_frag *frag;
        struct list_head *cache_item;
        int ret;

        cache_item = rds_ib_recv_cache_get(&ic->i_cache_frags);
        if (cache_item) {
                frag = container_of(cache_item, struct rds_page_frag, f_cache_entry);
                atomic_sub(RDS_FRAG_SIZE / SZ_1K, &ic->i_cache_allocs);
                rds_ib_stats_add(s_ib_recv_added_to_cache, RDS_FRAG_SIZE);
        } else {
                frag = kmem_cache_alloc(rds_ib_frag_slab, slab_mask);
                if (!frag)
                        return NULL;

                sg_init_table(&frag->f_sg, 1);
                ret = rds_page_remainder_alloc(&frag->f_sg,
                                               RDS_FRAG_SIZE, page_mask);
                if (ret) {
                        kmem_cache_free(rds_ib_frag_slab, frag);
                        return NULL;
                }
                rds_ib_stats_inc(s_ib_rx_total_frags);
        }

        INIT_LIST_HEAD(&frag->f_item);

        return frag;
}

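/* Populate one ring entry with an inc (if it doesn't already have one) and a
 * freshly mapped data frag, pointing its two sges at the recv header slot and
 * the frag.  Blocking allocations are used only when the caller's gfp mask
 * allows direct reclaim.
 */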
static int rds_ib_recv_refill_one(struct rds_connection *conn,
                                  struct rds_ib_recv_work *recv, gfp_t gfp)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct ib_sge *sge;
        int ret = -ENOMEM;
        gfp_t slab_mask = GFP_NOWAIT;
        gfp_t page_mask = GFP_NOWAIT;

        if (gfp & __GFP_DIRECT_RECLAIM) {
                slab_mask = GFP_KERNEL;
                page_mask = GFP_HIGHUSER;
        }

        if (!ic->i_cache_incs.ready)
                rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
        if (!ic->i_cache_frags.ready)
                rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);

        /*
         * ibinc was taken from recv if recv contained the start of a message.
         * recvs that were continuations will still have this allocated.
         */
        if (!recv->r_ibinc) {
                recv->r_ibinc = rds_ib_refill_one_inc(ic, slab_mask);
                if (!recv->r_ibinc)
                        goto out;
        }

        WARN_ON(recv->r_frag); /* leak! */
        recv->r_frag = rds_ib_refill_one_frag(ic, slab_mask, page_mask);
        if (!recv->r_frag)
                goto out;

        ret = ib_dma_map_sg(ic->i_cm_id->device, &recv->r_frag->f_sg,
                            1, DMA_FROM_DEVICE);
        WARN_ON(ret != 1);

        sge = &recv->r_sge[0];
        sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header);
        sge->length = sizeof(struct rds_header);

        sge = &recv->r_sge[1];
        sge->addr = ib_sg_dma_address(ic->i_cm_id->device, &recv->r_frag->f_sg);
        sge->length = ib_sg_dma_len(ic->i_cm_id->device, &recv->r_frag->f_sg);

        ret = 0;
out:
        return ret;
}

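/* The RDS_RECV_REFILL bit in c_flags acts as a trylock so that only one
 * context refills the receive ring for a connection at a time.
 */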
static int acquire_refill(struct rds_connection *conn)
{
        return test_and_set_bit(RDS_RECV_REFILL, &conn->c_flags) == 0;
}

static void release_refill(struct rds_connection *conn)
{
        clear_bit(RDS_RECV_REFILL, &conn->c_flags);

        /* We don't use wait_on_bit()/wake_up_bit() because our waking is in a
         * hot path and finding waiters is very rare.  We don't want to walk
         * the system-wide hashed waitqueue buckets in the fast path only to
         * almost never find waiters.
         */
        if (waitqueue_active(&conn->c_waitq))
                wake_up_all(&conn->c_waitq);
}

/*
 * This tries to allocate and post unused work requests after making sure that
 * they have all the allocations they need to queue received fragments into
 * sockets.
 */
void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct rds_ib_recv_work *recv;
        struct ib_recv_wr *failed_wr;
        unsigned int posted = 0;
        int ret = 0;
        bool can_wait = !!(gfp & __GFP_DIRECT_RECLAIM);
        u32 pos;

        /* the goal here is to just make sure that someone, somewhere
         * is posting buffers.  If we can't get the refill lock,
         * let them do their thing
         */
        if (!acquire_refill(conn))
                return;

        while ((prefill || rds_conn_up(conn)) &&
               rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
                if (pos >= ic->i_recv_ring.w_nr) {
                        printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
                               pos);
                        break;
                }

                recv = &ic->i_recvs[pos];
                ret = rds_ib_recv_refill_one(conn, recv, gfp);
                if (ret) {
                        break;
                }

                rdsdebug("recv %p ibinc %p page %p addr %lu\n", recv,
                         recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
                         (long) ib_sg_dma_address(
                                ic->i_cm_id->device,
                                &recv->r_frag->f_sg));

                /* XXX when can this fail? */
                ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
                if (ret) {
                        rds_ib_conn_error(conn, "recv post on "
                               "%pI6c returned %d, disconnecting and "
                               "reconnecting\n", &conn->c_faddr,
                               ret);
                        break;
                }

                posted++;
        }

        /* We're doing flow control - update the window. */
        if (ic->i_flowctl && posted)
                rds_ib_advertise_credits(conn, posted);

        if (ret)
                rds_ib_ring_unalloc(&ic->i_recv_ring, 1);

        release_refill(conn);

        /* if we're called from the softirq handler, we'll be GFP_NOWAIT.
         * in this case the ring being low is going to lead to more interrupts
         * and we can safely let the softirq code take care of it unless the
         * ring is completely empty.
         *
         * if we're called from krdsd, we'll be GFP_KERNEL.  In this case
         * we might have raced with the softirq code while we had the refill
         * lock held.  Use rds_ib_ring_low() instead of ring_empty to decide
         * if we should requeue.
         */
        if (rds_conn_up(conn) &&
            ((can_wait && rds_ib_ring_low(&ic->i_recv_ring)) ||
            rds_ib_ring_empty(&ic->i_recv_ring))) {
                queue_delayed_work(rds_wq, &conn->c_recv_w, 1);
        }
}

/*
 * We want to recycle several types of recv allocations, like incs and frags.
 * To use this, the *_free() function passes in the ptr to a list_head within
 * the recyclee, as well as the cache to put it on.
 *
 * First, we put the memory on a percpu list. When this reaches a certain size,
 * we move it to an intermediate non-percpu list in a lockless manner, with some
 * xchg/cmpxchg wizardry.
 *
 * N.B. Instead of a list_head as the anchor, we use a single pointer, which can
 * be NULL and xchg'd. The list is actually empty when the pointer is NULL, and
 * list_empty() will return true even when one element is actually present.
 */
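/*
 * For illustration, the callers above use these helpers roughly as follows
 * (see rds_ib_frag_free() and rds_ib_refill_one_frag()):
 *
 *	free path:	rds_ib_recv_cache_put(&frag->f_cache_entry,
 *					      &ic->i_cache_frags);
 *	refill path:	item = rds_ib_recv_cache_get(&ic->i_cache_frags);
 *			if (item)
 *				frag = container_of(item, struct rds_page_frag,
 *						    f_cache_entry);
 */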
static void rds_ib_recv_cache_put(struct list_head *new_item,
                                  struct rds_ib_refill_cache *cache)
{
        unsigned long flags;
        struct list_head *old, *chpfirst;

        local_irq_save(flags);

        chpfirst = __this_cpu_read(cache->percpu->first);
        if (!chpfirst)
                INIT_LIST_HEAD(new_item);
        else /* put on front */
                list_add_tail(new_item, chpfirst);

        __this_cpu_write(cache->percpu->first, new_item);
        __this_cpu_inc(cache->percpu->count);

        if (__this_cpu_read(cache->percpu->count) < RDS_IB_RECYCLE_BATCH_COUNT)
                goto end;

        /*
         * Return our per-cpu first list to the cache's xfer by atomically
         * grabbing the current xfer list, appending it to our per-cpu list,
         * and then atomically returning that entire list back to the
         * cache's xfer list as long as it's still empty.
         */
        do {
                old = xchg(&cache->xfer, NULL);
                if (old)
                        list_splice_entire_tail(old, chpfirst);
                old = cmpxchg(&cache->xfer, NULL, chpfirst);
        } while (old);

        __this_cpu_write(cache->percpu->first, NULL);
        __this_cpu_write(cache->percpu->count, 0);
end:
        local_irq_restore(flags);
}

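/* Take the first item off the cache's ready list, advancing the ready pointer
 * to the next item or clearing it when this was the last one.  Returns NULL
 * if nothing is cached.
 */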
static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache)
{
        struct list_head *head = cache->ready;

        if (head) {
                if (!list_empty(head)) {
                        cache->ready = head->next;
                        list_del_init(head);
                } else
                        cache->ready = NULL;
        }

        return head;
}

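/* Copy a received message to userspace: walk the inc's frag list, copying up
 * to RDS_FRAG_SIZE bytes at a time into the iov_iter until either the iter or
 * the message payload (i_hdr.h_len) is exhausted.
 */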
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
{
        struct rds_ib_incoming *ibinc;
        struct rds_page_frag *frag;
        unsigned long to_copy;
        unsigned long frag_off = 0;
        int copied = 0;
        int ret;
        u32 len;

        ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
        frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
        len = be32_to_cpu(inc->i_hdr.h_len);

        while (iov_iter_count(to) && copied < len) {
                if (frag_off == RDS_FRAG_SIZE) {
                        frag = list_entry(frag->f_item.next,
                                          struct rds_page_frag, f_item);
                        frag_off = 0;
                }
                to_copy = min_t(unsigned long, iov_iter_count(to),
                                RDS_FRAG_SIZE - frag_off);
                to_copy = min_t(unsigned long, to_copy, len - copied);

                /* XXX needs + offset for multiple recvs per page */
                rds_stats_add(s_copy_to_user, to_copy);
                ret = copy_page_to_iter(sg_page(&frag->f_sg),
                                        frag->f_sg.offset + frag_off,
                                        to_copy,
                                        to);
                if (ret != to_copy)
                        return -EFAULT;

                frag_off += to_copy;
                copied += to_copy;
        }

        return copied;
}

/* ic starts out kzalloc()ed */
void rds_ib_recv_init_ack(struct rds_ib_connection *ic)
{
        struct ib_send_wr *wr = &ic->i_ack_wr;
        struct ib_sge *sge = &ic->i_ack_sge;

        sge->addr = ic->i_ack_dma;
        sge->length = sizeof(struct rds_header);
        sge->lkey = ic->i_pd->local_dma_lkey;

        wr->sg_list = sge;
        wr->num_sge = 1;
        wr->opcode = IB_WR_SEND;
        wr->wr_id = RDS_IB_ACK_WR_ID;
        wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
}

/*
 * You'd think that with reliable IB connections you wouldn't need to ack
 * messages that have been received.  The problem is that IB hardware generates
 * an ack message before it has DMAed the message into memory.  This creates a
 * potential message loss if the HCA is disabled for any reason between when it
 * sends the ack and before the message is DMAed and processed.  This is only a
 * potential issue if another HCA is available for fail-over.
 *
 * When the remote host receives our ack they'll free the sent message from
 * their send queue.  To decrease the latency of this we always send an ack
 * immediately after we've received messages.
 *
 * For simplicity, we only have one ack in flight at a time.  This puts
 * pressure on senders to have deep enough send queues to absorb the latency of
 * a single ack frame being in flight.  This might not be good enough.
 *
 * This is implemented by having a long-lived send_wr and sge which point to a
 * statically allocated ack frame.  This ack wr does not fall under the ring
 * accounting that the tx and rx wrs do.  The QP attribute specifically makes
 * room for it beyond the ring size.  Send completion notices its special
 * wr_id and avoids working with the ring in that case.
 */
#ifndef KERNEL_HAS_ATOMIC64
void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required)
{
        unsigned long flags;

        spin_lock_irqsave(&ic->i_ack_lock, flags);
        ic->i_ack_next = seq;
        if (ack_required)
                set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
        spin_unlock_irqrestore(&ic->i_ack_lock, flags);
}

static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
        unsigned long flags;
        u64 seq;

        clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

        spin_lock_irqsave(&ic->i_ack_lock, flags);
        seq = ic->i_ack_next;
        spin_unlock_irqrestore(&ic->i_ack_lock, flags);

        return seq;
}
#else
void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required)
{
        atomic64_set(&ic->i_ack_next, seq);
        if (ack_required) {
                smp_mb__before_atomic();
                set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
        }
}

static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
        clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
        smp_mb__after_atomic();

        return atomic64_read(&ic->i_ack_next);
}
#endif

static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credits)
{
        struct rds_header *hdr = ic->i_ack;
        struct ib_send_wr *failed_wr;
        u64 seq;
        int ret;

        seq = rds_ib_get_ack(ic);

        rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
        rds_message_populate_header(hdr, 0, 0, 0);
        hdr->h_ack = cpu_to_be64(seq);
        hdr->h_credit = adv_credits;
        rds_message_make_checksum(hdr);
        ic->i_ack_queued = jiffies;

        ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, &failed_wr);
        if (unlikely(ret)) {
                /* Failed to send. Release the WR, and
                 * force another ACK.
                 */
                clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
                set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

                rds_ib_stats_inc(s_ib_ack_send_failure);

                rds_ib_conn_error(ic->conn, "sending ack failed\n");
        } else
                rds_ib_stats_inc(s_ib_ack_sent);
}

/*
 * There are 3 ways of getting acknowledgements to the peer:
 *  1. We call rds_ib_attempt_ack from the recv completion handler
 *     to send an ACK-only frame.
 *     However, there can be only one such frame in the send queue
 *     at any time, so we may have to postpone it.
 *  2. When another (data) packet is transmitted while there's
 *     an ACK in the queue, we piggyback the ACK sequence number
 *     on the data packet.
 *  3. If the ACK WR is done sending, we get called from the
 *     send queue completion handler, and check whether there's
 *     another ACK pending (postponed because the WR was on the
 *     queue). If so, we transmit it.
 *
 * We maintain 2 variables:
 *  - i_ack_flags, which keeps track of whether the ACK WR
 *    is currently in the send queue or not (IB_ACK_IN_FLIGHT)
 *  - i_ack_next, which is the last sequence number we received
 *
 * Potentially, send queue and receive queue handlers can run concurrently.
 * It would be nice to not have to use a spinlock to synchronize things,
 * but the one problem that rules this out is that 64bit updates are
 * not atomic on all platforms. Things would be a lot simpler if
 * we had atomic64 or maybe cmpxchg64 everywhere.
 *
 * Reconnecting complicates this picture just slightly. When we
 * reconnect, we may be seeing duplicate packets. The peer
 * is retransmitting them, because it hasn't seen an ACK for
 * them. It is important that we ACK these.
 *
 * ACK mitigation adds a header flag "ACK_REQUIRED"; any packet with
 * this flag set *MUST* be acknowledged immediately.
 */

/*
 * When we get here, we're called from the recv queue handler.
 * Check whether we ought to transmit an ACK.
 */
void rds_ib_attempt_ack(struct rds_ib_connection *ic)
{
        unsigned int adv_credits;

        if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
                return;

        if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) {
                rds_ib_stats_inc(s_ib_ack_send_delayed);
                return;
        }

        /* Can we get a send credit? */
        if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
                rds_ib_stats_inc(s_ib_tx_throttle);
                clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
                return;
        }

        clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
        rds_ib_send_ack(ic, adv_credits);
}

/*
 * We get here from the send completion handler, when the
 * adapter tells us the ACK frame was sent.
 */
void rds_ib_ack_send_complete(struct rds_ib_connection *ic)
{
        clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
        rds_ib_attempt_ack(ic);
}

/*
 * This is called by the regular xmit code when it wants to piggyback
 * an ACK on an outgoing frame.
 */
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
{
        if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
                rds_ib_stats_inc(s_ib_ack_send_piggybacked);
        return rds_ib_get_ack(ic);
}

/*
 * It's kind of lame that we're copying from the posted receive pages into
 * long-lived bitmaps. We could have posted the bitmaps and rdma written into
 * them. But receiving new congestion bitmaps should be a *rare* event, so
 * hopefully we won't need to invest that complexity in making it more
 * efficient. By copying we can share a simpler core with TCP which has to
 * copy.
 */
static void rds_ib_cong_recv(struct rds_connection *conn,
                             struct rds_ib_incoming *ibinc)
{
        struct rds_cong_map *map;
        unsigned int map_off;
        unsigned int map_page;
        struct rds_page_frag *frag;
        unsigned long frag_off;
        unsigned long to_copy;
        unsigned long copied;
        uint64_t uncongested = 0;
        void *addr;

        /* catch completely corrupt packets */
        if (be32_to_cpu(ibinc->ii_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
                return;

        map = conn->c_fcong;
        map_page = 0;
        map_off = 0;

        frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
        frag_off = 0;

        copied = 0;

        while (copied < RDS_CONG_MAP_BYTES) {
                uint64_t *src, *dst;
                unsigned int k;

                to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
                BUG_ON(to_copy & 7); /* Must be 64bit aligned. */

                addr = kmap_atomic(sg_page(&frag->f_sg));

                src = addr + frag->f_sg.offset + frag_off;
                dst = (void *)map->m_page_addrs[map_page] + map_off;
                for (k = 0; k < to_copy; k += 8) {
                        /* Record ports that became uncongested, ie
                         * bits that changed from 0 to 1. */
                        uncongested |= ~(*src) & *dst;
                        *dst++ = *src++;
                }
                kunmap_atomic(addr);

                copied += to_copy;

                map_off += to_copy;
                if (map_off == PAGE_SIZE) {
                        map_off = 0;
                        map_page++;
                }

                frag_off += to_copy;
                if (frag_off == RDS_FRAG_SIZE) {
                        frag = list_entry(frag->f_item.next,
                                          struct rds_page_frag, f_item);
                        frag_off = 0;
                }
        }

        /* the congestion map is in little endian order */
        uncongested = le64_to_cpu(uncongested);

        rds_cong_map_updated(map, uncongested);
}

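/* Handle one completed receive: validate the header and its checksum, note
 * the piggybacked ACK and credit update, then either consume an ACK-only
 * packet or chain the data frag onto the connection's current inc, handing
 * the inc to rds_recv_incoming() (or the congestion-map handler) once the
 * whole message has arrived.
 */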
static void rds_ib_process_recv(struct rds_connection *conn,
                                struct rds_ib_recv_work *recv, u32 data_len,
                                struct rds_ib_ack_state *state)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct rds_ib_incoming *ibinc = ic->i_ibinc;
        struct rds_header *ihdr, *hdr;

        /* XXX shut down the connection if port 0,0 are seen? */

        rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv,
                 data_len);

        if (data_len < sizeof(struct rds_header)) {
                rds_ib_conn_error(conn, "incoming message "
                       "from %pI6c didn't include a "
                       "header, disconnecting and "
                       "reconnecting\n",
                       &conn->c_faddr);
                return;
        }
        data_len -= sizeof(struct rds_header);

        ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];

        /* Validate the checksum. */
        if (!rds_message_verify_checksum(ihdr)) {
                rds_ib_conn_error(conn, "incoming message "
                       "from %pI6c has corrupted header - "
                       "forcing a reconnect\n",
                       &conn->c_faddr);
                rds_stats_inc(s_recv_drop_bad_checksum);
                return;
        }

        /* Process the ACK sequence which comes with every packet */
        state->ack_recv = be64_to_cpu(ihdr->h_ack);
        state->ack_recv_valid = 1;

        /* Process the credits update if there was one */
        if (ihdr->h_credit)
                rds_ib_send_add_credits(conn, ihdr->h_credit);

        if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && data_len == 0) {
                /* This is an ACK-only packet. The reason it gets
                 * special treatment here is that historically, ACKs
                 * were rather special beasts.
                 */
                rds_ib_stats_inc(s_ib_ack_received);

                /*
                 * Usually the frags make their way on to incs and are then freed as
                 * the inc is freed.  We don't go that route, so we have to drop the
                 * page ref ourselves.  We can't just leave the page on the recv
                 * because that confuses the dma mapping of pages and each recv's use
                 * of a partial page.
                 *
                 * FIXME: Fold this into the code path below.
                 */
                rds_ib_frag_free(ic, recv->r_frag);
                recv->r_frag = NULL;
                return;
        }

        /*
         * If we don't already have an inc on the connection then this
         * fragment has a header and starts a message.  Copy its header
         * into the inc and save the inc so we can hang upcoming fragments
         * off its list.
         */
        if (!ibinc) {
                ibinc = recv->r_ibinc;
                recv->r_ibinc = NULL;
                ic->i_ibinc = ibinc;

                hdr = &ibinc->ii_inc.i_hdr;
                ibinc->ii_inc.i_rx_lat_trace[RDS_MSG_RX_HDR] =
                                local_clock();
                memcpy(hdr, ihdr, sizeof(*hdr));
                ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);
                ibinc->ii_inc.i_rx_lat_trace[RDS_MSG_RX_START] =
                                local_clock();

                rdsdebug("ic %p ibinc %p rem %u flag 0x%x\n", ic, ibinc,
                         ic->i_recv_data_rem, hdr->h_flags);
        } else {
                hdr = &ibinc->ii_inc.i_hdr;
                /* We can't just use memcmp here; fragments of a
                 * single message may carry different ACKs */
                if (hdr->h_sequence != ihdr->h_sequence ||
                    hdr->h_len != ihdr->h_len ||
                    hdr->h_sport != ihdr->h_sport ||
                    hdr->h_dport != ihdr->h_dport) {
                        rds_ib_conn_error(conn,
                                "fragment header mismatch; forcing reconnect\n");
                        return;
                }
        }

        list_add_tail(&recv->r_frag->f_item, &ibinc->ii_frags);
        recv->r_frag = NULL;

        if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
                ic->i_recv_data_rem -= RDS_FRAG_SIZE;
        else {
                ic->i_recv_data_rem = 0;
                ic->i_ibinc = NULL;

                if (ibinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP) {
                        rds_ib_cong_recv(conn, ibinc);
                } else {
                        rds_recv_incoming(conn, &conn->c_faddr, &conn->c_laddr,
                                          &ibinc->ii_inc, GFP_ATOMIC);
                        state->ack_next = be64_to_cpu(hdr->h_sequence);
                        state->ack_next_valid = 1;
                }

                /* Evaluate the ACK_REQUIRED flag *after* we received
                 * the complete frame, and after bumping the next_rx
                 * sequence. */
                if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) {
                        rds_stats_inc(s_recv_ack_required);
                        state->ack_required = 1;
                }

                rds_inc_put(&ibinc->ii_inc);
        }
}

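/* Receive completion handler, called once per completed recv work request.
 * The frag is unmapped here; rds_ib_process_recv() either consumes it or it
 * is freed below before the slot is returned to the ring.
 */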
void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
                             struct ib_wc *wc,
                             struct rds_ib_ack_state *state)
{
        struct rds_connection *conn = ic->conn;
        struct rds_ib_recv_work *recv;

        rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
                 (unsigned long long)wc->wr_id, wc->status,
                 ib_wc_status_msg(wc->status), wc->byte_len,
                 be32_to_cpu(wc->ex.imm_data));

        rds_ib_stats_inc(s_ib_rx_cq_event);
        recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];
        ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1,
                        DMA_FROM_DEVICE);

        /* Also process recvs in connecting state because it is possible
         * to get a recv completion _before_ the rdmacm ESTABLISHED
         * event is processed.
         */
        if (wc->status == IB_WC_SUCCESS) {
                rds_ib_process_recv(conn, recv, wc->byte_len, state);
        } else {
                /* We expect errors as the qp is drained during shutdown */
                if (rds_conn_up(conn) || rds_conn_connecting(conn))
                        rds_ib_conn_error(conn, "recv completion on <%pI6c,%pI6c> had status %u (%s), disconnecting and reconnecting\n",
                                          &conn->c_laddr, &conn->c_faddr,
                                          wc->status,
                                          ib_wc_status_msg(wc->status));
        }

        /* rds_ib_process_recv() doesn't always consume the frag, and
         * we might not have called it at all if the wc didn't indicate
         * success. We already unmapped the frag's pages, though, and
         * the following rds_ib_ring_free() call tells the refill path
         * that it will not find an allocated frag here. Make sure we
         * keep that promise by freeing a frag that's still on the ring.
         */
        if (recv->r_frag) {
                rds_ib_frag_free(ic, recv->r_frag);
                recv->r_frag = NULL;
        }
        rds_ib_ring_free(&ic->i_recv_ring, 1);

        /* If we ever end up with a really empty receive ring, we're
         * in deep trouble, as the sender will definitely see RNR
         * timeouts. */
        if (rds_ib_ring_empty(&ic->i_recv_ring))
                rds_ib_stats_inc(s_ib_rx_ring_empty);

        if (rds_ib_ring_low(&ic->i_recv_ring)) {
                rds_ib_recv_refill(conn, 0, GFP_NOWAIT);
                rds_ib_stats_inc(s_ib_rx_refill_from_cq);
        }
}

int rds_ib_recv_path(struct rds_conn_path *cp)
{
        struct rds_connection *conn = cp->cp_conn;
        struct rds_ib_connection *ic = conn->c_transport_data;

        rdsdebug("conn %p\n", conn);
        if (rds_conn_up(conn)) {
                rds_ib_attempt_ack(ic);
                rds_ib_recv_refill(conn, 0, GFP_KERNEL);
                rds_ib_stats_inc(s_ib_rx_refill_from_thread);
        }

        return 0;
}

int rds_ib_recv_init(void)
{
        struct sysinfo si;
        int ret = -ENOMEM;

        /* Default to 30% of all available RAM for recv memory */
        si_meminfo(&si);
        rds_ib_sysctl_max_recv_allocation = si.totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;

        rds_ib_incoming_slab = kmem_cache_create("rds_ib_incoming",
                                        sizeof(struct rds_ib_incoming),
                                        0, SLAB_HWCACHE_ALIGN, NULL);
        if (!rds_ib_incoming_slab)
                goto out;

        rds_ib_frag_slab = kmem_cache_create("rds_ib_frag",
                                        sizeof(struct rds_page_frag),
                                        0, SLAB_HWCACHE_ALIGN, NULL);
        if (!rds_ib_frag_slab) {
                kmem_cache_destroy(rds_ib_incoming_slab);
                rds_ib_incoming_slab = NULL;
        } else
                ret = 0;
out:
        return ret;
}

void rds_ib_recv_exit(void)
{
        kmem_cache_destroy(rds_ib_incoming_slab);
        kmem_cache_destroy(rds_ib_frag_slab);
}