/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <rdma/rdma_cm.h>

#include "rds.h"
#include "ib.h"

static struct kmem_cache *rds_ib_incoming_slab;
static struct kmem_cache *rds_ib_frag_slab;
static atomic_t rds_ib_allocation = ATOMIC_INIT(0);

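/*
 * A frag holds a reference on its page until rds_ib_frag_drop_page()
 * releases it; rds_ib_frag_free() then returns the frag itself to its
 * slab and insists (BUG_ON) that the page reference was already dropped.
 */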
static void rds_ib_frag_drop_page(struct rds_page_frag *frag)
{
        rdsdebug("frag %p page %p\n", frag, frag->f_page);
        __free_page(frag->f_page);
        frag->f_page = NULL;
}

static void rds_ib_frag_free(struct rds_page_frag *frag)
{
        rdsdebug("frag %p page %p\n", frag, frag->f_page);
        BUG_ON(frag->f_page);
        kmem_cache_free(rds_ib_frag_slab, frag);
}

/*
 * We map a page at a time.  Its fragments are posted in order.  This
 * is called in fragment order as the fragments get receive completion events.
 * Only the last frag in the page performs the unmapping.
 *
 * It's OK for ring cleanup to call this in whatever order it likes because
 * DMA is not in flight and so we can unmap while other ring entries still
 * hold page references in their frags.
 */
static void rds_ib_recv_unmap_page(struct rds_ib_connection *ic,
                                   struct rds_ib_recv_work *recv)
{
        struct rds_page_frag *frag = recv->r_frag;

        rdsdebug("recv %p frag %p page %p\n", recv, frag, frag->f_page);
        if (frag->f_mapped)
                ib_dma_unmap_page(ic->i_cm_id->device,
                                  frag->f_mapped,
                                  RDS_FRAG_SIZE, DMA_FROM_DEVICE);
        frag->f_mapped = 0;
}

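/*
 * Each receive work request carries two scatter/gather entries: sge[0]
 * always points at this slot's rds_header in the i_recv_hdrs DMA region,
 * and sge[1] receives up to RDS_FRAG_SIZE bytes of message payload.
 */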
void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
{
        struct rds_ib_recv_work *recv;
        u32 i;

        for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
                struct ib_sge *sge;

                recv->r_ibinc = NULL;
                recv->r_frag = NULL;

                recv->r_wr.next = NULL;
                recv->r_wr.wr_id = i;
                recv->r_wr.sg_list = recv->r_sge;
                recv->r_wr.num_sge = RDS_IB_RECV_SGE;

                sge = &recv->r_sge[0];
                sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
                sge->length = sizeof(struct rds_header);
                sge->lkey = ic->i_mr->lkey;

                sge = &recv->r_sge[1];
                sge->addr = 0;
                sge->length = RDS_FRAG_SIZE;
                sge->lkey = ic->i_mr->lkey;
        }
}

static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
                                  struct rds_ib_recv_work *recv)
{
        if (recv->r_ibinc) {
                rds_inc_put(&recv->r_ibinc->ii_inc);
                recv->r_ibinc = NULL;
        }
        if (recv->r_frag) {
                rds_ib_recv_unmap_page(ic, recv);
                if (recv->r_frag->f_page)
                        rds_ib_frag_drop_page(recv->r_frag);
                rds_ib_frag_free(recv->r_frag);
                recv->r_frag = NULL;
        }
}

void rds_ib_recv_clear_ring(struct rds_ib_connection *ic)
{
        u32 i;

        for (i = 0; i < ic->i_recv_ring.w_nr; i++)
                rds_ib_recv_clear_one(ic, &ic->i_recvs[i]);

        if (ic->i_frag.f_page)
                rds_ib_frag_drop_page(&ic->i_frag);
}

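/*
 * Prepare one ring entry for posting: allocate the incoming-message
 * struct and fragment from their slabs, carve the next RDS_FRAG_SIZE
 * slice out of the connection's current page (allocating a fresh page
 * when it is used up), and DMA-map that slice for the HCA.  Allocations
 * use GFP_NOWAIT because this path can run from the receive tasklet,
 * where sleeping is not allowed.
 */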
static int rds_ib_recv_refill_one(struct rds_connection *conn,
                                  struct rds_ib_recv_work *recv)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        dma_addr_t dma_addr;
        struct ib_sge *sge;
        int ret = -ENOMEM;

        if (!recv->r_ibinc) {
                if (!atomic_add_unless(&rds_ib_allocation, 1, rds_ib_sysctl_max_recv_allocation)) {
                        rds_ib_stats_inc(s_ib_rx_alloc_limit);
                        goto out;
                }
                recv->r_ibinc = kmem_cache_alloc(rds_ib_incoming_slab, GFP_NOWAIT);
                if (!recv->r_ibinc) {
                        atomic_dec(&rds_ib_allocation);
                        goto out;
                }
                INIT_LIST_HEAD(&recv->r_ibinc->ii_frags);
                rds_inc_init(&recv->r_ibinc->ii_inc, conn, conn->c_faddr);
        }

        if (!recv->r_frag) {
                recv->r_frag = kmem_cache_alloc(rds_ib_frag_slab, GFP_NOWAIT);
                if (!recv->r_frag)
                        goto out;
                INIT_LIST_HEAD(&recv->r_frag->f_item);
                recv->r_frag->f_page = NULL;
        }

        if (!ic->i_frag.f_page) {
                ic->i_frag.f_page = alloc_page(GFP_NOWAIT);
                if (!ic->i_frag.f_page)
                        goto out;
                ic->i_frag.f_offset = 0;
        }

        dma_addr = ib_dma_map_page(ic->i_cm_id->device,
                                   ic->i_frag.f_page,
                                   ic->i_frag.f_offset,
                                   RDS_FRAG_SIZE,
                                   DMA_FROM_DEVICE);
        if (ib_dma_mapping_error(ic->i_cm_id->device, dma_addr))
                goto out;

        /*
         * Once we get the RDS_PAGE_LAST_OFF frag then rds_ib_recv_unmap_page()
         * must be called on this recv.  This happens as completions hit
         * in order or on connection shutdown.
         */
        recv->r_frag->f_page = ic->i_frag.f_page;
        recv->r_frag->f_offset = ic->i_frag.f_offset;
        recv->r_frag->f_mapped = dma_addr;

        sge = &recv->r_sge[0];
        sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header);
        sge->length = sizeof(struct rds_header);

        sge = &recv->r_sge[1];
        sge->addr = dma_addr;
        sge->length = RDS_FRAG_SIZE;

        get_page(recv->r_frag->f_page);

        if (ic->i_frag.f_offset < RDS_PAGE_LAST_OFF) {
                ic->i_frag.f_offset += RDS_FRAG_SIZE;
        } else {
                put_page(ic->i_frag.f_page);
                ic->i_frag.f_page = NULL;
                ic->i_frag.f_offset = 0;
        }

        ret = 0;
out:
        return ret;
}

/*
 * This tries to allocate and post unused work requests after making sure that
 * they have all the allocations they need to queue received fragments into
 * sockets.  The i_recv_mutex is held here so that ring_alloc and _unalloc
 * pairs don't go unmatched.
 *
 * -1 is returned if posting fails due to temporary resource exhaustion.
 */
int rds_ib_recv_refill(struct rds_connection *conn, int prefill)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct rds_ib_recv_work *recv;
        struct ib_recv_wr *failed_wr;
        unsigned int posted = 0;
        int ret = 0;
        u32 pos;

        while ((prefill || rds_conn_up(conn)) &&
               rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
                if (pos >= ic->i_recv_ring.w_nr) {
                        printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
                               pos);
                        ret = -EINVAL;
                        break;
                }

                recv = &ic->i_recvs[pos];
                ret = rds_ib_recv_refill_one(conn, recv);
                if (ret) {
                        ret = -1;
                        break;
                }

                /* XXX when can this fail? */
                ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
                rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv,
                         recv->r_ibinc, recv->r_frag->f_page,
                         (long) recv->r_frag->f_mapped, ret);
                if (ret) {
                        rds_ib_conn_error(conn, "recv post on "
                               "%pI4 returned %d, disconnecting and "
                               "reconnecting\n", &conn->c_faddr,
                               ret);
                        ret = -1;
                        break;
                }

                posted++;
        }

        /* We're doing flow control - update the window. */
        if (ic->i_flowctl && posted)
                rds_ib_advertise_credits(conn, posted);

        if (ret)
                rds_ib_ring_unalloc(&ic->i_recv_ring, 1);
        return ret;
}

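/*
 * Tear down an incoming message: drop the page reference held by each
 * queued fragment and return the fragments to their slab.
 */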
static void rds_ib_inc_purge(struct rds_incoming *inc)
{
        struct rds_ib_incoming *ibinc;
        struct rds_page_frag *frag;
        struct rds_page_frag *pos;

        ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
        rdsdebug("purging ibinc %p inc %p\n", ibinc, inc);

        list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) {
                list_del_init(&frag->f_item);
                rds_ib_frag_drop_page(frag);
                rds_ib_frag_free(frag);
        }
}

void rds_ib_inc_free(struct rds_incoming *inc)
{
        struct rds_ib_incoming *ibinc;

        ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);

        rds_ib_inc_purge(inc);
        rdsdebug("freeing ibinc %p inc %p\n", ibinc, inc);
        BUG_ON(!list_empty(&ibinc->ii_frags));
        kmem_cache_free(rds_ib_incoming_slab, ibinc);
        atomic_dec(&rds_ib_allocation);
        BUG_ON(atomic_read(&rds_ib_allocation) < 0);
}

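/*
 * Copy a received message into a userspace iovec.  The fragment list and
 * the iovec are walked in lockstep; each pass copies the largest run that
 * fits in both the current fragment and the current iovec entry, bounded
 * by the caller's size and by the length in the message header.
 */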
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
                            size_t size)
{
        struct rds_ib_incoming *ibinc;
        struct rds_page_frag *frag;
        struct iovec *iov = first_iov;
        unsigned long to_copy;
        unsigned long frag_off = 0;
        unsigned long iov_off = 0;
        int copied = 0;
        int ret;
        u32 len;

        ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
        frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
        len = be32_to_cpu(inc->i_hdr.h_len);

        while (copied < size && copied < len) {
                if (frag_off == RDS_FRAG_SIZE) {
                        frag = list_entry(frag->f_item.next,
                                          struct rds_page_frag, f_item);
                        frag_off = 0;
                }
                while (iov_off == iov->iov_len) {
                        iov_off = 0;
                        iov++;
                }

                to_copy = min(iov->iov_len - iov_off, RDS_FRAG_SIZE - frag_off);
                to_copy = min_t(size_t, to_copy, size - copied);
                to_copy = min_t(unsigned long, to_copy, len - copied);

                rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag "
                         "[%p, %lu] + %lu\n",
                         to_copy, iov->iov_base, iov->iov_len, iov_off,
                         frag->f_page, frag->f_offset, frag_off);

                /* XXX needs + offset for multiple recvs per page */
                ret = rds_page_copy_to_user(frag->f_page,
                                            frag->f_offset + frag_off,
                                            iov->iov_base + iov_off,
                                            to_copy);
                if (ret) {
                        copied = ret;
                        break;
                }

                iov_off += to_copy;
                frag_off += to_copy;
                copied += to_copy;
        }

        return copied;
}

/* ic starts out kzalloc()ed */
void rds_ib_recv_init_ack(struct rds_ib_connection *ic)
{
        struct ib_send_wr *wr = &ic->i_ack_wr;
        struct ib_sge *sge = &ic->i_ack_sge;

        sge->addr = ic->i_ack_dma;
        sge->length = sizeof(struct rds_header);
        sge->lkey = ic->i_mr->lkey;

        wr->sg_list = sge;
        wr->num_sge = 1;
        wr->opcode = IB_WR_SEND;
        wr->wr_id = RDS_IB_ACK_WR_ID;
        wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
}

/*
 * You'd think that with reliable IB connections you wouldn't need to ack
 * messages that have been received.  The problem is that IB hardware generates
 * an ack message before it has DMAed the message into memory.  This creates a
 * potential message loss if the HCA is disabled for any reason between when it
 * sends the ack and before the message is DMAed and processed.  This is only a
 * potential issue if another HCA is available for fail-over.
 *
 * When the remote host receives our ack they'll free the sent message from
 * their send queue.  To decrease the latency of this we always send an ack
 * immediately after we've received messages.
 *
 * For simplicity, we only have one ack in flight at a time.  This puts
 * pressure on senders to have deep enough send queues to absorb the latency of
 * a single ack frame being in flight.  This might not be good enough.
 *
 * This is implemented by having a long-lived send_wr and sge which point to a
 * statically allocated ack frame.  This ack wr does not fall under the ring
 * accounting that the tx and rx wrs do.  The QP attribute specifically makes
 * room for it beyond the ring size.  Send completion notices its special
 * wr_id and avoids working with the ring in that case.
 */
#ifndef KERNEL_HAS_ATOMIC64
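/*
 * Without atomic64, a spinlock protects the 64-bit i_ack_next sequence so
 * that readers never see a torn update on 32-bit platforms.
 */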
static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
                           int ack_required)
{
        unsigned long flags;

        spin_lock_irqsave(&ic->i_ack_lock, flags);
        ic->i_ack_next = seq;
        if (ack_required)
                set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
        spin_unlock_irqrestore(&ic->i_ack_lock, flags);
}

static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
        unsigned long flags;
        u64 seq;

        clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

        spin_lock_irqsave(&ic->i_ack_lock, flags);
        seq = ic->i_ack_next;
        spin_unlock_irqrestore(&ic->i_ack_lock, flags);

        return seq;
}
#else
static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
                           int ack_required)
{
        atomic64_set(&ic->i_ack_next, seq);
        if (ack_required) {
                smp_mb__before_clear_bit();
                set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
        }
}

static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
        clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
        smp_mb__after_clear_bit();

        return atomic64_read(&ic->i_ack_next);
}
#endif

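/*
 * Build and post the dedicated ACK frame: an otherwise-empty rds_header
 * carrying the latest acknowledged sequence and any credit advertisement.
 * If the post fails, the REQUESTED/IN_FLIGHT bits are reset so another
 * attempt will be made, and the connection is torn down for a reconnect.
 */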
static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credits)
{
        struct rds_header *hdr = ic->i_ack;
        struct ib_send_wr *failed_wr;
        u64 seq;
        int ret;

        seq = rds_ib_get_ack(ic);

        rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
        rds_message_populate_header(hdr, 0, 0, 0);
        hdr->h_ack = cpu_to_be64(seq);
        hdr->h_credit = adv_credits;
        rds_message_make_checksum(hdr);
        ic->i_ack_queued = jiffies;

        ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, &failed_wr);
        if (unlikely(ret)) {
                /* Failed to send. Release the WR, and
                 * force another ACK.
                 */
                clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
                set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

                rds_ib_stats_inc(s_ib_ack_send_failure);

                rds_ib_conn_error(ic->conn, "sending ack failed\n");
        } else
                rds_ib_stats_inc(s_ib_ack_sent);
}

/*
 * There are 3 ways of getting acknowledgements to the peer:
 *  1. We call rds_ib_attempt_ack from the recv completion handler
 *     to send an ACK-only frame.
 *     However, there can be only one such frame in the send queue
 *     at any time, so we may have to postpone it.
 *  2. When another (data) packet is transmitted while there's
 *     an ACK in the queue, we piggyback the ACK sequence number
 *     on the data packet.
 *  3. If the ACK WR is done sending, we get called from the
 *     send queue completion handler, and check whether there's
 *     another ACK pending (postponed because the WR was on the
 *     queue). If so, we transmit it.
 *
 * We maintain 2 variables:
 *  -  i_ack_flags, which keeps track of whether the ACK WR
 *     is currently in the send queue or not (IB_ACK_IN_FLIGHT)
 *  -  i_ack_next, which is the last sequence number we received
 *
 * Potentially, send queue and receive queue handlers can run concurrently.
 * It would be nice to not have to use a spinlock to synchronize things,
 * but the one problem that rules this out is that 64bit updates are
 * not atomic on all platforms. Things would be a lot simpler if
 * we had atomic64 or maybe cmpxchg64 everywhere.
 *
 * Reconnecting complicates this picture just slightly. When we
 * reconnect, we may be seeing duplicate packets. The peer
 * is retransmitting them, because it hasn't seen an ACK for
 * them. It is important that we ACK these.
 *
 * ACK mitigation adds a header flag "ACK_REQUIRED"; any packet with
 * this flag set *MUST* be acknowledged immediately.
 */

/*
 * When we get here, we're called from the recv queue handler.
 * Check whether we ought to transmit an ACK.
 */
void rds_ib_attempt_ack(struct rds_ib_connection *ic)
{
        unsigned int adv_credits;

        if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
                return;

        if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) {
                rds_ib_stats_inc(s_ib_ack_send_delayed);
                return;
        }

        /* Can we get a send credit? */
        if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
                rds_ib_stats_inc(s_ib_tx_throttle);
                clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
                return;
        }

        clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
        rds_ib_send_ack(ic, adv_credits);
}

/*
 * We get here from the send completion handler, when the
 * adapter tells us the ACK frame was sent.
 */
void rds_ib_ack_send_complete(struct rds_ib_connection *ic)
{
        clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
        rds_ib_attempt_ack(ic);
}

/*
 * This is called by the regular xmit code when it wants to piggyback
 * an ACK on an outgoing frame.
 */
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
{
        if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
                rds_ib_stats_inc(s_ib_ack_send_piggybacked);
        return rds_ib_get_ack(ic);
}

/*
 * It's kind of lame that we're copying from the posted receive pages into
 * long-lived bitmaps.  We could have posted the bitmaps and rdma written into
 * them.  But receiving new congestion bitmaps should be a *rare* event, so
 * hopefully we won't need to invest that complexity in making it more
 * efficient.  By copying we can share a simpler core with TCP which has to
 * copy.
 */
static void rds_ib_cong_recv(struct rds_connection *conn,
                             struct rds_ib_incoming *ibinc)
{
        struct rds_cong_map *map;
        unsigned int map_off;
        unsigned int map_page;
        struct rds_page_frag *frag;
        unsigned long frag_off;
        unsigned long to_copy;
        unsigned long copied;
        uint64_t uncongested = 0;
        void *addr;

        /* catch completely corrupt packets */
        if (be32_to_cpu(ibinc->ii_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
                return;

        map = conn->c_fcong;
        map_page = 0;
        map_off = 0;

        frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
        frag_off = 0;

        copied = 0;

        while (copied < RDS_CONG_MAP_BYTES) {
                uint64_t *src, *dst;
                unsigned int k;

                to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
                BUG_ON(to_copy & 7); /* Must be 64bit aligned. */

                addr = kmap_atomic(frag->f_page, KM_SOFTIRQ0);

                src = addr + frag_off;
                dst = (void *)map->m_page_addrs[map_page] + map_off;
                for (k = 0; k < to_copy; k += 8) {
                        /* Record ports that became uncongested, ie
                         * bits that changed from 0 to 1. */
                        uncongested |= ~(*src) & *dst;
                        *dst++ = *src++;
                }
                kunmap_atomic(addr, KM_SOFTIRQ0);

                copied += to_copy;

                map_off += to_copy;
                if (map_off == PAGE_SIZE) {
                        map_off = 0;
                        map_page++;
                }

                frag_off += to_copy;
                if (frag_off == RDS_FRAG_SIZE) {
                        frag = list_entry(frag->f_item.next,
                                          struct rds_page_frag, f_item);
                        frag_off = 0;
                }
        }

        /* the congestion map is in little endian order */
        uncongested = le64_to_cpu(uncongested);

        rds_cong_map_updated(map, uncongested);
}

/*
 * Rings are posted with all the allocations they'll need to queue the
 * incoming message to the receiving socket so this can't fail.
 * All fragments start with a header, so we can make sure we're not receiving
 * garbage, and we can tell a small 8 byte fragment from an ACK frame.
 */
struct rds_ib_ack_state {
        u64 ack_next;
        u64 ack_recv;
        unsigned int ack_required:1;
        unsigned int ack_next_valid:1;
        unsigned int ack_recv_valid:1;
};

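/*
 * Handle one completed receive.  The header is validated, pure ACK frames
 * are handled early, and data fragments are chained onto the connection's
 * current incoming message until the full length has arrived, at which
 * point the message is handed to the socket layer (or to the congestion
 * code for bitmap updates).
 */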
static void rds_ib_process_recv(struct rds_connection *conn,
                                struct rds_ib_recv_work *recv, u32 data_len,
                                struct rds_ib_ack_state *state)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct rds_ib_incoming *ibinc = ic->i_ibinc;
        struct rds_header *ihdr, *hdr;

        /* XXX shut down the connection if port 0,0 are seen? */

        rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv,
                 data_len);

        if (data_len < sizeof(struct rds_header)) {
                rds_ib_conn_error(conn, "incoming message "
                       "from %pI4 didn't include a "
                       "header, disconnecting and "
                       "reconnecting\n",
                       &conn->c_faddr);
                return;
        }
        data_len -= sizeof(struct rds_header);

        ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];

        /* Validate the checksum. */
        if (!rds_message_verify_checksum(ihdr)) {
                rds_ib_conn_error(conn, "incoming message "
                       "from %pI4 has corrupted header - "
                       "forcing a reconnect\n",
                       &conn->c_faddr);
                rds_stats_inc(s_recv_drop_bad_checksum);
                return;
        }

        /* Process the ACK sequence which comes with every packet */
        state->ack_recv = be64_to_cpu(ihdr->h_ack);
        state->ack_recv_valid = 1;

        /* Process the credits update if there was one */
        if (ihdr->h_credit)
                rds_ib_send_add_credits(conn, ihdr->h_credit);

        if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && data_len == 0) {
                /* This is an ACK-only packet. The reason it gets
                 * special treatment here is that historically, ACKs
                 * were rather special beasts.
                 */
                rds_ib_stats_inc(s_ib_ack_received);

                /*
                 * Usually the frags make their way on to incs and are then freed as
                 * the inc is freed.  We don't go that route, so we have to drop the
                 * page ref ourselves.  We can't just leave the page on the recv
                 * because that confuses the dma mapping of pages and each recv's use
                 * of a partial page.  We can leave the frag, though, it will be
                 * reused.
                 *
                 * FIXME: Fold this into the code path below.
                 */
                rds_ib_frag_drop_page(recv->r_frag);
                return;
        }

        /*
         * If we don't already have an inc on the connection then this
         * fragment has a header and starts a message.  Copy its header
         * into the inc and save the inc so we can hang upcoming fragments
         * off its list.
         */
        if (!ibinc) {
                ibinc = recv->r_ibinc;
                recv->r_ibinc = NULL;
                ic->i_ibinc = ibinc;

                hdr = &ibinc->ii_inc.i_hdr;
                memcpy(hdr, ihdr, sizeof(*hdr));
                ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);

                rdsdebug("ic %p ibinc %p rem %u flag 0x%x\n", ic, ibinc,
                         ic->i_recv_data_rem, hdr->h_flags);
        } else {
                hdr = &ibinc->ii_inc.i_hdr;
                /* We can't just use memcmp here; fragments of a
                 * single message may carry different ACKs */
                if (hdr->h_sequence != ihdr->h_sequence ||
                    hdr->h_len != ihdr->h_len ||
                    hdr->h_sport != ihdr->h_sport ||
                    hdr->h_dport != ihdr->h_dport) {
                        rds_ib_conn_error(conn,
                                "fragment header mismatch; forcing reconnect\n");
                        return;
                }
        }

        list_add_tail(&recv->r_frag->f_item, &ibinc->ii_frags);
        recv->r_frag = NULL;

        if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
                ic->i_recv_data_rem -= RDS_FRAG_SIZE;
        else {
                ic->i_recv_data_rem = 0;
                ic->i_ibinc = NULL;

                if (ibinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
                        rds_ib_cong_recv(conn, ibinc);
                else {
                        rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
                                          &ibinc->ii_inc, GFP_ATOMIC,
                                          KM_SOFTIRQ0);
                        state->ack_next = be64_to_cpu(hdr->h_sequence);
                        state->ack_next_valid = 1;
                }

                /* Evaluate the ACK_REQUIRED flag *after* we received
                 * the complete frame, and after bumping the next_rx
                 * sequence. */
                if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) {
                        rds_stats_inc(s_recv_ack_required);
                        state->ack_required = 1;
                }

                rds_inc_put(&ibinc->ii_inc);
        }
}

/*
 * Plucking the oldest entry from the ring can be done concurrently with
 * the thread refilling the ring.  Each ring operation is protected by
 * spinlocks and the transient state of refilling doesn't change the
 * recording of which entry is oldest.
 *
 * This relies on IB only calling one cq comp_handler for each cq so that
 * there will only be one caller of rds_recv_incoming() per RDS connection.
 */
void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context)
{
        struct rds_connection *conn = context;
        struct rds_ib_connection *ic = conn->c_transport_data;

        rdsdebug("conn %p cq %p\n", conn, cq);

        rds_ib_stats_inc(s_ib_rx_cq_call);

        tasklet_schedule(&ic->i_recv_tasklet);
}

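/*
 * Drain the recv completion queue: each completion identifies the oldest
 * posted ring entry, whose page is unmapped before the fragment is either
 * processed or dropped on error, and the ring slot is then freed.
 */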
static inline void rds_poll_cq(struct rds_ib_connection *ic,
                               struct rds_ib_ack_state *state)
{
        struct rds_connection *conn = ic->conn;
        struct ib_wc wc;
        struct rds_ib_recv_work *recv;

        while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
                rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
                         (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
                         be32_to_cpu(wc.ex.imm_data));
                rds_ib_stats_inc(s_ib_rx_cq_event);

                recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];

                rds_ib_recv_unmap_page(ic, recv);

                /*
                 * Also process recvs in connecting state because it is possible
                 * to get a recv completion _before_ the rdmacm ESTABLISHED
                 * event is processed.
                 */
                if (rds_conn_up(conn) || rds_conn_connecting(conn)) {
                        /* We expect errors as the qp is drained during shutdown */
                        if (wc.status == IB_WC_SUCCESS) {
                                rds_ib_process_recv(conn, recv, wc.byte_len, state);
                        } else {
                                rds_ib_conn_error(conn, "recv completion on "
                                       "%pI4 had status %u, disconnecting and "
                                       "reconnecting\n", &conn->c_faddr,
                                       wc.status);
                        }
                }

                rds_ib_ring_free(&ic->i_recv_ring, 1);
        }
}

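/*
 * The tasklet polls, re-arms the CQ with ib_req_notify_cq(), then polls
 * again; the second pass catches completions that arrived between the
 * first drain and re-arming, which would otherwise raise no interrupt.
 */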
void rds_ib_recv_tasklet_fn(unsigned long data)
{
        struct rds_ib_connection *ic = (struct rds_ib_connection *) data;
        struct rds_connection *conn = ic->conn;
        struct rds_ib_ack_state state = { 0, };

        rds_poll_cq(ic, &state);
        ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
        rds_poll_cq(ic, &state);

        if (state.ack_next_valid)
                rds_ib_set_ack(ic, state.ack_next, state.ack_required);
        if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
                rds_send_drop_acked(conn, state.ack_recv, NULL);
                ic->i_ack_recv = state.ack_recv;
        }
        if (rds_conn_up(conn))
                rds_ib_attempt_ack(ic);

        /* If we ever end up with a really empty receive ring, we're
         * in deep trouble, as the sender will definitely see RNR
         * timeouts. */
        if (rds_ib_ring_empty(&ic->i_recv_ring))
                rds_ib_stats_inc(s_ib_rx_ring_empty);

        if (rds_ib_ring_low(&ic->i_recv_ring))
                rds_ib_recv_refill(conn, 0);
}

int rds_ib_recv(struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        int ret = 0;

        rdsdebug("conn %p\n", conn);

        /*
         * If we get a temporary posting failure in this context then
         * we're really low and we want the caller to back off for a bit.
         */
        mutex_lock(&ic->i_recv_mutex);
        if (rds_ib_recv_refill(conn, 0))
                ret = -ENOMEM;
        else
                rds_ib_stats_inc(s_ib_rx_refill_from_thread);
        mutex_unlock(&ic->i_recv_mutex);

        if (rds_conn_up(conn))
                rds_ib_attempt_ack(ic);

        return ret;
}

int __init rds_ib_recv_init(void)
{
        struct sysinfo si;
        int ret = -ENOMEM;

        /* Default to 30% of all available RAM for recv memory */
        si_meminfo(&si);
        rds_ib_sysctl_max_recv_allocation = si.totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;

        rds_ib_incoming_slab = kmem_cache_create("rds_ib_incoming",
                                        sizeof(struct rds_ib_incoming),
                                        0, 0, NULL);
        if (!rds_ib_incoming_slab)
                goto out;

        rds_ib_frag_slab = kmem_cache_create("rds_ib_frag",
                                        sizeof(struct rds_page_frag),
                                        0, 0, NULL);
        if (!rds_ib_frag_slab)
                kmem_cache_destroy(rds_ib_incoming_slab);
        else
                ret = 0;
out:
        return ret;
}

void rds_ib_recv_exit(void)
{
        kmem_cache_destroy(rds_ib_incoming_slab);
        kmem_cache_destroy(rds_ib_frag_slab);
}