/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <rdma/rdma_cm.h>

#include "rds.h"
#include "ib.h"

static struct kmem_cache *rds_ib_incoming_slab;
static struct kmem_cache *rds_ib_frag_slab;
static atomic_t rds_ib_allocation = ATOMIC_INIT(0);

static void rds_ib_frag_drop_page(struct rds_page_frag *frag)
{
	rdsdebug("frag %p page %p\n", frag, frag->f_page);
	__free_page(frag->f_page);
	frag->f_page = NULL;
}

static void rds_ib_frag_free(struct rds_page_frag *frag)
{
	rdsdebug("frag %p page %p\n", frag, frag->f_page);
	BUG_ON(frag->f_page != NULL);
	kmem_cache_free(rds_ib_frag_slab, frag);
}

/*
 * We map a page at a time.  Its fragments are posted in order.  This
 * is called in fragment order as the fragments get receive completion
 * events.  Each frag carries its own DMA mapping of RDS_FRAG_SIZE
 * bytes, so it can be unmapped while other frags of the same page are
 * still posted.
 *
 * It's OK for ring cleanup to call this in whatever order it likes because
 * DMA is not in flight and so we can unmap while other ring entries still
 * hold page references in their frags.
 */
static void rds_ib_recv_unmap_page(struct rds_ib_connection *ic,
				   struct rds_ib_recv_work *recv)
{
	struct rds_page_frag *frag = recv->r_frag;

	rdsdebug("recv %p frag %p page %p\n", recv, frag, frag->f_page);
	if (frag->f_mapped)
		ib_dma_unmap_page(ic->i_cm_id->device,
				  frag->f_mapped,
				  RDS_FRAG_SIZE, DMA_FROM_DEVICE);
	frag->f_mapped = 0;
}

void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_recv_work *recv;
	u32 i;

	for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
		struct ib_sge *sge;

		recv->r_ibinc = NULL;
		recv->r_frag = NULL;

		recv->r_wr.next = NULL;
		recv->r_wr.wr_id = i;
		recv->r_wr.sg_list = recv->r_sge;
		recv->r_wr.num_sge = RDS_IB_RECV_SGE;

		sge = rds_ib_data_sge(ic, recv->r_sge);
		sge->addr = 0;
		sge->length = RDS_FRAG_SIZE;
		sge->lkey = ic->i_mr->lkey;

		sge = rds_ib_header_sge(ic, recv->r_sge);
		sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
		sge->length = sizeof(struct rds_header);
		sge->lkey = ic->i_mr->lkey;
	}
}
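
/*
 * A sketch of the work request layout built above, assuming the usual
 * RDS_FRAG_SIZE of 4096 bytes and a 48-byte struct rds_header:
 *
 *	r_wr (wr_id = ring index i, num_sge = RDS_IB_RECV_SGE = 2)
 *	  data sge:   addr = 0 (filled in at refill time), length = 4096
 *	  header sge: addr = i_recv_hdrs_dma + i * 48, length = 48
 *
 * The data sge is left unaddressed here because the page fragment
 * backing it is only allocated and DMA-mapped when the entry is
 * refilled by rds_ib_recv_refill_one().
 */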

static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
				  struct rds_ib_recv_work *recv)
{
	if (recv->r_ibinc) {
		rds_inc_put(&recv->r_ibinc->ii_inc);
		recv->r_ibinc = NULL;
	}
	if (recv->r_frag) {
		rds_ib_recv_unmap_page(ic, recv);
		if (recv->r_frag->f_page)
			rds_ib_frag_drop_page(recv->r_frag);
		rds_ib_frag_free(recv->r_frag);
		recv->r_frag = NULL;
	}
}

void rds_ib_recv_clear_ring(struct rds_ib_connection *ic)
{
	u32 i;

	for (i = 0; i < ic->i_recv_ring.w_nr; i++)
		rds_ib_recv_clear_one(ic, &ic->i_recvs[i]);

	if (ic->i_frag.f_page)
		rds_ib_frag_drop_page(&ic->i_frag);
}

static int rds_ib_recv_refill_one(struct rds_connection *conn,
				  struct rds_ib_recv_work *recv,
				  gfp_t kptr_gfp, gfp_t page_gfp)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	dma_addr_t dma_addr;
	struct ib_sge *sge;
	int ret = -ENOMEM;

	if (recv->r_ibinc == NULL) {
		if (atomic_read(&rds_ib_allocation) >= rds_ib_sysctl_max_recv_allocation) {
			rds_ib_stats_inc(s_ib_rx_alloc_limit);
			goto out;
		}
		recv->r_ibinc = kmem_cache_alloc(rds_ib_incoming_slab,
						 kptr_gfp);
		if (recv->r_ibinc == NULL)
			goto out;
		atomic_inc(&rds_ib_allocation);
		INIT_LIST_HEAD(&recv->r_ibinc->ii_frags);
		rds_inc_init(&recv->r_ibinc->ii_inc, conn, conn->c_faddr);
	}

	if (recv->r_frag == NULL) {
		recv->r_frag = kmem_cache_alloc(rds_ib_frag_slab, kptr_gfp);
		if (recv->r_frag == NULL)
			goto out;
		INIT_LIST_HEAD(&recv->r_frag->f_item);
		recv->r_frag->f_page = NULL;
	}

	if (ic->i_frag.f_page == NULL) {
		ic->i_frag.f_page = alloc_page(page_gfp);
		if (ic->i_frag.f_page == NULL)
			goto out;
		ic->i_frag.f_offset = 0;
	}

	dma_addr = ib_dma_map_page(ic->i_cm_id->device,
				   ic->i_frag.f_page,
				   ic->i_frag.f_offset,
				   RDS_FRAG_SIZE,
				   DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ic->i_cm_id->device, dma_addr))
		goto out;

	/*
	 * Once we get the RDS_PAGE_LAST_OFF frag then rds_ib_recv_unmap_page()
	 * must be called on this recv.  This happens as completions hit
	 * in order or on connection shutdown.
	 */
	recv->r_frag->f_page = ic->i_frag.f_page;
	recv->r_frag->f_offset = ic->i_frag.f_offset;
	recv->r_frag->f_mapped = dma_addr;

	sge = rds_ib_data_sge(ic, recv->r_sge);
	sge->addr = dma_addr;
	sge->length = RDS_FRAG_SIZE;

	sge = rds_ib_header_sge(ic, recv->r_sge);
	sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header);
	sge->length = sizeof(struct rds_header);

	get_page(recv->r_frag->f_page);

	if (ic->i_frag.f_offset < RDS_PAGE_LAST_OFF) {
		ic->i_frag.f_offset += RDS_FRAG_SIZE;
	} else {
		put_page(ic->i_frag.f_page);
		ic->i_frag.f_page = NULL;
		ic->i_frag.f_offset = 0;
	}

	ret = 0;
out:
	return ret;
}
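
/*
 * A rough walk-through of the page carving above, assuming 4096-byte
 * frags and RDS_PAGE_LAST_OFF defined as
 * ((PAGE_SIZE / RDS_FRAG_SIZE) - 1) * RDS_FRAG_SIZE: on a 4KB-page
 * machine RDS_PAGE_LAST_OFF is 0, so every alloc_page() yields exactly
 * one frag; get_page() takes the frag's reference and the else branch
 * immediately drops the connection's own reference.  On a 64KB-page
 * machine the same page is carved into 16 frags at offsets 0, 4096,
 * ..., 61440, with i_frag.f_offset tracking the next free slice and
 * the page only returned once every frag has put its reference.
 */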

/*
 * This tries to allocate and post unused work requests after making sure that
 * they have all the allocations they need to queue received fragments into
 * sockets.  The i_recv_mutex is held here so that ring_alloc and _unalloc
 * pairs don't go unmatched.
 *
 * -1 is returned if posting fails due to temporary resource exhaustion.
 */
int rds_ib_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
		       gfp_t page_gfp, int prefill)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_recv_work *recv;
	struct ib_recv_wr *failed_wr;
	unsigned int posted = 0;
	int ret = 0;
	u32 pos;

	while ((prefill || rds_conn_up(conn))
			&& rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
		if (pos >= ic->i_recv_ring.w_nr) {
			printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
					pos);
			ret = -EINVAL;
			break;
		}

		recv = &ic->i_recvs[pos];
		ret = rds_ib_recv_refill_one(conn, recv, kptr_gfp, page_gfp);
		if (ret) {
			ret = -1;
			break;
		}

		/* XXX when can this fail? */
		ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
		rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv,
			 recv->r_ibinc, recv->r_frag->f_page,
			 (long) recv->r_frag->f_mapped, ret);
		if (ret) {
			rds_ib_conn_error(conn, "recv post on "
			       "%pI4 returned %d, disconnecting and "
			       "reconnecting\n", &conn->c_faddr,
			       ret);
			ret = -1;
			break;
		}

		posted++;
	}

	/* We're doing flow control - update the window. */
	if (ic->i_flowctl && posted)
		rds_ib_advertise_credits(conn, posted);

	if (ret)
		rds_ib_ring_unalloc(&ic->i_recv_ring, 1);
	return ret;
}

void rds_ib_inc_purge(struct rds_incoming *inc)
{
	struct rds_ib_incoming *ibinc;
	struct rds_page_frag *frag;
	struct rds_page_frag *pos;

	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
	rdsdebug("purging ibinc %p inc %p\n", ibinc, inc);

	list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) {
		list_del_init(&frag->f_item);
		rds_ib_frag_drop_page(frag);
		rds_ib_frag_free(frag);
	}
}

void rds_ib_inc_free(struct rds_incoming *inc)
{
	struct rds_ib_incoming *ibinc;

	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);

	rds_ib_inc_purge(inc);
	rdsdebug("freeing ibinc %p inc %p\n", ibinc, inc);
	BUG_ON(!list_empty(&ibinc->ii_frags));
	kmem_cache_free(rds_ib_incoming_slab, ibinc);
	atomic_dec(&rds_ib_allocation);
	BUG_ON(atomic_read(&rds_ib_allocation) < 0);
}

int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
			    size_t size)
{
	struct rds_ib_incoming *ibinc;
	struct rds_page_frag *frag;
	struct iovec *iov = first_iov;
	unsigned long to_copy;
	unsigned long frag_off = 0;
	unsigned long iov_off = 0;
	int copied = 0;
	int ret;
	u32 len;

	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
	frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
	len = be32_to_cpu(inc->i_hdr.h_len);

	while (copied < size && copied < len) {
		if (frag_off == RDS_FRAG_SIZE) {
			frag = list_entry(frag->f_item.next,
					  struct rds_page_frag, f_item);
			frag_off = 0;
		}
		while (iov_off == iov->iov_len) {
			iov_off = 0;
			iov++;
		}

		to_copy = min(iov->iov_len - iov_off, RDS_FRAG_SIZE - frag_off);
		to_copy = min_t(size_t, to_copy, size - copied);
		to_copy = min_t(unsigned long, to_copy, len - copied);

		rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag "
			 "[%p, %lu] + %lu\n",
			 to_copy, iov->iov_base, iov->iov_len, iov_off,
			 frag->f_page, frag->f_offset, frag_off);

		/* XXX needs + offset for multiple recvs per page */
		ret = rds_page_copy_to_user(frag->f_page,
					    frag->f_offset + frag_off,
					    iov->iov_base + iov_off,
					    to_copy);
		if (ret) {
			copied = ret;
			break;
		}

		iov_off += to_copy;
		frag_off += to_copy;
		copied += to_copy;
	}

	return copied;
}
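
/*
 * Worked example of the copy loop above, assuming 4096-byte frags:
 * copying a 6000-byte message into two 3000-byte iovecs goes
 *
 *	frag 0 [0, 3000)    -> iov 0	(to_copy = 3000, iov exhausted)
 *	frag 0 [3000, 4096) -> iov 1	(to_copy = 1096, frag exhausted)
 *	frag 1 [0, 1904)    -> iov 1	(to_copy = 1904, len reached)
 *
 * i.e. each pass copies min(space left in iov, bytes left in frag,
 * bytes left in the message) and advances whichever ran out.
 */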

/* ic starts out kzalloc()ed */
void rds_ib_recv_init_ack(struct rds_ib_connection *ic)
{
	struct ib_send_wr *wr = &ic->i_ack_wr;
	struct ib_sge *sge = &ic->i_ack_sge;

	sge->addr = ic->i_ack_dma;
	sge->length = sizeof(struct rds_header);
	sge->lkey = ic->i_mr->lkey;

	wr->sg_list = sge;
	wr->num_sge = 1;
	wr->opcode = IB_WR_SEND;
	wr->wr_id = RDS_IB_ACK_WR_ID;
	wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
}

/*
 * You'd think that with reliable IB connections you wouldn't need to ack
 * messages that have been received.  The problem is that IB hardware generates
 * an ack message before it has DMAed the message into memory.  This creates a
 * potential message loss if the HCA is disabled for any reason between when it
 * sends the ack and before the message is DMAed and processed.  This is only a
 * potential issue if another HCA is available for fail-over.
 *
 * When the remote host receives our ack they'll free the sent message from
 * their send queue.  To decrease the latency of this we always send an ack
 * immediately after we've received messages.
 *
 * For simplicity, we only have one ack in flight at a time.  This puts
 * pressure on senders to have deep enough send queues to absorb the latency of
 * a single ack frame being in flight.  This might not be good enough.
 *
 * This is implemented by having a long-lived send_wr and sge which point to a
 * statically allocated ack frame.  This ack wr does not fall under the ring
 * accounting that the tx and rx wrs do.  The QP attribute specifically makes
 * room for it beyond the ring size.  Send completion notices its special
 * wr_id and avoids working with the ring in that case.
 */
#ifndef KERNEL_HAS_ATOMIC64
static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
			   int ack_required)
{
	unsigned long flags;

	spin_lock_irqsave(&ic->i_ack_lock, flags);
	ic->i_ack_next = seq;
	if (ack_required)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	spin_unlock_irqrestore(&ic->i_ack_lock, flags);
}

static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
	unsigned long flags;
	u64 seq;

	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

	spin_lock_irqsave(&ic->i_ack_lock, flags);
	seq = ic->i_ack_next;
	spin_unlock_irqrestore(&ic->i_ack_lock, flags);

	return seq;
}
#else
static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
			   int ack_required)
{
	atomic64_set(&ic->i_ack_next, seq);
	if (ack_required) {
		smp_mb__before_clear_bit();
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	}
}

static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	smp_mb__after_clear_bit();

	return atomic64_read(&ic->i_ack_next);
}
#endif

static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credits)
{
	struct rds_header *hdr = ic->i_ack;
	struct ib_send_wr *failed_wr;
	u64 seq;
	int ret;

	seq = rds_ib_get_ack(ic);

	rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
	rds_message_populate_header(hdr, 0, 0, 0);
	hdr->h_ack = cpu_to_be64(seq);
	hdr->h_credit = adv_credits;
	rds_message_make_checksum(hdr);
	ic->i_ack_queued = jiffies;

	ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, &failed_wr);
	if (unlikely(ret)) {
		/* Failed to send. Release the WR, and
		 * force another ACK.
		 */
		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

		rds_ib_stats_inc(s_ib_ack_send_failure);
		/* Need to finesse this later. */
		BUG();
	} else
		rds_ib_stats_inc(s_ib_ack_sent);
}

/*
 * There are 3 ways of getting acknowledgements to the peer:
 *  1. We call rds_ib_attempt_ack from the recv completion handler
 *     to send an ACK-only frame.
 *     However, there can be only one such frame in the send queue
 *     at any time, so we may have to postpone it.
 *  2. When another (data) packet is transmitted while there's
 *     an ACK in the queue, we piggyback the ACK sequence number
 *     on the data packet.
 *  3. If the ACK WR is done sending, we get called from the
 *     send queue completion handler, and check whether there's
 *     another ACK pending (postponed because the WR was on the
 *     queue).  If so, we transmit it.
 *
 * We maintain 2 variables:
 *  - i_ack_flags, which keeps track of whether the ACK WR
 *    is currently in the send queue or not (IB_ACK_IN_FLIGHT)
 *  - i_ack_next, which is the last sequence number we received
 *
 * Potentially, send queue and receive queue handlers can run concurrently.
 * It would be nice to not have to use a spinlock to synchronize things,
 * but the one problem that rules this out is that 64bit updates are
 * not atomic on all platforms.  Things would be a lot simpler if
 * we had atomic64 or maybe cmpxchg64 everywhere.
 *
 * Reconnecting complicates this picture just slightly.  When we
 * reconnect, we may be seeing duplicate packets.  The peer
 * is retransmitting them, because it hasn't seen an ACK for
 * them.  It is important that we ACK these.
 *
 * ACK mitigation adds a header flag "ACK_REQUIRED"; any packet with
 * this flag set *MUST* be acknowledged immediately.
 */
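
/*
 * A compressed sketch of the flag transitions described above; the
 * code below, not this comment, is authoritative:
 *
 *	recv completion:  rds_ib_set_ack() records the sequence in
 *			  i_ack_next and may set IB_ACK_REQUESTED
 *	attempt_ack:	  REQUESTED set and IN_FLIGHT clear
 *			  -> clear REQUESTED, set IN_FLIGHT, post the wr
 *	send completion:  clear IN_FLIGHT, retry attempt_ack for any
 *			  ack that was postponed while the wr was out
 *	piggyback:	  xmit path test-and-clears REQUESTED and takes
 *			  the latest sequence from i_ack_next
 */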

/*
 * When we get here, we're called from the recv queue handler.
 * Check whether we ought to transmit an ACK.
 */
void rds_ib_attempt_ack(struct rds_ib_connection *ic)
{
	unsigned int adv_credits;

	if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
		return;

	if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) {
		rds_ib_stats_inc(s_ib_ack_send_delayed);
		return;
	}

	/* Can we get a send credit? */
	if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
		rds_ib_stats_inc(s_ib_tx_throttle);
		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
		return;
	}

	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	rds_ib_send_ack(ic, adv_credits);
}

/*
 * We get here from the send completion handler, when the
 * adapter tells us the ACK frame was sent.
 */
void rds_ib_ack_send_complete(struct rds_ib_connection *ic)
{
	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
	rds_ib_attempt_ack(ic);
}

/*
 * This is called by the regular xmit code when it wants to piggyback
 * an ACK on an outgoing frame.
 */
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
{
	if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
		rds_ib_stats_inc(s_ib_ack_send_piggybacked);
	return rds_ib_get_ack(ic);
}

static struct rds_header *rds_ib_get_header(struct rds_connection *conn,
					    struct rds_ib_recv_work *recv,
					    u32 data_len)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	void *hdr_buff = &ic->i_recv_hdrs[recv - ic->i_recvs];
	void *addr;
	u32 misplaced_hdr_bytes;

	/*
	 * Support header at the front (RDS 3.1+) as well as header-at-end.
	 *
	 * Cases:
	 * 1) header all in header buff (great!)
	 * 2) header all in data page (copy all to header buff)
	 * 3) header split across hdr buf + data page
	 *    (move bit in hdr buff to end before copying other bit from data page)
	 */
	if (conn->c_version > RDS_PROTOCOL_3_0 || data_len == RDS_FRAG_SIZE)
		return hdr_buff;

	if (data_len <= (RDS_FRAG_SIZE - sizeof(struct rds_header))) {
		addr = kmap_atomic(recv->r_frag->f_page, KM_SOFTIRQ0);
		memcpy(hdr_buff,
		       addr + recv->r_frag->f_offset + data_len,
		       sizeof(struct rds_header));
		kunmap_atomic(addr, KM_SOFTIRQ0);
		return hdr_buff;
	}

	misplaced_hdr_bytes = (sizeof(struct rds_header) - (RDS_FRAG_SIZE - data_len));

	/* The tail of the header sits at the start of hdr_buff; slide it
	 * to the *end* of the buffer before pulling the head in from the
	 * data page below. */
	memmove(hdr_buff + (sizeof(struct rds_header) - misplaced_hdr_bytes),
		hdr_buff, misplaced_hdr_bytes);

	addr = kmap_atomic(recv->r_frag->f_page, KM_SOFTIRQ0);
	memcpy(hdr_buff, addr + recv->r_frag->f_offset + data_len,
	       sizeof(struct rds_header) - misplaced_hdr_bytes);
	kunmap_atomic(addr, KM_SOFTIRQ0);
	return hdr_buff;
}
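
/*
 * Worked example of the split-header case above, assuming
 * RDS_FRAG_SIZE = 4096 and a 48-byte struct rds_header: a 3.0 peer
 * sending data_len = 4080 lands the payload plus the first 16 header
 * bytes in the data page, leaving misplaced_hdr_bytes =
 * 48 - (4096 - 4080) = 32 trailing header bytes at the start of the
 * header buffer.  The memmove slides those 32 bytes out to offset 16
 * (the end of the buffer) and the memcpy then fills bytes [0, 16)
 * from the tail of the data page.
 */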

/*
 * It's kind of lame that we're copying from the posted receive pages into
 * long-lived bitmaps.  We could have posted the bitmaps and rdma written into
 * them.  But receiving new congestion bitmaps should be a *rare* event, so
 * hopefully we won't need to invest that complexity in making it more
 * efficient.  By copying we can share a simpler core with TCP which has to
 * copy.
 */
static void rds_ib_cong_recv(struct rds_connection *conn,
			     struct rds_ib_incoming *ibinc)
{
	struct rds_cong_map *map;
	unsigned int map_off;
	unsigned int map_page;
	struct rds_page_frag *frag;
	unsigned long frag_off;
	unsigned long to_copy;
	unsigned long copied;
	uint64_t uncongested = 0;
	void *addr;

	/* catch completely corrupt packets */
	if (be32_to_cpu(ibinc->ii_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
		return;

	map = conn->c_fcong;
	map_page = 0;
	map_off = 0;

	frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
	frag_off = 0;

	copied = 0;

	while (copied < RDS_CONG_MAP_BYTES) {
		uint64_t *src, *dst;
		unsigned int k;

		to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
		BUG_ON(to_copy & 7); /* Must be 64bit aligned. */

		addr = kmap_atomic(frag->f_page, KM_SOFTIRQ0);

		src = addr + frag_off;
		dst = (void *)map->m_page_addrs[map_page] + map_off;
		for (k = 0; k < to_copy; k += 8) {
			/* Record ports that became uncongested, ie
			 * bits that changed from 1 to 0. */
			uncongested |= ~(*src) & *dst;
			*dst++ = *src++;
		}
		kunmap_atomic(addr, KM_SOFTIRQ0);

		copied += to_copy;

		map_off += to_copy;
		if (map_off == PAGE_SIZE) {
			map_off = 0;
			map_page++;
		}

		frag_off += to_copy;
		if (frag_off == RDS_FRAG_SIZE) {
			frag = list_entry(frag->f_item.next,
					  struct rds_page_frag, f_item);
			frag_off = 0;
		}
	}

	/* the congestion map is in little endian order */
	uncongested = le64_to_cpu(uncongested);

	rds_cong_map_updated(map, uncongested);
}
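
/*
 * Small worked example of the uncongested accumulation above: if an
 * old map word (*dst) was 0x6 and the incoming word (*src) is 0x4,
 * then ~(*src) & *dst == ~0x4 & 0x6 == 0x2 -- exactly the bits that
 * were set in the old word and are clear in the new one.  The
 * accumulated mask is handed to rds_cong_map_updated() once the whole
 * map has been copied.
 */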

/*
 * Rings are posted with all the allocations they'll need to queue the
 * incoming message to the receiving socket so this can't fail.
 * All fragments start with a header, so we can make sure we're not receiving
 * garbage, and we can tell a small 8 byte fragment from an ACK frame.
 */
struct rds_ib_ack_state {
	u64		ack_next;
	u64		ack_recv;
	unsigned int	ack_required:1;
	unsigned int	ack_next_valid:1;
	unsigned int	ack_recv_valid:1;
};

static void rds_ib_process_recv(struct rds_connection *conn,
				struct rds_ib_recv_work *recv, u32 data_len,
				struct rds_ib_ack_state *state)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_incoming *ibinc = ic->i_ibinc;
	struct rds_header *ihdr, *hdr;

	/* XXX shut down the connection if port 0,0 are seen? */

	rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv,
		 data_len);

	if (data_len < sizeof(struct rds_header)) {
		rds_ib_conn_error(conn, "incoming message "
		       "from %pI4 didn't include a "
		       "header, disconnecting and "
		       "reconnecting\n",
		       &conn->c_faddr);
		return;
	}
	data_len -= sizeof(struct rds_header);

	ihdr = rds_ib_get_header(conn, recv, data_len);

	/* Validate the checksum. */
	if (!rds_message_verify_checksum(ihdr)) {
		rds_ib_conn_error(conn, "incoming message "
		       "from %pI4 has corrupted header - "
		       "forcing a reconnect\n",
		       &conn->c_faddr);
		rds_stats_inc(s_recv_drop_bad_checksum);
		return;
	}

	/* Process the ACK sequence which comes with every packet */
	state->ack_recv = be64_to_cpu(ihdr->h_ack);
	state->ack_recv_valid = 1;

	/* Process the credits update if there was one */
	if (ihdr->h_credit)
		rds_ib_send_add_credits(conn, ihdr->h_credit);

	if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && data_len == 0) {
		/* This is an ACK-only packet.  It gets special treatment
		 * here because, historically, ACKs were rather special
		 * beasts.
		 */
		rds_ib_stats_inc(s_ib_ack_received);

		/*
		 * Usually the frags make their way on to incs and are then freed as
		 * the inc is freed.  We don't go that route, so we have to drop the
		 * page ref ourselves.  We can't just leave the page on the recv
		 * because that confuses the dma mapping of pages and each recv's use
		 * of a partial page.  We can leave the frag, though, it will be
		 * reused.
		 *
		 * FIXME: Fold this into the code path below.
		 */
		rds_ib_frag_drop_page(recv->r_frag);
		return;
	}

	/*
	 * If we don't already have an inc on the connection then this
	 * fragment has a header and starts a message.  Copy its header
	 * into the inc and save the inc so we can hang upcoming fragments
	 * off its list.
	 */
	if (ibinc == NULL) {
		ibinc = recv->r_ibinc;
		recv->r_ibinc = NULL;
		ic->i_ibinc = ibinc;

		hdr = &ibinc->ii_inc.i_hdr;
		memcpy(hdr, ihdr, sizeof(*hdr));
		ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);

		rdsdebug("ic %p ibinc %p rem %u flag 0x%x\n", ic, ibinc,
			 ic->i_recv_data_rem, hdr->h_flags);
	} else {
		hdr = &ibinc->ii_inc.i_hdr;
		/* We can't just use memcmp here; fragments of a
		 * single message may carry different ACKs */
		if (hdr->h_sequence != ihdr->h_sequence
		    || hdr->h_len != ihdr->h_len
		    || hdr->h_sport != ihdr->h_sport
		    || hdr->h_dport != ihdr->h_dport) {
			rds_ib_conn_error(conn,
				"fragment header mismatch; forcing reconnect\n");
			return;
		}
	}

	list_add_tail(&recv->r_frag->f_item, &ibinc->ii_frags);
	recv->r_frag = NULL;

	if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
		ic->i_recv_data_rem -= RDS_FRAG_SIZE;
	else {
		ic->i_recv_data_rem = 0;
		ic->i_ibinc = NULL;

		if (ibinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
			rds_ib_cong_recv(conn, ibinc);
		else {
			rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
					  &ibinc->ii_inc, GFP_ATOMIC,
					  KM_SOFTIRQ0);
			state->ack_next = be64_to_cpu(hdr->h_sequence);
			state->ack_next_valid = 1;
		}

		/* Evaluate the ACK_REQUIRED flag *after* we received
		 * the complete frame, and after bumping the next_rx
		 * sequence. */
		if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) {
			rds_stats_inc(s_recv_ack_required);
			state->ack_required = 1;
		}

		rds_inc_put(&ibinc->ii_inc);
	}
}
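
/*
 * Reassembly sketch for the path above, assuming 4096-byte frags: a
 * message with h_len = 10000 arrives as three fragments.  The first
 * completion installs the inc and sets i_recv_data_rem = 10000; each
 * completion then chains its frag onto ii_frags, stepping
 * i_recv_data_rem 10000 -> 5904 -> 1808 -> 0.  The final step hands
 * the inc to rds_recv_incoming() (or rds_ib_cong_recv() for bitmap
 * frames) and clears the connection's i_ibinc pointer.
 */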

/*
 * Plucking the oldest entry from the ring can be done concurrently with
 * the thread refilling the ring.  Each ring operation is protected by
 * spinlocks and the transient state of refilling doesn't change the
 * recording of which entry is oldest.
 *
 * This relies on IB only calling one cq comp_handler for each cq so that
 * there will only be one caller of rds_recv_incoming() per RDS connection.
 */
void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_wc wc;
	struct rds_ib_ack_state state = { 0, };
	struct rds_ib_recv_work *recv;

	rdsdebug("conn %p cq %p\n", conn, cq);

	rds_ib_stats_inc(s_ib_rx_cq_call);

	ib_req_notify_cq(cq, IB_CQ_SOLICITED);

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
			 (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
			 be32_to_cpu(wc.ex.imm_data));
		rds_ib_stats_inc(s_ib_rx_cq_event);

		recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];

		rds_ib_recv_unmap_page(ic, recv);

		/*
		 * Also process recvs in connecting state because it is possible
		 * to get a recv completion _before_ the rdmacm ESTABLISHED
		 * event is processed.
		 */
		if (rds_conn_up(conn) || rds_conn_connecting(conn)) {
			/* We expect errors as the qp is drained during shutdown */
			if (wc.status == IB_WC_SUCCESS) {
				rds_ib_process_recv(conn, recv, wc.byte_len, &state);
			} else {
				rds_ib_conn_error(conn, "recv completion on "
				       "%pI4 had status %u, disconnecting and "
				       "reconnecting\n", &conn->c_faddr,
				       wc.status);
			}
		}

		rds_ib_ring_free(&ic->i_recv_ring, 1);
	}

	if (state.ack_next_valid)
		rds_ib_set_ack(ic, state.ack_next, state.ack_required);
	if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
		rds_send_drop_acked(conn, state.ack_recv, NULL);
		ic->i_ack_recv = state.ack_recv;
	}
	if (rds_conn_up(conn))
		rds_ib_attempt_ack(ic);

	/* If we ever end up with a really empty receive ring, we're
	 * in deep trouble, as the sender will definitely see RNR
	 * timeouts. */
	if (rds_ib_ring_empty(&ic->i_recv_ring))
		rds_ib_stats_inc(s_ib_rx_ring_empty);

	/*
	 * If the ring is running low, then schedule the thread to refill.
	 */
	if (rds_ib_ring_low(&ic->i_recv_ring))
		queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
}
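
/*
 * Note that the ack state above is accumulated across however many
 * completions one handler invocation drains, so rds_ib_set_ack() and
 * rds_send_drop_acked() run once per pass rather than once per
 * message: draining completions for sequences 7, 8 and 9 results in
 * a single i_ack_next update (to 9) and a single sweep of the send
 * queue against the peer's latest h_ack.
 */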

int rds_ib_recv(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	int ret = 0;

	rdsdebug("conn %p\n", conn);

	/*
	 * If we get a temporary posting failure in this context then
	 * we're really low and we want the caller to back off for a bit.
	 */
	mutex_lock(&ic->i_recv_mutex);
	if (rds_ib_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 0))
		ret = -ENOMEM;
	else
		rds_ib_stats_inc(s_ib_rx_refill_from_thread);
	mutex_unlock(&ic->i_recv_mutex);

	if (rds_conn_up(conn))
		rds_ib_attempt_ack(ic);

	return ret;
}

int __init rds_ib_recv_init(void)
{
	struct sysinfo si;
	int ret = -ENOMEM;

	/* Default to roughly one third of all available RAM for recv memory */
	si_meminfo(&si);
	rds_ib_sysctl_max_recv_allocation = si.totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;

	rds_ib_incoming_slab = kmem_cache_create("rds_ib_incoming",
					sizeof(struct rds_ib_incoming),
					0, 0, NULL);
	if (rds_ib_incoming_slab == NULL)
		goto out;

	rds_ib_frag_slab = kmem_cache_create("rds_ib_frag",
					sizeof(struct rds_page_frag),
					0, 0, NULL);
	if (rds_ib_frag_slab == NULL)
		kmem_cache_destroy(rds_ib_incoming_slab);
	else
		ret = 0;
out:
	return ret;
}
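
/*
 * Quick sanity check of the sizing math above, assuming 4096-byte
 * pages and frags: with 4GB of RAM, si.totalram is roughly 2^20
 * pages, so the cap works out to about 349,525 outstanding
 * allocations.  The bound on actual memory use is loose, since
 * rds_ib_allocation counts incs rather than bytes.
 */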

void rds_ib_recv_exit(void)
{
	kmem_cache_destroy(rds_ib_incoming_slab);
	kmem_cache_destroy(rds_ib_frag_slab);
}