/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/device.h>
#include <linux/dmapool.h>

#include "rds.h"
#include "ib.h"

/*
 * Convert an IB-specific completion status to an RDS error code and call
 * the core completion handler.
 */
static void rds_ib_send_complete(struct rds_message *rm,
				 int wc_status,
				 void (*complete)(struct rds_message *rm, int status))
{
	int notify_status;

	switch (wc_status) {
	case IB_WC_WR_FLUSH_ERR:
		return;

	case IB_WC_SUCCESS:
		notify_status = RDS_RDMA_SUCCESS;
		break;

	case IB_WC_REM_ACCESS_ERR:
		notify_status = RDS_RDMA_REMOTE_ERROR;
		break;

	default:
		notify_status = RDS_RDMA_OTHER_ERROR;
		break;
	}
	complete(rm, notify_status);
}

static void rds_ib_send_unmap_rm(struct rds_ib_connection *ic,
				 struct rds_ib_send_work *send,
				 int wc_status)
{
	struct rds_message *rm = send->s_rm;

	rdsdebug("ic %p send %p rm %p\n", ic, send, rm);

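	/* The data payload was mapped into the IB device in rds_ib_xmit;
	 * unmap it before tearing down any RDMA or atomic work riding on
	 * this message. */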
	ib_dma_unmap_sg(ic->i_cm_id->device,
			rm->data.m_sg, rm->data.m_nents,
			DMA_TO_DEVICE);

	if (rm->rdma.m_rdma_op.r_active) {
		struct rds_rdma_op *op = &rm->rdma.m_rdma_op;

		if (op->r_mapped) {
			ib_dma_unmap_sg(ic->i_cm_id->device,
					op->r_sg, op->r_nents,
					op->r_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
			op->r_mapped = 0;
		}

		/* If the user asked for a completion notification on this
		 * message, we can implement three different semantics:
		 *  1. Notify when we received the ACK on the RDS message
		 *     that was queued with the RDMA. This provides reliable
		 *     notification of RDMA status at the expense of a one-way
		 *     packet delay.
		 *  2. Notify when the IB stack gives us the completion event for
		 *     the RDMA operation.
		 *  3. Notify when the IB stack gives us the completion event for
		 *     the accompanying RDS messages.
		 * Here, we implement approach #3. To implement approach #2,
		 * call rds_rdma_send_complete from the cq_handler. To implement #1,
		 * don't call rds_rdma_send_complete at all, and fall back to the notify
		 * handling in the ACK processing code.
		 *
		 * Note: There's no need to explicitly sync any RDMA buffers using
		 * ib_dma_sync_sg_for_cpu - the completion for the RDMA
		 * operation itself unmapped the RDMA buffers, which takes care
		 * of synching.
		 */
		rds_ib_send_complete(rm, wc_status, rds_rdma_send_complete);

		if (rm->rdma.m_rdma_op.r_write)
			rds_stats_add(s_send_rdma_bytes, rm->rdma.m_rdma_op.r_bytes);
		else
			rds_stats_add(s_recv_rdma_bytes, rm->rdma.m_rdma_op.r_bytes);
	}

	if (rm->atomic.op_active) {
		struct rm_atomic_op *op = &rm->atomic;

		/* unmap atomic recvbuf */
		if (op->op_mapped) {
			ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1,
					DMA_FROM_DEVICE);
			op->op_mapped = 0;
		}

		rds_ib_send_complete(rm, wc_status, rds_atomic_send_complete);

		if (rm->atomic.op_type == RDS_ATOMIC_TYPE_CSWP)
			rds_stats_inc(s_atomic_cswp);
		else
			rds_stats_inc(s_atomic_fadd);
	}

	/* If anyone waited for this message to get flushed out, wake
	 * them up now */
	rds_message_unmapped(rm);

	rds_message_put(rm);
	send->s_rm = NULL;
}

void rds_ib_send_init_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		struct ib_sge *sge;

		send->s_rm = NULL;
		send->s_op = NULL;

		send->s_wr.wr_id = i;
		send->s_wr.sg_list = send->s_sge;
		send->s_wr.ex.imm_data = 0;

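		/* sge[0] always points at this entry's slot in the send header
		 * array; sge[1] is filled in with the data fragment at transmit
		 * time, so only its lkey needs to be set up front. */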
		sge = &send->s_sge[0];
		sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
		sge->length = sizeof(struct rds_header);
		sge->lkey = ic->i_mr->lkey;

		send->s_sge[1].lkey = ic->i_mr->lkey;
	}
}

void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		if (!send->s_rm || send->s_wr.opcode == 0xdead)
			continue;
		rds_ib_send_unmap_rm(ic, send, IB_WC_WR_FLUSH_ERR);
	}
}

/*
 * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
 * operations performed in the send path. As the sender allocs and potentially
 * unallocs the next free entry in the ring it doesn't alter which is
 * the next to be freed, which is what this is concerned with.
 */
void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_wc wc;
	struct rds_ib_send_work *send;
	u32 completed;
	u32 oldest;
	u32 i = 0;
	int ret;

	rdsdebug("cq %p conn %p\n", cq, conn);
	rds_ib_stats_inc(s_ib_tx_cq_call);
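	/* Re-arm the CQ before draining it so that a completion arriving
	 * while we poll still triggers another callback. */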
	ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	if (ret)
		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
			 (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
			 be32_to_cpu(wc.ex.imm_data));
		rds_ib_stats_inc(s_ib_tx_cq_event);

		if (wc.wr_id == RDS_IB_ACK_WR_ID) {
			if (ic->i_ack_queued + HZ/2 < jiffies)
				rds_ib_stats_inc(s_ib_tx_stalled);
			rds_ib_ack_send_complete(ic);
			continue;
		}

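		/* Data send WRs carry their ring index in wr_id (set up in
		 * rds_ib_send_init_ring), so everything from the oldest
		 * outstanding entry up to and including this one has completed. */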
		oldest = rds_ib_ring_oldest(&ic->i_send_ring);

		completed = rds_ib_ring_completed(&ic->i_send_ring, wc.wr_id, oldest);

		for (i = 0; i < completed; i++) {
			send = &ic->i_sends[oldest];

			/* In the error case, wc.opcode sometimes contains garbage */
			switch (send->s_wr.opcode) {
			case IB_WR_SEND:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_READ:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_ATOMIC_CMP_AND_SWP:
				if (send->s_rm)
					rds_ib_send_unmap_rm(ic, send, wc.status);
				break;
			default:
				if (printk_ratelimit())
					printk(KERN_NOTICE
					       "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
					       __func__, send->s_wr.opcode);
				break;
			}

			send->s_wr.opcode = 0xdead;
			send->s_wr.num_sge = 1;
			if (send->s_queued + HZ/2 < jiffies)
				rds_ib_stats_inc(s_ib_tx_stalled);

			/* If an RDMA operation produced an error, signal this right
			 * away. If we don't, the subsequent SEND that goes with this
			 * RDMA will be canceled with ERR_WFLUSH, and the application
			 * never learns that the RDMA failed. */
			if (unlikely(wc.status == IB_WC_REM_ACCESS_ERR && send->s_op)) {
				struct rds_message *rm;

				rm = rds_send_get_message(conn, send->s_op);
				if (rm) {
					rds_ib_send_unmap_rm(ic, send, wc.status);
					rds_ib_send_complete(rm, wc.status, rds_rdma_send_complete);
					rds_message_put(rm);
				}
			}

			oldest = (oldest + 1) % ic->i_send_ring.w_nr;
		}

		rds_ib_ring_free(&ic->i_send_ring, completed);

		if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
		    test_bit(0, &conn->c_map_queued))
			queue_delayed_work(rds_wq, &conn->c_send_w, 0);

		/* We expect errors as the qp is drained during shutdown */
		if (wc.status != IB_WC_SUCCESS && rds_conn_up(conn)) {
			rds_ib_conn_error(conn,
				"send completion on %pI4 "
				"had status %u, disconnecting and reconnecting\n",
				&conn->c_faddr, wc.status);
		}
	}
}

/*
 * This is the main function for allocating credits when sending
 * messages.
 *
 * Conceptually, we have two counters:
 *  - send credits: this tells us how many WRs we're allowed
 *    to submit without overrunning the receiver's queue. For
 *    each SEND WR we post, we decrement this by one.
 *
 *  - posted credits: this tells us how many WRs we recently
 *    posted to the receive queue. This value is transferred
 *    to the peer as a "credit update" in an RDS header field.
 *    Every time we transmit credits to the peer, we subtract
 *    the amount of transferred credits from this counter.
 *
 * It is essential that we avoid situations where both sides have
 * exhausted their send credits, and are unable to send new credits
 * to the peer. We achieve this by requiring that we send at least
 * one credit update to the peer before exhausting our credits.
 * When new credits arrive, we subtract one credit that is withheld
 * until we've posted new buffers and are ready to transmit these
 * credits (see rds_ib_send_add_credits below).
 *
 * The RDS send code is essentially single-threaded; rds_send_xmit
 * grabs c_send_lock to ensure exclusive access to the send ring.
 * However, the ACK sending code is independent and can race with
 * message SENDs.
 *
 * In the send path, we need to update the counters for send credits
 * and the counter of posted buffers atomically - when we use the
 * last available credit, we cannot allow another thread to race us
 * and grab the posted credits counter. Hence, we have to use a
 * spinlock to protect the credit counter, or use atomics.
 *
 * Spinlocks shared between the send and the receive path are bad,
 * because they create unnecessary delays. An early implementation
 * using a spinlock showed a 5% degradation in throughput at some
 * loads.
 *
 * This implementation avoids spinlocks completely, putting both
 * counters into a single atomic, and updating that atomic using
 * atomic_add (in the receive path, when receiving fresh credits),
 * and using atomic_cmpxchg when updating the two counters.
 */
int rds_ib_send_grab_credits(struct rds_ib_connection *ic,
			     u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
{
	unsigned int avail, posted, got = 0, advertise;
	long oldval, newval;

	*adv_credits = 0;
	if (!ic->i_flowctl)
		return wanted;

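	/* Both counters live in the single atomic ic->i_credits, packed and
	 * unpacked by the IB_SET_SEND/POST_CREDITS and IB_GET_SEND/POST_CREDITS
	 * helpers.  We work on a snapshot and retry the cmpxchg below if the
	 * receive path added credits underneath us. */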
try_again:
	advertise = 0;
	oldval = newval = atomic_read(&ic->i_credits);
	posted = IB_GET_POST_CREDITS(oldval);
	avail = IB_GET_SEND_CREDITS(oldval);

	rdsdebug("rds_ib_send_grab_credits(%u): credits=%u posted=%u\n",
			wanted, avail, posted);

	/* The last credit must be used to send a credit update. */
	if (avail && !posted)
		avail--;

	if (avail < wanted) {
		struct rds_connection *conn = ic->i_cm_id->context;

		/* Oops, there aren't that many credits left! */
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		got = avail;
	} else {
		/* Sometimes you get what you want, lalala. */
		got = wanted;
	}
	newval -= IB_SET_SEND_CREDITS(got);

	/*
	 * If need_posted is non-zero, then the caller wants
	 * the posted credit count regardless of whether any send
	 * credits are available.
	 */
	if (posted && (got || need_posted)) {
		advertise = min_t(unsigned int, posted, max_posted);
		newval -= IB_SET_POST_CREDITS(advertise);
	}

	/* Finally bill everything */
	if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
		goto try_again;

	*adv_credits = advertise;
	return got;
}

void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (credits == 0)
		return;

	rdsdebug("rds_ib_send_add_credits(%u): current=%u%s\n",
			credits,
			IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
			test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");

	atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);

	rds_ib_stats_inc(s_ib_rx_credit_updates);
}

void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (posted == 0)
		return;

	atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);

	/* Decide whether to send an update to the peer now.
	 * If we would send a credit update for every single buffer we
	 * post, we would end up with an ACK storm (ACK arrives,
	 * consumes buffer, we refill the ring, send ACK to remote
	 * advertising the newly posted buffer... ad inf)
	 *
	 * Performance pretty much depends on how often we send
	 * credit updates - too frequent updates mean lots of ACKs.
	 * Too infrequent updates, and the peer will run out of
	 * credits and have to throttle.
	 * For the time being, 16 seems to be a good compromise.
	 */
	if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}

static inline void rds_ib_set_wr_signal_state(struct rds_ib_connection *ic,
					      struct rds_ib_send_work *send,
					      bool notify)
{
	/*
	 * We want to delay signaling completions just enough to get
	 * the batching benefits but not so much that we create dead time
	 * on the wire.
	 */
	if (ic->i_unsignaled_wrs-- == 0 || notify) {
		ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
		send->s_wr.send_flags |= IB_SEND_SIGNALED;
	}
}

/*
 * This can be called multiple times for a given message. The first time
 * we see a message we map its scatterlist into the IB device so that
 * we can provide that mapped address to the IB scatter gather entries
 * in the IB work requests. We translate the scatterlist into a series
 * of work requests that fragment the message. These work requests complete
 * in order so we pass ownership of the message to the completion handler
 * once we send the final fragment.
 *
 * The RDS core uses the c_send_lock to only enter this function once
 * per connection. This makes sure that the tx ring alloc/unalloc pairs
 * don't get out of sync and confuse the ring.
 */
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	u32 pos;
	u32 i;
	u32 work_alloc;
	u32 credit_alloc = 0;
	u32 posted;
	u32 adv_credits = 0;
	int send_flags = 0;
	int bytes_sent = 0;
	int ret;
	int flow_controlled = 0;

	BUG_ON(off % RDS_FRAG_SIZE);
	BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));

	/* Do not send cong updates to IB loopback */
	if (conn->c_loopback
	    && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
		rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
		return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
	}

	/* FIXME we may overallocate here */
	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
		i = 1;
	else
		i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc == 0) {
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

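	/* Under flow control we may be granted fewer send credits than ring
	 * entries; trim the WR allocation to what the peer can accept and
	 * note that we were throttled so the final fragment gets signaled. */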
	if (ic->i_flowctl) {
		credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
		adv_credits += posted;
		if (credit_alloc < work_alloc) {
			rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
			work_alloc = credit_alloc;
			flow_controlled = 1;
		}
		if (work_alloc == 0) {
			set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
			rds_ib_stats_inc(s_ib_tx_throttle);
			ret = -ENOMEM;
			goto out;
		}
	}

	/* map the message the first time we see it */
	if (!ic->i_rm) {
		if (rm->data.m_nents) {
			rm->data.m_count = ib_dma_map_sg(dev,
							 rm->data.m_sg,
							 rm->data.m_nents,
							 DMA_TO_DEVICE);
			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.m_count);
			if (rm->data.m_count == 0) {
				rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
				rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
				ret = -ENOMEM; /* XXX ? */
				goto out;
			}
		} else {
			rm->data.m_count = 0;
		}

		rds_message_addref(rm);
		ic->i_rm = rm;

		/* Finalize the header */
		if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
		if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;

		/* If it has an RDMA op, tell the peer we did it. This is
		 * used by the peer to release use-once RDMA MRs. */
		if (rm->rdma.m_rdma_op.r_active) {
			struct rds_ext_header_rdma ext_hdr;

			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.m_rdma_op.r_key);
			rds_message_add_extension(&rm->m_inc.i_hdr,
					RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
		}
		if (rm->m_rdma_cookie) {
			rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
					rds_rdma_cookie_key(rm->m_rdma_cookie),
					rds_rdma_cookie_offset(rm->m_rdma_cookie));
		}

		/* Note - rds_ib_piggyb_ack clears the ACK_REQUIRED bit, so
		 * we should not do this unless we have a chance of at least
		 * sticking the header into the send ring. Which is why we
		 * should call rds_ib_ring_alloc first. */
		rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
		rds_message_make_checksum(&rm->m_inc.i_hdr);

		/*
		 * Update adv_credits since we reset the ACK_REQUIRED bit.
		 */
		if (ic->i_flowctl) {
			rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
			adv_credits += posted;
			BUG_ON(adv_credits > 255);
		}
	}

	/* Sometimes you want to put a fence between an RDMA
	 * READ and the following SEND.
	 * We could either do this all the time
	 * or when requested by the user. Right now, we let
	 * the application choose.
	 */
	if (rm->rdma.m_rdma_op.r_active && rm->rdma.m_rdma_op.r_fence)
		send_flags = IB_SEND_FENCE;

	/* Each frag gets a header. Msgs may be 0 bytes */
	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &rm->data.m_sg[sg];
	i = 0;
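	/* Walk the mapped scatterlist, building one work request per
	 * fragment: sge[0] carries this fragment's copy of the RDS header,
	 * sge[1] covers up to RDS_FRAG_SIZE bytes of payload.  The WRs are
	 * chained through s_wr.next and posted as a single list below. */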
	do {
		unsigned int len = 0;

		/* Set up the header */
		send->s_wr.send_flags = send_flags;
		send->s_wr.opcode = IB_WR_SEND;
		send->s_wr.num_sge = 1;
		send->s_wr.next = NULL;
		send->s_queued = jiffies;
		send->s_op = NULL;

		send->s_sge[0].addr = ic->i_send_hdrs_dma
			+ (pos * sizeof(struct rds_header));
		send->s_sge[0].length = sizeof(struct rds_header);

		memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));

		/* Set up the data, if present */
		if (i < work_alloc
		    && scat != &rm->data.m_sg[rm->data.m_count]) {
			len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off);
			send->s_wr.num_sge = 2;

			send->s_sge[1].addr = ib_sg_dma_address(dev, scat) + off;
			send->s_sge[1].length = len;

			bytes_sent += len;
			off += len;
			if (off == ib_sg_dma_len(dev, scat)) {
				scat++;
				off = 0;
			}
		}

		rds_ib_set_wr_signal_state(ic, send, 0);

		/*
		 * Always signal the last one if we're stopping due to flow control.
		 */
		if (ic->i_flowctl && flow_controlled && i == (work_alloc-1))
			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);

		if (ic->i_flowctl && adv_credits) {
			struct rds_header *hdr = &ic->i_send_hdrs[pos];

			/* add credit and redo the header checksum */
			hdr->h_credit = adv_credits;
			rds_message_make_checksum(hdr);
			adv_credits = 0;
			rds_ib_stats_inc(s_ib_tx_credit_updates);
		}

		if (prev)
			prev->s_wr.next = &send->s_wr;
		prev = send;

		pos = (pos + 1) % ic->i_send_ring.w_nr;
		send = &ic->i_sends[pos];
		i++;

	} while (i < work_alloc
		 && scat != &rm->data.m_sg[rm->data.m_count]);

	/* Account the RDS header in the number of bytes we sent, but just once.
	 * The caller has no concept of fragmentation. */
	if (hdr_off == 0)
		bytes_sent += sizeof(struct rds_header);

	/* if we finished the message then send completion owns it */
	if (scat == &rm->data.m_sg[rm->data.m_count]) {
		prev->s_rm = ic->i_rm;
		prev->s_wr.send_flags |= IB_SEND_SOLICITED;
		ic->i_rm = NULL;
	}

	/* Put back wrs & credits we didn't use */
	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}
	if (ic->i_flowctl && i < credit_alloc)
		rds_ib_send_add_credits(conn, credit_alloc - i);

	/* XXX need to worry about failed_wr and partial sends. */
	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		if (prev->s_rm) {
			ic->i_rm = prev->s_rm;
			prev->s_rm = NULL;
		}

		rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
		goto out;
	}

	ret = bytes_sent;
out:
	BUG_ON(adv_credits);
	return ret;
}

/*
 * Issue atomic operation.
 * A simplified version of the rdma case: we always map one SG, and
 * only 8 bytes, for the return value from the atomic operation.
 */
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rds_message *rm)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rm_atomic_op *op = &rm->atomic;
	struct rds_ib_send_work *send = NULL;
	struct ib_send_wr *failed_wr;
	struct rds_ib_device *rds_ibdev;
	u32 pos;
	u32 work_alloc;
	int ret;

	rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos);
	if (work_alloc != 1) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	/* address of send request in ring */
	send = &ic->i_sends[pos];
	send->s_queued = jiffies;

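	/* Map the RDS atomic op onto the IB atomic WR: for CSWP, compare_add
	 * carries the compare value and swap the new value; for FADD,
	 * compare_add carries the addend and swap is unused. */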
	if (op->op_type == RDS_ATOMIC_TYPE_CSWP) {
		send->s_wr.opcode = IB_WR_ATOMIC_CMP_AND_SWP;
		send->s_wr.wr.atomic.compare_add = op->op_compare;
		send->s_wr.wr.atomic.swap = op->op_swap_add;
	} else { /* FADD */
		send->s_wr.opcode = IB_WR_ATOMIC_FETCH_AND_ADD;
		send->s_wr.wr.atomic.compare_add = op->op_swap_add;
		send->s_wr.wr.atomic.swap = 0;
	}
	rds_ib_set_wr_signal_state(ic, send, op->op_notify);
	send->s_wr.num_sge = 1;
	send->s_wr.next = NULL;
	send->s_wr.wr.atomic.remote_addr = op->op_remote_addr;
	send->s_wr.wr.atomic.rkey = op->op_rkey;

	/*
	 * If there are no data or rdma ops in the message, then
	 * we must fill in s_rm ourselves, so we properly clean up
	 * on completion.
	 */
	if (!rm->rdma.m_rdma_op.r_active && !rm->data.op_active)
		send->s_rm = rm;

	/* map 8 byte retval buffer to the device */
	ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE);
	rdsdebug("ic %p mapping atomic op %p. mapped %d pg\n", ic, op, ret);
	if (ret != 1) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
		ret = -ENOMEM; /* XXX ? */
		goto out;
	}

	/* Convert our struct scatterlist to struct ib_sge */
	send->s_sge[0].addr = ib_sg_dma_address(ic->i_cm_id->device, op->op_sg);
	send->s_sge[0].length = ib_sg_dma_len(ic->i_cm_id->device, op->op_sg);
	send->s_sge[0].lkey = ic->i_mr->lkey;

	rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr,
		 send->s_sge[0].addr, send->s_sge[0].length);

	failed_wr = &send->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &send->s_wr, &failed_wr);
	rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic,
		 send, &send->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &send->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		goto out;
	}

	if (unlikely(failed_wr != &send->s_wr)) {
		printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &send->s_wr);
	}

out:
	return ret;
}

int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	struct ib_send_wr *failed_wr;
	struct rds_ib_device *rds_ibdev;
	struct scatterlist *scat;
	unsigned long len;
	u64 remote_addr = op->r_remote_addr;
	u32 pos;
	u32 work_alloc;
	u32 i;
	u32 j;
	int sent;
	int ret;
	int num_sge;

	rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);

	/* map the message the first time we see it */
	if (!op->r_mapped) {
		op->r_count = ib_dma_map_sg(ic->i_cm_id->device,
					    op->r_sg, op->r_nents, (op->r_write) ?
					    DMA_TO_DEVICE : DMA_FROM_DEVICE);
		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->r_count);
		if (op->r_count == 0) {
			rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
			ret = -ENOMEM; /* XXX ? */
			goto out;
		}

		op->r_mapped = 1;
	}

	/*
	 * Instead of knowing how to return a partial rdma read/write we insist that there
	 * be enough work requests to send the entire message.
	 */
	i = ceil(op->r_count, rds_ibdev->max_sge);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc != i) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &op->r_sg[0];
	sent = 0;
	num_sge = op->r_count;

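	/* Spread the mapped scatterlist across the allocated WRs, packing up
	 * to max_sge entries into each; remote_addr advances as local
	 * fragments are consumed so every WR targets the right offset within
	 * the peer's memory region. */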
	for (i = 0; i < work_alloc && scat != &op->r_sg[op->r_count]; i++) {
		send->s_wr.send_flags = 0;
		send->s_queued = jiffies;

		rds_ib_set_wr_signal_state(ic, send, op->r_notify);

		send->s_wr.opcode = op->r_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
		send->s_wr.wr.rdma.remote_addr = remote_addr;
		send->s_wr.wr.rdma.rkey = op->r_key;
		send->s_op = op;

		if (num_sge > rds_ibdev->max_sge) {
			send->s_wr.num_sge = rds_ibdev->max_sge;
			num_sge -= rds_ibdev->max_sge;
		} else {
			send->s_wr.num_sge = num_sge;
		}

		send->s_wr.next = NULL;

		if (prev)
			prev->s_wr.next = &send->s_wr;

		for (j = 0; j < send->s_wr.num_sge && scat != &op->r_sg[op->r_count]; j++) {
			len = ib_sg_dma_len(ic->i_cm_id->device, scat);
			send->s_sge[j].addr =
				ib_sg_dma_address(ic->i_cm_id->device, scat);
			send->s_sge[j].length = len;
			send->s_sge[j].lkey = ic->i_mr->lkey;

			sent += len;
			rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);

			remote_addr += len;
			scat++;
		}

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			&send->s_wr, send->s_wr.num_sge, send->s_wr.next);

		prev = send;
		if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
			send = ic->i_sends;
	}

	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}

	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		goto out;
	}

	if (unlikely(failed_wr != &first->s_wr)) {
		printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &first->s_wr);
	}

out:
	return ret;
}

void rds_ib_xmit_complete(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* We may have a pending ACK or window update we were unable
	 * to send previously (due to flow control). Try again. */
	rds_ib_attempt_ack(ic);
}