/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/ratelimit.h>

#include "rds.h"
#include "ib.h"

/*
 * Convert IB-specific error message to RDS error message and call core
 * completion handler.
 */
static void rds_ib_send_complete(struct rds_message *rm,
				 int wc_status,
				 void (*complete)(struct rds_message *rm, int status))
{
	int notify_status;

	switch (wc_status) {
	case IB_WC_WR_FLUSH_ERR:
		return;

	case IB_WC_SUCCESS:
		notify_status = RDS_RDMA_SUCCESS;
		break;

	case IB_WC_REM_ACCESS_ERR:
		notify_status = RDS_RDMA_REMOTE_ERROR;
		break;

	default:
		notify_status = RDS_RDMA_OTHER_ERROR;
		break;
	}
	complete(rm, notify_status);
}

static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
				   struct rm_data_op *op,
				   int wc_status)
{
	if (op->op_nents)
		ib_dma_unmap_sg(ic->i_cm_id->device,
				op->op_sg, op->op_nents,
				DMA_TO_DEVICE);
}

static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
				   struct rm_rdma_op *op,
				   int wc_status)
{
	if (op->op_mapped) {
		ib_dma_unmap_sg(ic->i_cm_id->device,
				op->op_sg, op->op_nents,
				op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		op->op_mapped = 0;
	}

	/* If the user asked for a completion notification on this
	 * message, we can implement three different semantics:
	 * 1. Notify when we received the ACK on the RDS message
	 *    that was queued with the RDMA. This provides reliable
	 *    notification of RDMA status at the expense of a one-way
	 *    packet delay.
	 * 2. Notify when the IB stack gives us the completion event for
	 *    the RDMA operation.
	 * 3. Notify when the IB stack gives us the completion event for
	 *    the accompanying RDS messages.
	 * Here, we implement approach #3. To implement approach #2,
	 * we would need to take an event for the rdma WR. To implement #1,
	 * don't call rds_rdma_send_complete at all, and fall back to the notify
	 * handling in the ACK processing code.
	 *
	 * Note: There's no need to explicitly sync any RDMA buffers using
	 * ib_dma_sync_sg_for_cpu - the completion for the RDMA
	 * operation itself unmapped the RDMA buffers, which takes care
	 * of synching.
	 */
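	/* Illustrative sketch only, not the implemented path: approach #2
	 * above would amount to signaling the RDMA WR itself and invoking
	 * the notifier from its completion, roughly:
	 *
	 *	send->s_wr.send_flags |= IB_SEND_SIGNALED;
	 *	...
	 *	// in rds_ib_send_cq_comp_handler(), for the RDMA WR:
	 *	rds_rdma_send_complete(rm, notify_status);
	 *
	 * The call below implements approach #3 instead.
	 */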
	rds_ib_send_complete(container_of(op, struct rds_message, rdma),
			     wc_status, rds_rdma_send_complete);

	if (op->op_write)
		rds_stats_add(s_send_rdma_bytes, op->op_bytes);
	else
		rds_stats_add(s_recv_rdma_bytes, op->op_bytes);
}

static void rds_ib_send_unmap_atomic(struct rds_ib_connection *ic,
				     struct rm_atomic_op *op,
				     int wc_status)
{
	/* unmap atomic recvbuf */
	if (op->op_mapped) {
		ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1,
				DMA_FROM_DEVICE);
		op->op_mapped = 0;
	}

	rds_ib_send_complete(container_of(op, struct rds_message, atomic),
			     wc_status, rds_atomic_send_complete);

	if (op->op_type == RDS_ATOMIC_TYPE_CSWP)
		rds_ib_stats_inc(s_ib_atomic_cswp);
	else
		rds_ib_stats_inc(s_ib_atomic_fadd);
}

/*
 * Unmap the resources associated with a struct send_work.
 *
 * Returns the rm only because the caller, the event handler, needs it,
 * and currently the only way to recover it is by switching on
 * wr.opcode.
 */
static struct rds_message *rds_ib_send_unmap_op(struct rds_ib_connection *ic,
						struct rds_ib_send_work *send,
						int wc_status)
{
	struct rds_message *rm = NULL;

	/* In the error case, wc.opcode sometimes contains garbage */
	switch (send->s_wr.opcode) {
	case IB_WR_SEND:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, data);
			rds_ib_send_unmap_data(ic, send->s_op, wc_status);
		}
		break;
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_READ:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, rdma);
			rds_ib_send_unmap_rdma(ic, send->s_op, wc_status);
		}
		break;
	case IB_WR_ATOMIC_FETCH_AND_ADD:
	case IB_WR_ATOMIC_CMP_AND_SWP:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, atomic);
			rds_ib_send_unmap_atomic(ic, send->s_op, wc_status);
		}
		break;
	default:
		printk_ratelimited(KERN_NOTICE
				   "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
				   __func__, send->s_wr.opcode);
		break;
	}

	send->s_wr.opcode = 0xdead;

	return rm;
}

void rds_ib_send_init_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		struct ib_sge *sge;

		send->s_op = NULL;

		send->s_wr.wr_id = i;
		send->s_wr.sg_list = send->s_sge;
		send->s_wr.ex.imm_data = 0;

		sge = &send->s_sge[0];
		sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
		sge->length = sizeof(struct rds_header);
		sge->lkey = ic->i_mr->lkey;

		send->s_sge[1].lkey = ic->i_mr->lkey;
	}
}

void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		if (send->s_op && send->s_wr.opcode != 0xdead)
			rds_ib_send_unmap_op(ic, send, IB_WC_WR_FLUSH_ERR);
	}
}

/*
 * The only fast path caller always has a non-zero nr, so we don't
 * bother testing nr before performing the atomic sub.
 */
static void rds_ib_sub_signaled(struct rds_ib_connection *ic, int nr)
{
	if ((atomic_sub_return(nr, &ic->i_signaled_sends) == 0) &&
	    waitqueue_active(&rds_ib_ring_empty_wait))
		wake_up(&rds_ib_ring_empty_wait);
	BUG_ON(atomic_read(&ic->i_signaled_sends) < 0);
}

/*
 * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
 * operations performed in the send path. As the sender allocs and potentially
 * unallocs the next free entry in the ring it doesn't alter which is
 * the next to be freed, which is what this is concerned with.
 */
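/*
 * A rough sketch of the ring accounting assumed below (the helpers live in
 * ib_ring.c, so treat the formula as illustrative rather than authoritative):
 * each send WR's wr_id is its index in ic->i_sends, so given the oldest
 * outstanding index and the wr_id reported by a completion,
 *
 *	completed = (wr_id - oldest + 1) modulo i_send_ring.w_nr
 *
 * which is what lets a single signaled completion retire a whole batch of
 * preceding unsignaled work requests.
 */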
void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_message *rm = NULL;
	struct ib_wc wc;
	struct rds_ib_send_work *send;
	u32 completed;
	u32 oldest;
	u32 i = 0;
	int ret;
	int nr_sig = 0;

	rdsdebug("cq %p conn %p\n", cq, conn);
	rds_ib_stats_inc(s_ib_tx_cq_call);
	ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	if (ret)
		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
			 (unsigned long long)wc.wr_id, wc.status,
			 ib_wc_status_msg(wc.status), wc.byte_len,
			 be32_to_cpu(wc.ex.imm_data));
		rds_ib_stats_inc(s_ib_tx_cq_event);

		if (wc.wr_id == RDS_IB_ACK_WR_ID) {
			if (time_after(jiffies, ic->i_ack_queued + HZ/2))
				rds_ib_stats_inc(s_ib_tx_stalled);
			rds_ib_ack_send_complete(ic);
			continue;
		}

		oldest = rds_ib_ring_oldest(&ic->i_send_ring);

		completed = rds_ib_ring_completed(&ic->i_send_ring, wc.wr_id, oldest);

		for (i = 0; i < completed; i++) {
			send = &ic->i_sends[oldest];
			if (send->s_wr.send_flags & IB_SEND_SIGNALED)
				nr_sig++;

			rm = rds_ib_send_unmap_op(ic, send, wc.status);

			if (time_after(jiffies, send->s_queued + HZ/2))
				rds_ib_stats_inc(s_ib_tx_stalled);

			if (send->s_op) {
				if (send->s_op == rm->m_final_op) {
					/* If anyone waited for this message to get flushed out, wake
					 * them up now */
					rds_message_unmapped(rm);
				}
				rds_message_put(rm);
				send->s_op = NULL;
			}

			oldest = (oldest + 1) % ic->i_send_ring.w_nr;
		}

		rds_ib_ring_free(&ic->i_send_ring, completed);
		rds_ib_sub_signaled(ic, nr_sig);
		nr_sig = 0;

		if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
		    test_bit(0, &conn->c_map_queued))
			queue_delayed_work(rds_wq, &conn->c_send_w, 0);

		/* We expect errors as the qp is drained during shutdown */
		if (wc.status != IB_WC_SUCCESS && rds_conn_up(conn)) {
			rds_ib_conn_error(conn, "send completion on %pI4 had status "
					  "%u (%s), disconnecting and reconnecting\n",
					  &conn->c_faddr, wc.status,
					  ib_wc_status_msg(wc.status));
		}
	}
}

/*
 * This is the main function for allocating credits when sending
 * messages.
 *
 * Conceptually, we have two counters:
 *  - send credits: this tells us how many WRs we're allowed
 *    to submit without overrunning the receiver's queue. For
 *    each SEND WR we post, we decrement this by one.
 *
 *  - posted credits: this tells us how many WRs we recently
 *    posted to the receive queue. This value is transferred
 *    to the peer as a "credit update" in a RDS header field.
 *    Every time we transmit credits to the peer, we subtract
 *    the amount of transferred credits from this counter.
 *
 * It is essential that we avoid situations where both sides have
 * exhausted their send credits, and are unable to send new credits
 * to the peer. We achieve this by requiring that we send at least
 * one credit update to the peer before exhausting our credits.
 * When new credits arrive, we subtract one credit that is withheld
 * until we've posted new buffers and are ready to transmit these
 * credits (see rds_ib_send_add_credits below).
 *
 * The RDS send code is essentially single-threaded; rds_send_xmit
 * sets RDS_IN_XMIT to ensure exclusive access to the send ring.
 * However, the ACK sending code is independent and can race with
 * message SENDs.
 *
 * In the send path, we need to update the counters for send credits
 * and the counter of posted buffers atomically - when we use the
 * last available credit, we cannot allow another thread to race us
 * and grab the posted credits counter. Hence, we have to use a
 * spinlock to protect the credit counter, or use atomics.
 *
 * Spinlocks shared between the send and the receive path are bad,
 * because they create unnecessary delays. An early implementation
 * using a spinlock showed a 5% degradation in throughput at some
 * loads.
 *
 * This implementation avoids spinlocks completely, putting both
 * counters into a single atomic, and updating that atomic using
 * atomic_add (in the receive path, when receiving fresh credits),
 * and using atomic_cmpxchg when updating the two counters.
 */
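/*
 * For reference, a minimal sketch of the packing this relies on.  The real
 * IB_GET_SEND_CREDITS()/IB_GET_POST_CREDITS() and IB_SET_* helpers are
 * defined in ib.h; the layout shown here is an assumption for illustration:
 * send credits in the low 16 bits and posted credits in the high 16 bits of
 * ic->i_credits, e.g.
 *
 *	#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
 *	#define IB_SET_POST_CREDITS(v)	((v) << 16)
 *	#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
 *	#define IB_GET_POST_CREDITS(v)	((v) >> 16)
 *
 * so a single atomic_cmpxchg() in rds_ib_send_grab_credits() can update both
 * counters together without taking a lock.
 */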
int rds_ib_send_grab_credits(struct rds_ib_connection *ic,
			     u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
{
	unsigned int avail, posted, got = 0, advertise;
	long oldval, newval;

	*adv_credits = 0;
	if (!ic->i_flowctl)
		return wanted;

try_again:
	advertise = 0;
	oldval = newval = atomic_read(&ic->i_credits);
	posted = IB_GET_POST_CREDITS(oldval);
	avail = IB_GET_SEND_CREDITS(oldval);

	rdsdebug("wanted=%u credits=%u posted=%u\n",
		 wanted, avail, posted);

	/* The last credit must be used to send a credit update. */
	if (avail && !posted)
		avail--;

	if (avail < wanted) {
		struct rds_connection *conn = ic->i_cm_id->context;

		/* Oops, there aren't that many credits left! */
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		got = avail;
	} else {
		/* Sometimes you get what you want, lalala. */
		got = wanted;
	}
	newval -= IB_SET_SEND_CREDITS(got);

	/*
	 * If need_posted is non-zero, then the caller wants the posted
	 * credits advertised regardless of whether any send credits
	 * are available.
	 */
	if (posted && (got || need_posted)) {
		advertise = min_t(unsigned int, posted, max_posted);
		newval -= IB_SET_POST_CREDITS(advertise);
	}

	/* Finally bill everything */
	if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
		goto try_again;

	*adv_credits = advertise;
	return got;
}

void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (credits == 0)
		return;

	rdsdebug("credits=%u current=%u%s\n",
		 credits,
		 IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
		 test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");

	atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);

	rds_ib_stats_inc(s_ib_rx_credit_updates);
}

void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (posted == 0)
		return;

	atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);

	/* Decide whether to send an update to the peer now.
	 * If we would send a credit update for every single buffer we
	 * post, we would end up with an ACK storm (ACK arrives,
	 * consumes buffer, we refill the ring, send ACK to remote
	 * advertising the newly posted buffer... ad inf)
	 *
	 * Performance pretty much depends on how often we send
	 * credit updates - too frequent updates mean lots of ACKs.
	 * Too infrequent updates, and the peer will run out of
	 * credits and has to throttle.
	 * For the time being, 16 seems to be a good compromise.
	 */
	if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}
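/*
 * Worked example for the threshold above (numbers are illustrative): if the
 * receive path refills 64 buffers one at a time, the >= 16 check means the
 * peer sees roughly four credit-update ACKs instead of 64, which is what
 * keeps the "ACK storm" described in the comment in check.
 */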

static inline int rds_ib_set_wr_signal_state(struct rds_ib_connection *ic,
					     struct rds_ib_send_work *send,
					     bool notify)
{
	/*
	 * We want to delay signaling completions just enough to get
	 * the batching benefits but not so much that we create dead time
	 * on the wire.
	 */
	if (ic->i_unsignaled_wrs-- == 0 || notify) {
		ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
		send->s_wr.send_flags |= IB_SEND_SIGNALED;
		return 1;
	}
	return 0;
}
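/*
 * Rough illustration of the batching above (the actual value comes from the
 * rds_ib_sysctl_max_unsig_wrs sysctl): if that limit were 16, only about one
 * in every 16 posted WRs would carry IB_SEND_SIGNALED, so the HCA raises
 * roughly one send completion per batch instead of one per WR.  'notify'
 * forces a signal regardless, and the return value feeds the
 * ic->i_signaled_sends accounting consumed by rds_ib_sub_signaled().
 */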

/*
 * This can be called multiple times for a given message. The first time
 * we see a message we map its scatterlist into the IB device so that
 * we can provide that mapped address to the IB scatter gather entries
 * in the IB work requests. We translate the scatterlist into a series
 * of work requests that fragment the message. These work requests complete
 * in order so we pass ownership of the message to the completion handler
 * once we send the final fragment.
 *
 * The RDS core uses the c_send_lock to only enter this function once
 * per connection. This makes sure that the tx ring alloc/unalloc pairs
 * don't get out of sync and confuse the ring.
 */
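/*
 * Worked example (illustrative; RDS_FRAG_SIZE is defined in ib.h and assumed
 * here to be 4096): a 10000 byte message needs ceil(10000, 4096) = 3 SEND
 * work requests.  Each WR's first SGE points at that ring slot's rds_header
 * and its second SGE at up to one RDS_FRAG_SIZE chunk of the data
 * scatterlist; only the WR carrying the final fragment takes ownership of
 * the message (prev->s_op below) and hands it to the completion handler.
 */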
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	u32 pos;
	u32 i;
	u32 work_alloc;
	u32 credit_alloc = 0;
	u32 posted;
	u32 adv_credits = 0;
	int send_flags = 0;
	int bytes_sent = 0;
	int ret;
	int flow_controlled = 0;
	int nr_sig = 0;

	BUG_ON(off % RDS_FRAG_SIZE);
	BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));

	/* Do not send cong updates to IB loopback */
	if (conn->c_loopback
	    && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
		rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
		scat = &rm->data.op_sg[sg];
		ret = max_t(int, RDS_CONG_MAP_BYTES, scat->length);
		return sizeof(struct rds_header) + ret;
	}

	/* FIXME we may overallocate here */
	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
		i = 1;
	else
		i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc == 0) {
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	if (ic->i_flowctl) {
		credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
		adv_credits += posted;
		if (credit_alloc < work_alloc) {
			rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
			work_alloc = credit_alloc;
			flow_controlled = 1;
		}
		if (work_alloc == 0) {
			set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
			rds_ib_stats_inc(s_ib_tx_throttle);
			ret = -ENOMEM;
			goto out;
		}
	}

	/* map the message the first time we see it */
	if (!ic->i_data_op) {
		if (rm->data.op_nents) {
			rm->data.op_count = ib_dma_map_sg(dev,
							  rm->data.op_sg,
							  rm->data.op_nents,
							  DMA_TO_DEVICE);
			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
			if (rm->data.op_count == 0) {
				rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
				rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
				ret = -ENOMEM; /* XXX ? */
				goto out;
			}
		} else {
			rm->data.op_count = 0;
		}

		rds_message_addref(rm);
		rm->data.op_dmasg = 0;
		rm->data.op_dmaoff = 0;
		ic->i_data_op = &rm->data;

		/* Finalize the header */
		if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
		if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;

		/* If it has a RDMA op, tell the peer we did it. This is
		 * used by the peer to release use-once RDMA MRs. */
		if (rm->rdma.op_active) {
			struct rds_ext_header_rdma ext_hdr;

			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
			rds_message_add_extension(&rm->m_inc.i_hdr,
						  RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
		}
		if (rm->m_rdma_cookie) {
			rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
							    rds_rdma_cookie_key(rm->m_rdma_cookie),
							    rds_rdma_cookie_offset(rm->m_rdma_cookie));
		}

		/* Note - rds_ib_piggyb_ack clears the ACK_REQUIRED bit, so
		 * we should not do this unless we have a chance of at least
		 * sticking the header into the send ring. Which is why we
		 * should call rds_ib_ring_alloc first. */
		rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
		rds_message_make_checksum(&rm->m_inc.i_hdr);

		/*
		 * Update adv_credits since we reset the ACK_REQUIRED bit.
		 */
		if (ic->i_flowctl) {
			rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
			adv_credits += posted;
			BUG_ON(adv_credits > 255);
		}
	}

	/* Sometimes you want to put a fence between an RDMA
	 * READ and the following SEND.
	 * We could either do this all the time
	 * or when requested by the user. Right now, we let
	 * the application choose.
	 */
	if (rm->rdma.op_active && rm->rdma.op_fence)
		send_flags = IB_SEND_FENCE;

	/* Each frag gets a header. Msgs may be 0 bytes */
	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &ic->i_data_op->op_sg[rm->data.op_dmasg];
	i = 0;
	do {
		unsigned int len = 0;

		/* Set up the header */
		send->s_wr.send_flags = send_flags;
		send->s_wr.opcode = IB_WR_SEND;
		send->s_wr.num_sge = 1;
		send->s_wr.next = NULL;
		send->s_queued = jiffies;
		send->s_op = NULL;

		send->s_sge[0].addr = ic->i_send_hdrs_dma
			+ (pos * sizeof(struct rds_header));
		send->s_sge[0].length = sizeof(struct rds_header);

		memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));

		/* Set up the data, if present */
		if (i < work_alloc
		    && scat != &rm->data.op_sg[rm->data.op_count]) {
			len = min(RDS_FRAG_SIZE,
				  ib_sg_dma_len(dev, scat) - rm->data.op_dmaoff);
			send->s_wr.num_sge = 2;

			send->s_sge[1].addr = ib_sg_dma_address(dev, scat);
			send->s_sge[1].addr += rm->data.op_dmaoff;
			send->s_sge[1].length = len;

			bytes_sent += len;
			rm->data.op_dmaoff += len;
			if (rm->data.op_dmaoff == ib_sg_dma_len(dev, scat)) {
				scat++;
				rm->data.op_dmasg++;
				rm->data.op_dmaoff = 0;
			}
		}

		rds_ib_set_wr_signal_state(ic, send, 0);

		/*
		 * Always signal the last one if we're stopping due to flow control.
		 */
		if (ic->i_flowctl && flow_controlled && i == (work_alloc-1))
			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;

		if (send->s_wr.send_flags & IB_SEND_SIGNALED)
			nr_sig++;

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);

		if (ic->i_flowctl && adv_credits) {
			struct rds_header *hdr = &ic->i_send_hdrs[pos];

			/* add credit and redo the header checksum */
			hdr->h_credit = adv_credits;
			rds_message_make_checksum(hdr);
			adv_credits = 0;
			rds_ib_stats_inc(s_ib_tx_credit_updates);
		}

		if (prev)
			prev->s_wr.next = &send->s_wr;
		prev = send;

		pos = (pos + 1) % ic->i_send_ring.w_nr;
		send = &ic->i_sends[pos];
		i++;

	} while (i < work_alloc
		 && scat != &rm->data.op_sg[rm->data.op_count]);

	/* Account the RDS header in the number of bytes we sent, but just once.
	 * The caller has no concept of fragmentation. */
	if (hdr_off == 0)
		bytes_sent += sizeof(struct rds_header);

	/* if we finished the message then send completion owns it */
	if (scat == &rm->data.op_sg[rm->data.op_count]) {
		prev->s_op = ic->i_data_op;
		prev->s_wr.send_flags |= IB_SEND_SOLICITED;
		ic->i_data_op = NULL;
	}

	/* Put back wrs & credits we didn't use */
	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}
	if (ic->i_flowctl && i < credit_alloc)
		rds_ib_send_add_credits(conn, credit_alloc - i);

	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	/* XXX need to worry about failed_wr and partial sends. */
	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
		if (prev->s_op) {
			ic->i_data_op = prev->s_op;
			prev->s_op = NULL;
		}

		rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
		goto out;
	}

	ret = bytes_sent;
out:
	BUG_ON(adv_credits);
	return ret;
}

/*
 * Issue atomic operation.
 * A simplified version of the rdma case, we always map 1 SG, and
 * only 8 bytes, for the return value from the atomic operation.
 */
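/*
 * (Background sketch, not authoritative: the rm_atomic_op typically
 * originates from a userspace RDS_CMSG_ATOMIC_CSWP / RDS_CMSG_ATOMIC_FADD
 * control message parsed in rdma.c, and is expressed here as a single
 * masked IB atomic work request whose 8-byte result lands in op->op_sg.)
 */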
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	struct ib_send_wr *failed_wr;
	struct rds_ib_device *rds_ibdev;
	u32 pos;
	u32 work_alloc;
	int ret;
	int nr_sig = 0;

	rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos);
	if (work_alloc != 1) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	/* address of send request in ring */
	send = &ic->i_sends[pos];
	send->s_queued = jiffies;

	if (op->op_type == RDS_ATOMIC_TYPE_CSWP) {
		send->s_wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP;
		send->s_wr.wr.atomic.compare_add = op->op_m_cswp.compare;
		send->s_wr.wr.atomic.swap = op->op_m_cswp.swap;
		send->s_wr.wr.atomic.compare_add_mask = op->op_m_cswp.compare_mask;
		send->s_wr.wr.atomic.swap_mask = op->op_m_cswp.swap_mask;
	} else { /* FADD */
		send->s_wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD;
		send->s_wr.wr.atomic.compare_add = op->op_m_fadd.add;
		send->s_wr.wr.atomic.swap = 0;
		send->s_wr.wr.atomic.compare_add_mask = op->op_m_fadd.nocarry_mask;
		send->s_wr.wr.atomic.swap_mask = 0;
	}
	nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
	send->s_wr.num_sge = 1;
	send->s_wr.next = NULL;
	send->s_wr.wr.atomic.remote_addr = op->op_remote_addr;
	send->s_wr.wr.atomic.rkey = op->op_rkey;
	send->s_op = op;
	rds_message_addref(container_of(send->s_op, struct rds_message, atomic));

	/* map 8 byte retval buffer to the device */
	ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE);
	rdsdebug("ic %p mapping atomic op %p. mapped %d pg\n", ic, op, ret);
	if (ret != 1) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
		ret = -ENOMEM; /* XXX ? */
		goto out;
	}

	/* Convert our struct scatterlist to struct ib_sge */
	send->s_sge[0].addr = ib_sg_dma_address(ic->i_cm_id->device, op->op_sg);
	send->s_sge[0].length = ib_sg_dma_len(ic->i_cm_id->device, op->op_sg);
	send->s_sge[0].lkey = ic->i_mr->lkey;

	rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr,
		 send->s_sge[0].addr, send->s_sge[0].length);

	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	failed_wr = &send->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &send->s_wr, &failed_wr);
	rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic,
		 send, &send->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &send->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
		goto out;
	}

	if (unlikely(failed_wr != &send->s_wr)) {
		printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &send->s_wr);
	}

out:
	return ret;
}

int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	unsigned long len;
	u64 remote_addr = op->op_remote_addr;
	u32 max_sge = ic->rds_ibdev->max_sge;
	u32 pos;
	u32 work_alloc;
	u32 i;
	u32 j;
	int sent;
	int ret;
	int num_sge;
	int nr_sig = 0;

	/* map the op the first time we see it */
	if (!op->op_mapped) {
		op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
					     op->op_sg, op->op_nents, (op->op_write) ?
					     DMA_TO_DEVICE : DMA_FROM_DEVICE);
		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
		if (op->op_count == 0) {
			rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
			ret = -ENOMEM; /* XXX ? */
			goto out;
		}

		op->op_mapped = 1;
	}

	/*
	 * Instead of knowing how to return a partial rdma read/write we insist that there
	 * be enough work requests to send the entire message.
	 */
	i = ceil(op->op_count, max_sge);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc != i) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &op->op_sg[0];
	sent = 0;
	num_sge = op->op_count;

	for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
		send->s_wr.send_flags = 0;
		send->s_queued = jiffies;
		send->s_op = NULL;

		nr_sig += rds_ib_set_wr_signal_state(ic, send, op->op_notify);

		send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
		send->s_wr.wr.rdma.remote_addr = remote_addr;
		send->s_wr.wr.rdma.rkey = op->op_rkey;

		if (num_sge > max_sge) {
			send->s_wr.num_sge = max_sge;
			num_sge -= max_sge;
		} else {
			send->s_wr.num_sge = num_sge;
		}

		send->s_wr.next = NULL;

		if (prev)
			prev->s_wr.next = &send->s_wr;

		for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) {
			len = ib_sg_dma_len(ic->i_cm_id->device, scat);
			send->s_sge[j].addr =
				ib_sg_dma_address(ic->i_cm_id->device, scat);
			send->s_sge[j].length = len;
			send->s_sge[j].lkey = ic->i_mr->lkey;

			sent += len;
			rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);

			remote_addr += len;
			scat++;
		}

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);

		prev = send;
		if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
			send = ic->i_sends;
	}

	/* give a reference to the last op */
	if (scat == &op->op_sg[op->op_count]) {
		prev->s_op = op;
		rds_message_addref(container_of(op, struct rds_message, rdma));
	}

	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}

	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
		goto out;
	}

	if (unlikely(failed_wr != &first->s_wr)) {
		printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &first->s_wr);
	}


out:
	return ret;
}

void rds_ib_xmit_complete(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* We may have a pending ACK or window update we were unable
	 * to send previously (due to flow control). Try again. */
	rds_ib_attempt_ack(ic);
}