/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>

#include "rds.h"

/* When transmitting messages in rds_send_xmit, we need to emerge from
 * time to time and briefly release the CPU. Otherwise the softlockup
 * watchdog will kick our shin.
 * Also, it seems fairer to not let one busy connection stall all the
 * others.
 *
 * send_batch_count is the number of times we'll loop in send_xmit. Setting
 * it to 0 will restore the old behavior (where we looped until we had
 * drained the queue).
 */
static int send_batch_count = 64;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
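/*
 * With 0444 permissions the parameter is read-only at runtime (visible
 * under /sys/module/rds/parameters/), so it can only be set at module
 * load time, e.g. "modprobe rds send_batch_count=128" (illustrative).
 */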

/*
 * Reset the send state.  Callers must ensure that this doesn't race with
 * rds_send_xmit().
 */
void rds_send_reset(struct rds_connection *conn)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;

	if (conn->c_xmit_rm) {
		rm = conn->c_xmit_rm;
		conn->c_xmit_rm = NULL;
		/* Tell the user the RDMA op is no longer mapped by the
		 * transport. This isn't entirely true (it's flushed out
		 * independently) but as the connection is down, there's
		 * no ongoing RDMA to/from that memory */
		rds_message_unmapped(rm);
		rds_message_put(rm);
	}

	conn->c_xmit_sg = 0;
	conn->c_xmit_hdr_off = 0;
	conn->c_xmit_data_off = 0;
	conn->c_xmit_atomic_sent = 0;
	conn->c_xmit_rdma_sent = 0;
	conn->c_xmit_data_sent = 0;

	conn->c_map_queued = 0;

	conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
	conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;

	/* Mark messages as retransmissions, and move them to the send q */
	spin_lock_irqsave(&conn->c_lock, flags);
	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
	}
	list_splice_init(&conn->c_retrans, &conn->c_send_queue);
	spin_unlock_irqrestore(&conn->c_lock, flags);
}

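/*
 * Transmission is serialized by the RDS_IN_XMIT bit in c_flags: only the
 * caller that wins test_and_set_bit() may enter rds_send_xmit(); everyone
 * else backs off and relies on the owner to drain the send queue.
 */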
static int acquire_in_xmit(struct rds_connection *conn)
{
	return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
}

static void release_in_xmit(struct rds_connection *conn)
{
	clear_bit(RDS_IN_XMIT, &conn->c_flags);
	smp_mb__after_clear_bit();
	/*
	 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
	 * hot path and finding waiters is very rare.  We don't want to walk
	 * the system-wide hashed waitqueue buckets in the fast path only to
	 * almost never find waiters.
	 */
	if (waitqueue_active(&conn->c_waitq))
		wake_up_all(&conn->c_waitq);
}

/*
 * We're making the conscious trade-off here to only send one message
 * down the connection at a time.
 * Pro:
 *      - tx queueing is a simple fifo list
 *      - reassembly is optional and easily done by transports per conn
 *      - no per flow rx lookup at all, straight to the socket
 *      - less per-frag memory and wire overhead
 * Con:
 *      - queued acks can be delayed behind large messages
 * Depends:
 *      - small message latency is higher behind queued large messages
 *      - large message latency isn't starved by intervening small sends
 */
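/*
 * Returns 0 when the send queue was drained.  A nonzero return means
 * either another sender already held RDS_IN_XMIT, or the transport
 * pushed back; in the latter case the transport must call us again when
 * it has room (see the comment above the restart check at the bottom).
 */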
int rds_send_xmit(struct rds_connection *conn)
{
	struct rds_message *rm;
	unsigned long flags;
	unsigned int tmp;
	struct scatterlist *sg;
	int ret = 0;
	LIST_HEAD(to_be_dropped);

restart:

	/*
	 * sendmsg calls here after having queued its message on the send
	 * queue.  We only have one task feeding the connection at a time.  If
	 * another thread is already feeding the queue then we back off.  This
	 * avoids blocking the caller and trading per-connection data between
	 * caches per message.
	 */
	if (!acquire_in_xmit(conn)) {
		rds_stats_inc(s_send_lock_contention);
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
	 * we do the opposite to avoid races.
	 */
	if (!rds_conn_up(conn)) {
		release_in_xmit(conn);
		ret = 0;
		goto out;
	}

	if (conn->c_trans->xmit_prepare)
		conn->c_trans->xmit_prepare(conn);

	/*
	 * spin trying to push headers and data down the connection until
	 * the connection doesn't make forward progress.
	 */
	while (1) {

		rm = conn->c_xmit_rm;

		/*
		 * If between sending messages, we can send a pending congestion
		 * map update.
		 */
		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
			rm = rds_cong_update_alloc(conn);
			if (IS_ERR(rm)) {
				ret = PTR_ERR(rm);
				break;
			}
			rm->data.op_active = 1;

			conn->c_xmit_rm = rm;
		}

		/*
		 * If not already working on one, grab the next message.
		 *
		 * c_xmit_rm holds a ref while we're sending this message down
		 * the connection.  We can use this ref while we hold
		 * RDS_IN_XMIT; rds_send_reset() is serialized with it.
		 */
		if (!rm) {
			unsigned int len;

			spin_lock_irqsave(&conn->c_lock, flags);

			if (!list_empty(&conn->c_send_queue)) {
				rm = list_entry(conn->c_send_queue.next,
						struct rds_message,
						m_conn_item);
				rds_message_addref(rm);

				/*
				 * Move the message from the send queue to the retransmit
				 * list right away.
				 */
				list_move_tail(&rm->m_conn_item, &conn->c_retrans);
			}

			spin_unlock_irqrestore(&conn->c_lock, flags);

			if (!rm)
				break;

			/* Unfortunately, the way InfiniBand deals with
			 * RDMA to a bad MR key is by moving the entire
			 * queue pair to error state. We could possibly
			 * recover from that, but right now we drop the
			 * connection.
			 * Therefore, we never retransmit messages with RDMA ops.
			 */
			if (rm->rdma.op_active &&
			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
				spin_lock_irqsave(&conn->c_lock, flags);
				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
					list_move(&rm->m_conn_item, &to_be_dropped);
				spin_unlock_irqrestore(&conn->c_lock, flags);
				continue;
			}

			/* Require an ACK every once in a while */
			len = ntohl(rm->m_inc.i_hdr.h_len);
			if (conn->c_unacked_packets == 0 ||
			    conn->c_unacked_bytes < len) {
				__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

				conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
				conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
				rds_stats_inc(s_send_ack_required);
			} else {
				conn->c_unacked_bytes -= len;
				conn->c_unacked_packets--;
			}

			conn->c_xmit_rm = rm;
		}

		/* The transport either sends the whole rdma or none of it */
		if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
			rm->m_final_op = &rm->rdma;
			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
			if (ret)
				break;
			conn->c_xmit_rdma_sent = 1;

			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
		}

		if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
			rm->m_final_op = &rm->atomic;
			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
			if (ret)
				break;
			conn->c_xmit_atomic_sent = 1;

			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
		}

		/*
		 * A number of cases require an RDS header to be sent
		 * even if there is no data.
		 * We permit 0-byte sends; rds-ping depends on this.
		 * However, if there are exclusively attached silent ops,
		 * we skip the hdr/data send, to enable silent operation.
		 */
		if (rm->data.op_nents == 0) {
			int ops_present;
			int all_ops_are_silent = 1;

			ops_present = (rm->atomic.op_active || rm->rdma.op_active);
			if (rm->atomic.op_active && !rm->atomic.op_silent)
				all_ops_are_silent = 0;
			if (rm->rdma.op_active && !rm->rdma.op_silent)
				all_ops_are_silent = 0;

			if (ops_present && all_ops_are_silent
			    && !rm->m_rdma_cookie)
				rm->data.op_active = 0;
		}

		if (rm->data.op_active && !conn->c_xmit_data_sent) {
			rm->m_final_op = &rm->data;
			ret = conn->c_trans->xmit(conn, rm,
						  conn->c_xmit_hdr_off,
						  conn->c_xmit_sg,
						  conn->c_xmit_data_off);
			if (ret <= 0)
				break;

			if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
				tmp = min_t(int, ret,
					    sizeof(struct rds_header) -
					    conn->c_xmit_hdr_off);
				conn->c_xmit_hdr_off += tmp;
				ret -= tmp;
			}

			sg = &rm->data.op_sg[conn->c_xmit_sg];
			while (ret) {
				tmp = min_t(int, ret, sg->length -
						      conn->c_xmit_data_off);
				conn->c_xmit_data_off += tmp;
				ret -= tmp;
				if (conn->c_xmit_data_off == sg->length) {
					conn->c_xmit_data_off = 0;
					sg++;
					conn->c_xmit_sg++;
					BUG_ON(ret != 0 &&
					       conn->c_xmit_sg == rm->data.op_nents);
				}
			}

			if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
			    (conn->c_xmit_sg == rm->data.op_nents))
				conn->c_xmit_data_sent = 1;
		}

		/*
		 * An rm only takes multiple trips through this loop if it has
		 * a data op.  Thus, if the data is sent (or there was none),
		 * we're done with the rm.
		 */
		if (!rm->data.op_active || conn->c_xmit_data_sent) {
			conn->c_xmit_rm = NULL;
			conn->c_xmit_sg = 0;
			conn->c_xmit_hdr_off = 0;
			conn->c_xmit_data_off = 0;
			conn->c_xmit_rdma_sent = 0;
			conn->c_xmit_atomic_sent = 0;
			conn->c_xmit_data_sent = 0;

			rds_message_put(rm);
		}
	}

	if (conn->c_trans->xmit_complete)
		conn->c_trans->xmit_complete(conn);

	release_in_xmit(conn);

	/* Nuke any messages we decided not to retransmit. */
	if (!list_empty(&to_be_dropped)) {
		/* irqs on here, so we can put(), unlike above */
		list_for_each_entry(rm, &to_be_dropped, m_conn_item)
			rds_message_put(rm);
		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
	}

	/*
	 * Other senders can queue a message after we last test the send queue
	 * but before we clear RDS_IN_XMIT.  In that case they'd back off and
	 * not try and send their newly queued message.  We need to check the
	 * send queue after having cleared RDS_IN_XMIT so that their message
	 * doesn't get stuck on the send queue.
	 *
	 * If the transport cannot continue (i.e. ret != 0), then it must
	 * call us when more room is available, such as from the tx
	 * completion handler.
	 */
	if (ret == 0) {
		smp_mb();
		if (!list_empty(&conn->c_send_queue)) {
			rds_stats_inc(s_send_lock_queue_raced);
			goto restart;
		}
	}
out:
	return ret;
}

static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	assert_spin_locked(&rs->rs_lock);

	BUG_ON(rs->rs_snd_bytes < len);
	rs->rs_snd_bytes -= len;

	if (rs->rs_snd_bytes == 0)
		rds_stats_inc(s_send_queue_empty);
}

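/*
 * A transport can supply its own notion of "acked" via is_acked (TCP uses
 * this to compare against its m_ack_seq, see rds_send_drop_acked() below);
 * otherwise we fall back to the RDS header sequence number.
 */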
static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
				    is_acked_func is_acked)
{
	if (is_acked)
		return is_acked(rm, ack);
	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}

/*
 * This is pretty similar to what happens below in the ACK
 * handling code - except that we call here as soon as we get
 * the IB send completion on the RDMA op and the accompanying
 * message.
 */
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_rdma_op *ro;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ro = &rm->rdma;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
	    ro->op_active && ro->op_notify && ro->op_notifier) {
		notifier = ro->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ro->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);

/*
 * Just like above, except looks at atomic op
 */
void rds_atomic_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_atomic_op *ao;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ao = &rm->atomic;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
	    && ao->op_active && ao->op_notify && ao->op_notifier) {
		notifier = ao->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ao->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_atomic_send_complete);

/*
 * This is the same as rds_rdma_send_complete except we
 * don't do any locking - we have all the ingredients (message,
 * socket, socket lock) and can just move the notifier.
 */
static inline void
__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
	struct rm_rdma_op *ro;
	struct rm_atomic_op *ao;

	ro = &rm->rdma;
	if (ro->op_active && ro->op_notify && ro->op_notifier) {
		ro->op_notifier->n_status = status;
		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
		ro->op_notifier = NULL;
	}

	ao = &rm->atomic;
	if (ao->op_active && ao->op_notify && ao->op_notifier) {
		ao->op_notifier->n_status = status;
		list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
		ao->op_notifier = NULL;
	}

	/* No need to wake the app - caller does this */
}

/*
 * This is called from the IB send completion when we detect
 * an RDMA operation that failed with remote access error.
 * So speed is not an issue here.
 */
struct rds_message *rds_send_get_message(struct rds_connection *conn,
					 struct rm_rdma_op *op)
{
	struct rds_message *rm, *tmp, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (&rm->rdma == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			goto out;
		}
	}

	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
		if (&rm->rdma == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			break;
		}
	}

out:
	spin_unlock_irqrestore(&conn->c_lock, flags);

	return found;
}
EXPORT_SYMBOL_GPL(rds_send_get_message);

/*
 * This removes messages from the socket's list if they're on it.  The list
 * argument must be private to the caller, we must be able to modify it
 * without locks.  The messages must have a reference held for their
 * position on the list.  This function will drop that reference after
 * removing the messages from the 'messages' list regardless of whether it
 * found the messages on the socket list or not.
 */
void rds_send_remove_from_sock(struct list_head *messages, int status)
{
	unsigned long flags;
	struct rds_sock *rs = NULL;
	struct rds_message *rm;

	while (!list_empty(messages)) {
		int was_on_sock = 0;

		rm = list_entry(messages->next, struct rds_message,
				m_conn_item);
		list_del_init(&rm->m_conn_item);

		/*
		 * If we see this flag cleared then we're *sure* that someone
		 * else beat us to removing it from the sock.  If we race
		 * with their flag update we'll get the lock and then really
		 * see that the flag has been cleared.
		 *
		 * The message spinlock makes sure nobody clears rm->m_rs
		 * while we're messing with it.  It does not prevent the
		 * message from being removed from the socket, though.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);
		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
			goto unlock_and_drop;

		if (rs != rm->m_rs) {
			if (rs) {
				rds_wake_sk_sleep(rs);
				sock_put(rds_rs_to_sk(rs));
			}
			rs = rm->m_rs;
			sock_hold(rds_rs_to_sk(rs));
		}
		spin_lock(&rs->rs_lock);

		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
			struct rm_rdma_op *ro = &rm->rdma;
			struct rds_notifier *notifier;

			list_del_init(&rm->m_sock_item);
			rds_send_sndbuf_remove(rs, rm);

			if (ro->op_active && ro->op_notifier &&
			    (ro->op_notify || (ro->op_recverr && status))) {
				notifier = ro->op_notifier;
				list_add_tail(&notifier->n_list,
						&rs->rs_notify_queue);
				if (!notifier->n_status)
					notifier->n_status = status;
				rm->rdma.op_notifier = NULL;
			}
			was_on_sock = 1;
			rm->m_rs = NULL;
		}
		spin_unlock(&rs->rs_lock);

unlock_and_drop:
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
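		/* One put for the caller's list reference; the second put
		 * below drops the reference the socket list held if we
		 * cleared RDS_MSG_ON_SOCK above. */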
		rds_message_put(rm);
		if (was_on_sock)
			rds_message_put(rm);
	}

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}

/*
 * Transports call here when they've determined that the receiver queued
 * messages up to, and including, the given sequence number.  Messages are
 * moved to the retrans queue when rds_send_xmit picks them off the send
 * queue. This means that in the TCP case, the message may not have been
 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 *
 * XXX It's not clear to me how this is safely serialized with socket
 * destruction.  Maybe it should bail if it sees SOCK_DEAD.
 */
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (!rds_send_is_acked(rm, ack, is_acked))
			break;

		list_move(&rm->m_conn_item, &list);
		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	}

	/* order flag updates with spin locks */
	if (!list_empty(&list))
		smp_mb__after_clear_bit();

	spin_unlock_irqrestore(&conn->c_lock, flags);

	/* now remove the messages from the sock list as needed */
	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);

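/*
 * Used for RDS_CANCEL_SENT_TO and socket teardown: drop every message
 * queued to 'dest' (or all messages if 'dest' is NULL) from the socket
 * and its connections, completing any notifiers with RDS_RDMA_CANCELED.
 */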
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
	struct rds_message *rm, *tmp;
	struct rds_connection *conn;
	unsigned long flags;
	LIST_HEAD(list);

	/* get all the messages we're dropping under the rs lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
			continue;

		list_move(&rm->m_sock_item, &list);
		rds_send_sndbuf_remove(rs, rm);
		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
	}

	/* order flag updates with the rs lock */
	smp_mb__after_clear_bit();

	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (list_empty(&list))
		return;

	/* Remove the messages from the conn */
	list_for_each_entry(rm, &list, m_sock_item) {

		conn = rm->m_inc.i_conn;

		spin_lock_irqsave(&conn->c_lock, flags);
		/*
		 * Maybe someone else beat us to removing rm from the conn.
		 * If we race with their flag update we'll get the lock and
		 * then really see that the flag has been cleared.
		 */
		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
			spin_unlock_irqrestore(&conn->c_lock, flags);
			continue;
		}
		list_del_init(&rm->m_conn_item);
		spin_unlock_irqrestore(&conn->c_lock, flags);

		/*
		 * Couldn't grab m_rs_lock in top loop (lock ordering),
		 * but we can now.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		rm->m_rs = NULL;
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		rds_message_put(rm);
	}

	rds_wake_sk_sleep(rs);

	while (!list_empty(&list)) {
		rm = list_entry(list.next, struct rds_message, m_sock_item);
		list_del_init(&rm->m_sock_item);

		rds_message_wait(rm);
		rds_message_put(rm);
	}
}

/*
 * we only want this to fire once so we use the caller's 'queued'.  It's
 * possible that another thread can race with us and remove the
 * message from the flow with RDS_CANCEL_SENT_TO.
 */
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
			     struct rds_message *rm, __be16 sport,
			     __be16 dport, int *queued)
{
	unsigned long flags;
	u32 len;

	if (*queued)
		goto out;

	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	/* this is the only place which holds both the socket's rs_lock
	 * and the connection's c_lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	/*
	 * If there is a little space in sndbuf, we don't queue anything,
	 * and userspace gets -EAGAIN. But poll() indicates there's send
	 * room. This can lead to bad behavior (spinning) if snd_bytes isn't
	 * freed up by incoming acks. So we check the *old* value of
	 * rs_snd_bytes here to allow the last msg to exceed the buffer,
	 * and poll() now knows no more data can be sent.
	 */
	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
		rs->rs_snd_bytes += len;

		/* let recv side know we are close to send space exhaustion.
		 * This is probably not the optimal way to do it, as this
		 * means we set the flag on *all* messages as soon as our
		 * throughput hits a certain threshold.
		 */
		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
		rds_message_addref(rm);
		rm->m_rs = rs;

		/* The code ordering is a little weird, but we're
		   trying to minimize the time we hold c_lock */
		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
		rm->m_inc.i_conn = conn;
		rds_message_addref(rm);

		spin_lock(&conn->c_lock);
		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
		list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
		spin_unlock(&conn->c_lock);

		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
			 rm, len, rs, rs->rs_snd_bytes,
			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));

		*queued = 1;
	}

	spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
	return *queued;
}

/*
 * rds_message is getting to be quite complicated, and we'd like to allocate
 * it all in one go. This figures out how big it needs to be up front.
 */
static int rds_rm_size(struct msghdr *msg, int data_len)
{
	struct cmsghdr *cmsg;
	int size = 0;
	int cmsg_groups = 0;
	int retval;

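	/*
	 * cmsg_groups is a small bitmask: bit 0 marks size-contributing ops
	 * (RDMA args, atomics), bit 1 marks cookie-only cmsgs (RDMA_DEST,
	 * RDMA_MAP).  A value of 3 below means the two were mixed, which is
	 * rejected.
	 */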
	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			cmsg_groups |= 1;
			retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
			if (retval < 0)
				return retval;
			size += retval;

			break;

		case RDS_CMSG_RDMA_DEST:
		case RDS_CMSG_RDMA_MAP:
			cmsg_groups |= 2;
			/* these are valid but do not add any size */
			break;

		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
			cmsg_groups |= 1;
			size += sizeof(struct scatterlist);
			break;

		default:
			return -EINVAL;
		}

	}

	size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);

	/* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
	if (cmsg_groups == 3)
		return -EINVAL;

	return size;
}

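/*
 * Walk the user's control messages and attach the requested RDMA and
 * atomic ops to the rm.  *allocated_mr tells the caller that RDMA_MAP
 * created an MR on the fly, so it can be torn down if the send fails.
 */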
static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
			 struct msghdr *msg, int *allocated_mr)
{
	struct cmsghdr *cmsg;
	int ret = 0;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		/* As a side effect, RDMA_DEST and RDMA_MAP will set
		 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
		 */
		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_DEST:
			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_MAP:
			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
			if (!ret)
				*allocated_mr = 1;
			break;
		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
			ret = rds_cmsg_atomic(rs, rm, cmsg);
			break;

		default:
			return -EINVAL;
		}

		if (ret)
			break;
	}

	return ret;
}

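/*
 * Userspace reaches this through an ordinary datagram send on a PF_RDS
 * socket; roughly (an illustrative sketch, error handling omitted):
 *
 *	int fd = socket(PF_RDS, SOCK_SEQPACKET, 0);
 *	bind(fd, (struct sockaddr *)&laddr, sizeof(laddr));
 *	sendto(fd, buf, len, 0, (struct sockaddr *)&faddr, sizeof(faddr));
 *
 * where laddr and faddr are sockaddr_in.  RDMA and atomic operations
 * arrive as SOL_RDS control messages and are parsed by rds_cmsg_send()
 * above.
 */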
int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
		size_t payload_len)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
	__be32 daddr;
	__be16 dport;
	struct rds_message *rm = NULL;
	struct rds_connection *conn;
	int ret = 0;
	int queued = 0, allocated_mr = 0;
	int nonblock = msg->msg_flags & MSG_DONTWAIT;
	long timeo = sock_sndtimeo(sk, nonblock);

	/* As with Linux UDP, mirror BSD error message compatibility */
	/* XXX: Perhaps MSG_MORE someday */
	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
		printk(KERN_INFO "msg_flags 0x%08X\n", msg->msg_flags);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (msg->msg_namelen) {
		/* XXX fail non-unicast destination IPs? */
		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
			ret = -EINVAL;
			goto out;
		}
		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
	} else {
		/* We only care about consistency with ->connect() */
		lock_sock(sk);
		daddr = rs->rs_conn_addr;
		dport = rs->rs_conn_port;
		release_sock(sk);
	}

	/* racing with another thread binding seems ok here */
	if (daddr == 0 || rs->rs_bound_addr == 0) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	/* size of rm including all sgs */
	ret = rds_rm_size(msg, payload_len);
	if (ret < 0)
		goto out;

	rm = rds_message_alloc(ret, GFP_KERNEL);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	/* Attach data to the rm */
	if (payload_len) {
		rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
		ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
		if (ret)
			goto out;
	}
	rm->data.op_active = 1;

	rm->m_daddr = daddr;

	/* rds_conn_create has a spinlock that runs with IRQ off.
	 * Caching the conn in the socket helps a lot. */
	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
		conn = rs->rs_conn;
	else {
		conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
					rs->rs_transport,
					sock->sk->sk_allocation);
		if (IS_ERR(conn)) {
			ret = PTR_ERR(conn);
			goto out;
		}
		rs->rs_conn = conn;
	}

	/* Parse any control messages the user may have included. */
	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
	if (ret)
		goto out;

	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
		if (printk_ratelimit())
			printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
			       &rm->rdma, conn->c_trans->xmit_rdma);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
		if (printk_ratelimit())
			printk(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
			       &rm->atomic, conn->c_trans->xmit_atomic);
		ret = -EOPNOTSUPP;
		goto out;
	}

	rds_conn_connect_if_down(conn);

	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
	if (ret) {
		rs->rs_seen_congestion = 1;
		goto out;
	}

	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
				  dport, &queued)) {
		rds_stats_inc(s_send_queue_full);
		/* XXX make sure this is reasonable */
		if (payload_len > rds_sk_sndbuf(rs)) {
			ret = -EMSGSIZE;
			goto out;
		}
		if (nonblock) {
			ret = -EAGAIN;
			goto out;
		}

		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					rds_send_queue_rm(rs, conn, rm,
							  rs->rs_bound_port,
							  dport,
							  &queued),
					timeo);
		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
			continue;

		ret = timeo;
		if (ret == 0)
			ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * By now we've committed to the send.  We reuse rds_send_worker()
	 * to retry sends in the rds thread if the transport asks us to.
	 */
	rds_stats_inc(s_send_queued);

	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		rds_send_xmit(conn);

	rds_message_put(rm);
	return payload_len;

out:
	/* If the user included an RDMA_MAP cmsg, we allocated an MR on the fly.
	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
	 * or in any other way, we need to destroy the MR again */
	if (allocated_mr)
		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);

	if (rm)
		rds_message_put(rm);
	return ret;
}

/*
 * Reply to a ping packet.
 */
int
rds_send_pong(struct rds_connection *conn, __be16 dport)
{
	struct rds_message *rm;
	unsigned long flags;
	int ret = 0;

	rm = rds_message_alloc(0, GFP_ATOMIC);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	rm->m_daddr = conn->c_faddr;
	rm->data.op_active = 1;

	rds_conn_connect_if_down(conn);

	ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
	if (ret)
		goto out;

	spin_lock_irqsave(&conn->c_lock, flags);
	list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	rds_message_addref(rm);
	rm->m_inc.i_conn = conn;

	rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
				    conn->c_next_tx_seq);
	conn->c_next_tx_seq++;
	spin_unlock_irqrestore(&conn->c_lock, flags);

	rds_stats_inc(s_send_queued);
	rds_stats_inc(s_send_pong);

	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		rds_send_xmit(conn);

	rds_message_put(rm);
	return 0;

out:
	if (rm)
		rds_message_put(rm);
	return ret;
}