/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>
#include <linux/ratelimit.h>
#include <linux/export.h>
#include <linux/sizes.h>

#include "rds_single_path.h"
#include "rds.h"

/* When transmitting messages in rds_send_xmit, we need to emerge from
 * time to time and briefly release the CPU. Otherwise the softlockup
 * watchdog will kick our shin.
 * Also, it seems fairer to not let one busy connection stall all the
 * others.
 *
 * send_batch_count is the number of times we'll loop in send_xmit. Setting
 * it to 0 will restore the old behavior (where we looped until we had
 * drained the queue).
 */
static int send_batch_count = SZ_1K;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
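/* The default can only be overridden when the module is loaded; the 0444
 * permission keeps the parameter read-only through sysfs afterwards. As a
 * usage sketch (assuming RDS is built as the "rds" module rather than
 * built in):
 *
 *	modprobe rds send_batch_count=2048
 *
 * raises the batch limit, and
 *
 *	cat /sys/module/rds/parameters/send_batch_count
 *
 * shows the value currently in effect.
 */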

static void rds_send_remove_from_sock(struct list_head *messages, int status);

/*
 * Reset the send state. Callers must ensure that this doesn't race with
 * rds_send_xmit().
 */
static void rds_send_path_reset(struct rds_conn_path *cp)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;

	if (cp->cp_xmit_rm) {
		rm = cp->cp_xmit_rm;
		cp->cp_xmit_rm = NULL;
		/* Tell the user the RDMA op is no longer mapped by the
		 * transport. This isn't entirely true (it's flushed out
		 * independently) but as the connection is down, there's
		 * no ongoing RDMA to/from that memory */
		rds_message_unmapped(rm);
		rds_message_put(rm);
	}

	cp->cp_xmit_sg = 0;
	cp->cp_xmit_hdr_off = 0;
	cp->cp_xmit_data_off = 0;
	cp->cp_xmit_atomic_sent = 0;
	cp->cp_xmit_rdma_sent = 0;
	cp->cp_xmit_data_sent = 0;

	cp->cp_conn->c_map_queued = 0;

	cp->cp_unacked_packets = rds_sysctl_max_unacked_packets;
	cp->cp_unacked_bytes = rds_sysctl_max_unacked_bytes;

	/* Mark messages as retransmissions, and move them to the send q */
	spin_lock_irqsave(&cp->cp_lock, flags);
	list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
	}
	list_splice_init(&cp->cp_retrans, &cp->cp_send_queue);
	spin_unlock_irqrestore(&cp->cp_lock, flags);
}

void rds_send_reset(struct rds_connection *conn)
{
	rds_send_path_reset(&conn->c_path[0]);
}
EXPORT_SYMBOL_GPL(rds_send_reset);

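/* acquire_in_xmit()/release_in_xmit() act as a per-connection transmit
 * trylock: RDS_IN_XMIT in c_flags is the lock bit, acquire_in_xmit()
 * returns nonzero only for the caller that actually set it, and
 * release_in_xmit() clears it and then wakes anyone sleeping on c_waitq
 * (e.g. rds_conn_shutdown()) once the clear is visible.
 */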
static int acquire_in_xmit(struct rds_connection *conn)
{
	return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
}

static void release_in_xmit(struct rds_connection *conn)
{
	clear_bit(RDS_IN_XMIT, &conn->c_flags);
	smp_mb__after_atomic();
	/*
	 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
	 * hot path and finding waiters is very rare. We don't want to walk
	 * the system-wide hashed waitqueue buckets in the fast path only to
	 * almost never find waiters.
	 */
	if (waitqueue_active(&conn->c_waitq))
		wake_up_all(&conn->c_waitq);
}

/*
 * We're making the conscious trade-off here to only send one message
 * down the connection at a time.
 * Pro:
 *  - tx queueing is a simple fifo list
 *  - reassembly is optional and easily done by transports per conn
 *  - no per flow rx lookup at all, straight to the socket
 *  - less per-frag memory and wire overhead
 * Con:
 *  - queued acks can be delayed behind large messages
 * Depends:
 *  - small message latency is higher behind queued large messages
 *  - large message latency isn't starved by intervening small sends
 */
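/* Return values, as the callers in this file use them: 0 means we drained
 * the queue, found the connection down, or deferred to another sender that
 * raced in; -ENOMEM means another task already held RDS_IN_XMIT; any other
 * negative errno comes from the transport. rds_sendmsg() below requeues
 * the send work on -ENOMEM/-EAGAIN instead of failing the send.
 */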
int rds_send_xmit(struct rds_connection *conn)
{
	struct rds_message *rm;
	unsigned long flags;
	unsigned int tmp;
	struct scatterlist *sg;
	int ret = 0;
	LIST_HEAD(to_be_dropped);
	int batch_count;
	unsigned long send_gen = 0;

restart:
	batch_count = 0;

	/*
	 * sendmsg calls here after having queued its message on the send
	 * queue.  We only have one task feeding the connection at a time.  If
	 * another thread is already feeding the queue then we back off.  This
	 * avoids blocking the caller and trading per-connection data between
	 * caches per message.
	 */
	if (!acquire_in_xmit(conn)) {
		rds_stats_inc(s_send_lock_contention);
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * we record the send generation after doing the xmit acquire.
	 * if someone else manages to jump in and do some work, we'll use
	 * this to avoid a goto restart farther down.
	 *
	 * The acquire_in_xmit() check above ensures that only one
	 * caller can increment c_send_gen at any time.
	 */
	conn->c_send_gen++;
	send_gen = conn->c_send_gen;

	/*
	 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
	 * we do the opposite to avoid races.
	 */
	if (!rds_conn_up(conn)) {
		release_in_xmit(conn);
		ret = 0;
		goto out;
	}

	if (conn->c_trans->xmit_prepare)
		conn->c_trans->xmit_prepare(conn);

	/*
	 * spin trying to push headers and data down the connection until
	 * the connection doesn't make forward progress.
	 */
	while (1) {

		rm = conn->c_xmit_rm;

		/*
		 * If between sending messages, we can send a pending congestion
		 * map update.
		 */
		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
			rm = rds_cong_update_alloc(conn);
			if (IS_ERR(rm)) {
				ret = PTR_ERR(rm);
				break;
			}
			rm->data.op_active = 1;

			conn->c_xmit_rm = rm;
		}

		/*
		 * If not already working on one, grab the next message.
		 *
		 * c_xmit_rm holds a ref while we're sending this message down
		 * the connection.  We can use this ref while holding the
		 * send_sem; rds_send_reset() is serialized with it.
		 */
		if (!rm) {
			unsigned int len;

			batch_count++;

			/* we want to process as big a batch as we can, but
			 * we also want to avoid softlockups. If we've been
			 * through a lot of messages, let's back off and see
			 * if anyone else jumps in
			 */
			if (batch_count >= send_batch_count)
				goto over_batch;

			spin_lock_irqsave(&conn->c_lock, flags);

			if (!list_empty(&conn->c_send_queue)) {
				rm = list_entry(conn->c_send_queue.next,
						struct rds_message,
						m_conn_item);
				rds_message_addref(rm);

				/*
				 * Move the message from the send queue to the retransmit
				 * list right away.
				 */
				list_move_tail(&rm->m_conn_item, &conn->c_retrans);
			}

			spin_unlock_irqrestore(&conn->c_lock, flags);

			if (!rm)
				break;

			/* Unfortunately, the way Infiniband deals with
			 * RDMA to a bad MR key is by moving the entire
			 * queue pair to error state. We could possibly
			 * recover from that, but right now we drop the
			 * connection.
			 * Therefore, we never retransmit messages with RDMA ops.
			 */
			if (rm->rdma.op_active &&
			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
				spin_lock_irqsave(&conn->c_lock, flags);
				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
					list_move(&rm->m_conn_item, &to_be_dropped);
				spin_unlock_irqrestore(&conn->c_lock, flags);
				continue;
			}

			/* Require an ACK every once in a while */
			len = ntohl(rm->m_inc.i_hdr.h_len);
			if (conn->c_unacked_packets == 0 ||
			    conn->c_unacked_bytes < len) {
				__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

				conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
				conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
				rds_stats_inc(s_send_ack_required);
			} else {
				conn->c_unacked_bytes -= len;
				conn->c_unacked_packets--;
			}

			conn->c_xmit_rm = rm;
		}

		/* The transport either sends the whole rdma or none of it */
		if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
			rm->m_final_op = &rm->rdma;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue
			 */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
			if (ret) {
				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
				wake_up_interruptible(&rm->m_flush_wait);
				break;
			}
			conn->c_xmit_rdma_sent = 1;

		}

		if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
			rm->m_final_op = &rm->atomic;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue
			 */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
			if (ret) {
				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
				wake_up_interruptible(&rm->m_flush_wait);
				break;
			}
			conn->c_xmit_atomic_sent = 1;

		}

		/*
		 * A number of cases require an RDS header to be sent
		 * even if there is no data.
		 * We permit 0-byte sends; rds-ping depends on this.
		 * However, if there are exclusively attached silent ops,
		 * we skip the hdr/data send, to enable silent operation.
		 */
		if (rm->data.op_nents == 0) {
			int ops_present;
			int all_ops_are_silent = 1;

			ops_present = (rm->atomic.op_active || rm->rdma.op_active);
			if (rm->atomic.op_active && !rm->atomic.op_silent)
				all_ops_are_silent = 0;
			if (rm->rdma.op_active && !rm->rdma.op_silent)
				all_ops_are_silent = 0;

			if (ops_present && all_ops_are_silent
			    && !rm->m_rdma_cookie)
				rm->data.op_active = 0;
		}

		if (rm->data.op_active && !conn->c_xmit_data_sent) {
			rm->m_final_op = &rm->data;
			ret = conn->c_trans->xmit(conn, rm,
						  conn->c_xmit_hdr_off,
						  conn->c_xmit_sg,
						  conn->c_xmit_data_off);
			if (ret <= 0)
				break;

			if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
				tmp = min_t(int, ret,
					    sizeof(struct rds_header) -
					    conn->c_xmit_hdr_off);
				conn->c_xmit_hdr_off += tmp;
				ret -= tmp;
			}

			sg = &rm->data.op_sg[conn->c_xmit_sg];
			while (ret) {
				tmp = min_t(int, ret, sg->length -
						      conn->c_xmit_data_off);
				conn->c_xmit_data_off += tmp;
				ret -= tmp;
				if (conn->c_xmit_data_off == sg->length) {
					conn->c_xmit_data_off = 0;
					sg++;
					conn->c_xmit_sg++;
					BUG_ON(ret != 0 &&
					       conn->c_xmit_sg == rm->data.op_nents);
				}
			}

			if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
			    (conn->c_xmit_sg == rm->data.op_nents))
				conn->c_xmit_data_sent = 1;
		}

		/*
		 * A rm will only take multiple times through this loop
		 * if there is a data op. Thus, if the data is sent (or there was
		 * none), then we're done with the rm.
		 */
		if (!rm->data.op_active || conn->c_xmit_data_sent) {
			conn->c_xmit_rm = NULL;
			conn->c_xmit_sg = 0;
			conn->c_xmit_hdr_off = 0;
			conn->c_xmit_data_off = 0;
			conn->c_xmit_rdma_sent = 0;
			conn->c_xmit_atomic_sent = 0;
			conn->c_xmit_data_sent = 0;

			rds_message_put(rm);
		}
	}

over_batch:
	if (conn->c_trans->xmit_complete)
		conn->c_trans->xmit_complete(conn);
	release_in_xmit(conn);

	/* Nuke any messages we decided not to retransmit. */
	if (!list_empty(&to_be_dropped)) {
		/* irqs on here, so we can put(), unlike above */
		list_for_each_entry(rm, &to_be_dropped, m_conn_item)
			rds_message_put(rm);
		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
	}

	/*
	 * Other senders can queue a message after we last test the send queue
	 * but before we clear RDS_IN_XMIT.  In that case they'd back off and
	 * not try and send their newly queued message.  We need to check the
	 * send queue after having cleared RDS_IN_XMIT so that their message
	 * doesn't get stuck on the send queue.
	 *
	 * If the transport cannot continue (i.e. ret != 0), then it must
	 * call us when more room is available, such as from the tx
	 * completion handler.
	 *
	 * We have an extra generation check here so that if someone manages
	 * to jump in after our release_in_xmit, we'll see that they have done
	 * some work and we will skip our goto.
	 */
	if (ret == 0) {
		smp_mb();
		if ((test_bit(0, &conn->c_map_queued) ||
		     !list_empty(&conn->c_send_queue)) &&
		    send_gen == conn->c_send_gen) {
			rds_stats_inc(s_send_lock_queue_raced);
			if (batch_count < send_batch_count)
				goto restart;
			queue_delayed_work(rds_wq, &conn->c_send_w, 1);
		}
	}
out:
	return ret;
}
EXPORT_SYMBOL_GPL(rds_send_xmit);

static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	assert_spin_locked(&rs->rs_lock);

	BUG_ON(rs->rs_snd_bytes < len);
	rs->rs_snd_bytes -= len;

	if (rs->rs_snd_bytes == 0)
		rds_stats_inc(s_send_queue_empty);
}

static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
				    is_acked_func is_acked)
{
	if (is_acked)
		return is_acked(rm, ack);
	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}
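/*
 * is_acked_func lets a transport supply its own notion of "acked", as
 * rds_tcp does via tcp_is_acked (see the comment above
 * rds_send_path_drop_acked() below). A hypothetical callback would look
 * roughly like:
 *
 *	static int example_is_acked(struct rds_message *rm, uint64_t ack)
 *	{
 *		if (!test_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags))
 *			return 0;
 *		return example_seq_of(rm) <= ack;
 *	}
 *
 * where example_seq_of() stands in for however the transport recorded the
 * message's wire sequence (rds_tcp uses rm->m_ack_seq). When is_acked is
 * NULL, the plain header-sequence comparison above is used.
 */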

/*
 * This is pretty similar to what happens below in the ACK
 * handling code - except that we call here as soon as we get
 * the IB send completion on the RDMA op and the accompanying
 * message.
 */
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_rdma_op *ro;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ro = &rm->rdma;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
	    ro->op_active && ro->op_notify && ro->op_notifier) {
		notifier = ro->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ro->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);

/*
 * Just like above, except looks at atomic op
 */
void rds_atomic_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_atomic_op *ao;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ao = &rm->atomic;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
	    && ao->op_active && ao->op_notify && ao->op_notifier) {
		notifier = ao->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ao->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_atomic_send_complete);

/*
 * This is the same as rds_rdma_send_complete except we
 * don't do any locking - we have all the ingredients (message,
 * socket, socket lock) and can just move the notifier.
 */
static inline void
__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
	struct rm_rdma_op *ro;
	struct rm_atomic_op *ao;

	ro = &rm->rdma;
	if (ro->op_active && ro->op_notify && ro->op_notifier) {
		ro->op_notifier->n_status = status;
		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
		ro->op_notifier = NULL;
	}

	ao = &rm->atomic;
	if (ao->op_active && ao->op_notify && ao->op_notifier) {
		ao->op_notifier->n_status = status;
		list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
		ao->op_notifier = NULL;
	}

	/* No need to wake the app - caller does this */
}

/*
 * This removes messages from the socket's list if they're on it.  The list
 * argument must be private to the caller, we must be able to modify it
 * without locks.  The messages must have a reference held for their
 * position on the list.  This function will drop that reference after
 * removing the messages from the 'messages' list regardless of if it found
 * the messages on the socket list or not.
 */
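/* Note the two rds_message_put()s at the bottom of the loop below: the
 * first drops the caller's list reference described above; the second,
 * taken only when was_on_sock is set, drops the reference the message
 * held for its spot on the socket's send queue.
 */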
static void rds_send_remove_from_sock(struct list_head *messages, int status)
{
	unsigned long flags;
	struct rds_sock *rs = NULL;
	struct rds_message *rm;

	while (!list_empty(messages)) {
		int was_on_sock = 0;

		rm = list_entry(messages->next, struct rds_message,
				m_conn_item);
		list_del_init(&rm->m_conn_item);

		/*
		 * If we see this flag cleared then we're *sure* that someone
		 * else beat us to removing it from the sock.  If we race
		 * with their flag update we'll get the lock and then really
		 * see that the flag has been cleared.
		 *
		 * The message spinlock makes sure nobody clears rm->m_rs
		 * while we're messing with it. It does not prevent the
		 * message from being removed from the socket, though.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);
		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
			goto unlock_and_drop;

		if (rs != rm->m_rs) {
			if (rs) {
				rds_wake_sk_sleep(rs);
				sock_put(rds_rs_to_sk(rs));
			}
			rs = rm->m_rs;
			if (rs)
				sock_hold(rds_rs_to_sk(rs));
		}
		if (!rs)
			goto unlock_and_drop;
		spin_lock(&rs->rs_lock);

		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
			struct rm_rdma_op *ro = &rm->rdma;
			struct rds_notifier *notifier;

			list_del_init(&rm->m_sock_item);
			rds_send_sndbuf_remove(rs, rm);

			if (ro->op_active && ro->op_notifier &&
			    (ro->op_notify || (ro->op_recverr && status))) {
				notifier = ro->op_notifier;
				list_add_tail(&notifier->n_list,
						&rs->rs_notify_queue);
				if (!notifier->n_status)
					notifier->n_status = status;
				rm->rdma.op_notifier = NULL;
			}
			was_on_sock = 1;
			rm->m_rs = NULL;
		}
		spin_unlock(&rs->rs_lock);

unlock_and_drop:
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
		rds_message_put(rm);
		if (was_on_sock)
			rds_message_put(rm);
	}

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}

/*
 * Transports call here when they've determined that the receiver queued
 * messages up to, and including, the given sequence number.  Messages are
 * moved to the retrans queue when rds_send_xmit picks them off the send
 * queue. This means that in the TCP case, the message may not have been
 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 */
void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
			      is_acked_func is_acked)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&cp->cp_lock, flags);

	list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
		if (!rds_send_is_acked(rm, ack, is_acked))
			break;

		list_move(&rm->m_conn_item, &list);
		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	}

	/* order flag updates with spin locks */
	if (!list_empty(&list))
		smp_mb__after_atomic();

	spin_unlock_irqrestore(&cp->cp_lock, flags);

	/* now remove the messages from the sock list as needed */
	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_path_drop_acked);

void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	rds_send_path_drop_acked(&conn->c_path[0], ack, is_acked);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);

void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
	struct rds_message *rm, *tmp;
	struct rds_connection *conn;
	unsigned long flags;
	LIST_HEAD(list);

	/* get all the messages we're dropping under the rs lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
			continue;

		list_move(&rm->m_sock_item, &list);
		rds_send_sndbuf_remove(rs, rm);
		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
	}

	/* order flag updates with the rs lock */
	smp_mb__after_atomic();

	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (list_empty(&list))
		return;

	/* Remove the messages from the conn */
	list_for_each_entry(rm, &list, m_sock_item) {

		conn = rm->m_inc.i_conn;

		spin_lock_irqsave(&conn->c_lock, flags);
		/*
		 * Maybe someone else beat us to removing rm from the conn.
		 * If we race with their flag update we'll get the lock and
		 * then really see that the flag has been cleared.
		 */
		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
			spin_unlock_irqrestore(&conn->c_lock, flags);
			spin_lock_irqsave(&rm->m_rs_lock, flags);
			rm->m_rs = NULL;
			spin_unlock_irqrestore(&rm->m_rs_lock, flags);
			continue;
		}
		list_del_init(&rm->m_conn_item);
		spin_unlock_irqrestore(&conn->c_lock, flags);

		/*
		 * Couldn't grab m_rs_lock in top loop (lock ordering),
		 * but we can now.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		rm->m_rs = NULL;
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		rds_message_put(rm);
	}

	rds_wake_sk_sleep(rs);

	while (!list_empty(&list)) {
		rm = list_entry(list.next, struct rds_message, m_sock_item);
		list_del_init(&rm->m_sock_item);
		rds_message_wait(rm);

		/* just in case the code above skipped this message
		 * because RDS_MSG_ON_CONN wasn't set, run it again here
		 * taking m_rs_lock is the only thing that keeps us
		 * from racing with ack processing.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		rm->m_rs = NULL;
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		rds_message_put(rm);
	}
}

/*
 * we only want this to fire once so we use the caller's 'queued'.  It's
 * possible that another thread can race with us and remove the
 * message from the flow with RDS_CANCEL_SENT_TO.
 */
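/* A queued message takes two references here: one pinning it on the
 * socket's rs_send_queue and one pinning it on the connection's
 * c_send_queue; each later list removal drops its matching reference.
 */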
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
			     struct rds_message *rm, __be16 sport,
			     __be16 dport, int *queued)
{
	unsigned long flags;
	u32 len;

	if (*queued)
		goto out;

	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	/* this is the only place which holds both the socket's rs_lock
	 * and the connection's c_lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	/*
	 * If there is a little space in sndbuf, we don't queue anything,
	 * and userspace gets -EAGAIN. But poll() indicates there's send
	 * room. This can lead to bad behavior (spinning) if snd_bytes isn't
	 * freed up by incoming acks. So we check the *old* value of
	 * rs_snd_bytes here to allow the last msg to exceed the buffer,
	 * and poll() now knows no more data can be sent.
	 */
	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
		rs->rs_snd_bytes += len;

		/* let recv side know we are close to send space exhaustion.
		 * This is probably not the optimal way to do it, as this
		 * means we set the flag on *all* messages as soon as our
		 * throughput hits a certain threshold.
		 */
		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
		rds_message_addref(rm);
		rm->m_rs = rs;

		/* The code ordering is a little weird, but we're
		   trying to minimize the time we hold c_lock */
		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
		rm->m_inc.i_conn = conn;
		rds_message_addref(rm);

		spin_lock(&conn->c_lock);
		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
		list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
		spin_unlock(&conn->c_lock);

		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
			 rm, len, rs, rs->rs_snd_bytes,
			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));

		*queued = 1;
	}

	spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
	return *queued;
}

/*
 * rds_message is getting to be quite complicated, and we'd like to allocate
 * it all in one go. This figures out how big it needs to be up front.
 */
static int rds_rm_size(struct msghdr *msg, int data_len)
{
	struct cmsghdr *cmsg;
	int size = 0;
	int cmsg_groups = 0;
	int retval;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			cmsg_groups |= 1;
			retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
			if (retval < 0)
				return retval;
			size += retval;

			break;

		case RDS_CMSG_RDMA_DEST:
		case RDS_CMSG_RDMA_MAP:
			cmsg_groups |= 2;
			/* these are valid but do not add any size */
			break;

		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
		case RDS_CMSG_MASKED_ATOMIC_CSWP:
		case RDS_CMSG_MASKED_ATOMIC_FADD:
			cmsg_groups |= 1;
			size += sizeof(struct scatterlist);
			break;

		default:
			return -EINVAL;
		}

	}

	size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);

	/* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
	if (cmsg_groups == 3)
		return -EINVAL;

	return size;
}

static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
			 struct msghdr *msg, int *allocated_mr)
{
	struct cmsghdr *cmsg;
	int ret = 0;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		/* As a side effect, RDMA_DEST and RDMA_MAP will set
		 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
		 */
		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_DEST:
			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_MAP:
			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
			if (!ret)
				*allocated_mr = 1;
			break;
		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
		case RDS_CMSG_MASKED_ATOMIC_CSWP:
		case RDS_CMSG_MASKED_ATOMIC_FADD:
			ret = rds_cmsg_atomic(rs, rm, cmsg);
			break;

		default:
			return -EINVAL;
		}

		if (ret)
			break;
	}

	return ret;
}

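/* Userspace view (a sketch, not code from this file): an RDS application
 * typically does something like
 *
 *	fd = socket(PF_RDS, SOCK_SEQPACKET, 0);
 *	bind(fd, (struct sockaddr *)&local_sin, sizeof(local_sin));
 *	sendto(fd, buf, len, 0, (struct sockaddr *)&dest_sin, sizeof(dest_sin));
 *
 * and ends up here with the destination in msg->msg_name and any SOL_RDS
 * control messages (RDMA args, MR maps, atomics) in msg->msg_control.
 */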
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
	__be32 daddr;
	__be16 dport;
	struct rds_message *rm = NULL;
	struct rds_connection *conn;
	int ret = 0;
	int queued = 0, allocated_mr = 0;
	int nonblock = msg->msg_flags & MSG_DONTWAIT;
	long timeo = sock_sndtimeo(sk, nonblock);

	/* Mirror how Linux UDP mirrors BSD's error message compatibility */
	/* XXX: Perhaps MSG_MORE someday */
	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (msg->msg_namelen) {
		/* XXX fail non-unicast destination IPs? */
		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
			ret = -EINVAL;
			goto out;
		}
		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
	} else {
		/* We only care about consistency with ->connect() */
		lock_sock(sk);
		daddr = rs->rs_conn_addr;
		dport = rs->rs_conn_port;
		release_sock(sk);
	}

	lock_sock(sk);
	if (daddr == 0 || rs->rs_bound_addr == 0) {
		release_sock(sk);
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}
	release_sock(sk);

	if (payload_len > rds_sk_sndbuf(rs)) {
		ret = -EMSGSIZE;
		goto out;
	}

	/* size of rm including all sgs */
	ret = rds_rm_size(msg, payload_len);
	if (ret < 0)
		goto out;

	rm = rds_message_alloc(ret, GFP_KERNEL);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	/* Attach data to the rm */
	if (payload_len) {
		rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
		if (!rm->data.op_sg) {
			ret = -ENOMEM;
			goto out;
		}
		ret = rds_message_copy_from_user(rm, &msg->msg_iter);
		if (ret)
			goto out;
	}
	rm->data.op_active = 1;

	rm->m_daddr = daddr;

	/* rds_conn_create has a spinlock that runs with IRQ off.
	 * Caching the conn in the socket helps a lot. */
	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
		conn = rs->rs_conn;
	else {
		conn = rds_conn_create_outgoing(sock_net(sock->sk),
						rs->rs_bound_addr, daddr,
						rs->rs_transport,
						sock->sk->sk_allocation);
		if (IS_ERR(conn)) {
			ret = PTR_ERR(conn);
			goto out;
		}
		rs->rs_conn = conn;
	}

	/* Parse any control messages the user may have included. */
	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
	if (ret)
		goto out;

	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
		printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
				   &rm->rdma, conn->c_trans->xmit_rdma);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
		printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
				   &rm->atomic, conn->c_trans->xmit_atomic);
		ret = -EOPNOTSUPP;
		goto out;
	}

	rds_conn_connect_if_down(conn);

	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
	if (ret) {
		rs->rs_seen_congestion = 1;
		goto out;
	}

	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
				  dport, &queued)) {
		rds_stats_inc(s_send_queue_full);

		if (nonblock) {
			ret = -EAGAIN;
			goto out;
		}

		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					rds_send_queue_rm(rs, conn, rm,
							  rs->rs_bound_port,
							  dport,
							  &queued),
					timeo);
		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
			continue;

		ret = timeo;
		if (ret == 0)
			ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * By now we've committed to the send.  We reuse rds_send_worker()
	 * to retry sends in the rds thread if the transport asks us to.
	 */
	rds_stats_inc(s_send_queued);

	ret = rds_send_xmit(conn);
	if (ret == -ENOMEM || ret == -EAGAIN)
		queue_delayed_work(rds_wq, &conn->c_send_w, 1);

	rds_message_put(rm);
	return payload_len;

out:
	/* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
	 * or in any other way, we need to destroy the MR again */
	if (allocated_mr)
		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);

	if (rm)
		rds_message_put(rm);
	return ret;
}

/*
 * Reply to a ping packet.
 */
int
rds_send_pong(struct rds_connection *conn, __be16 dport)
{
	struct rds_message *rm;
	unsigned long flags;
	int ret = 0;

	rm = rds_message_alloc(0, GFP_ATOMIC);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	rm->m_daddr = conn->c_faddr;
	rm->data.op_active = 1;

	rds_conn_connect_if_down(conn);

	ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
	if (ret)
		goto out;

	spin_lock_irqsave(&conn->c_lock, flags);
	list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	rds_message_addref(rm);
	rm->m_inc.i_conn = conn;

	rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
				    conn->c_next_tx_seq);
	conn->c_next_tx_seq++;
	spin_unlock_irqrestore(&conn->c_lock, flags);

	rds_stats_inc(s_send_queued);
	rds_stats_inc(s_send_pong);

	/* schedule the send work on rds_wq */
	queue_delayed_work(rds_wq, &conn->c_send_w, 1);

	rds_message_put(rm);
	return 0;

out:
	if (rm)
		rds_message_put(rm);
	return ret;
}