/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>
#include <linux/ratelimit.h>
#include <linux/export.h>
#include <linux/sizes.h>

#include "rds.h"

/* When transmitting messages in rds_send_xmit, we need to emerge from
 * time to time and briefly release the CPU. Otherwise the softlock watchdog
 * will kick our shin.
 * Also, it seems fairer to not let one busy connection stall all the
 * others.
 *
 * send_batch_count is the number of times we'll loop in send_xmit. Setting
 * it to 0 will restore the old behavior (where we looped until we had
 * drained the queue).
 */
static int send_batch_count = SZ_1K;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");

static void rds_send_remove_from_sock(struct list_head *messages, int status);

/*
 * Reset the send state. Callers must ensure that this doesn't race with
 * rds_send_xmit().
 */
void rds_send_path_reset(struct rds_conn_path *cp)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;

	if (cp->cp_xmit_rm) {
		rm = cp->cp_xmit_rm;
		cp->cp_xmit_rm = NULL;
		/* Tell the user the RDMA op is no longer mapped by the
		 * transport. This isn't entirely true (it's flushed out
		 * independently) but as the connection is down, there's
		 * no ongoing RDMA to/from that memory */
		rds_message_unmapped(rm);
		rds_message_put(rm);
	}

	cp->cp_xmit_sg = 0;
	cp->cp_xmit_hdr_off = 0;
	cp->cp_xmit_data_off = 0;
	cp->cp_xmit_atomic_sent = 0;
	cp->cp_xmit_rdma_sent = 0;
	cp->cp_xmit_data_sent = 0;

	cp->cp_conn->c_map_queued = 0;

	cp->cp_unacked_packets = rds_sysctl_max_unacked_packets;
	cp->cp_unacked_bytes = rds_sysctl_max_unacked_bytes;

	/* Mark messages as retransmissions, and move them to the send q */
	spin_lock_irqsave(&cp->cp_lock, flags);
	list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
	}
	list_splice_init(&cp->cp_retrans, &cp->cp_send_queue);
	spin_unlock_irqrestore(&cp->cp_lock, flags);
}
EXPORT_SYMBOL_GPL(rds_send_path_reset);

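/* Only one task may transmit on a connection path at a time; the
 * RDS_IN_XMIT bit in cp_flags arbitrates this. acquire_in_xmit() succeeds
 * only for the caller that wins the bit, and release_in_xmit() clears it
 * and wakes anyone sleeping on cp_waitq for the path to become free.
 */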
static int acquire_in_xmit(struct rds_conn_path *cp)
{
	return test_and_set_bit(RDS_IN_XMIT, &cp->cp_flags) == 0;
}

static void release_in_xmit(struct rds_conn_path *cp)
{
	clear_bit(RDS_IN_XMIT, &cp->cp_flags);
	smp_mb__after_atomic();
	/*
	 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
	 * hot path and finding waiters is very rare. We don't want to walk
	 * the system-wide hashed waitqueue buckets in the fast path only to
	 * almost never find waiters.
	 */
	if (waitqueue_active(&cp->cp_waitq))
		wake_up_all(&cp->cp_waitq);
}

/*
 * We're making the conscious trade-off here to only send one message
 * down the connection at a time.
 * Pro:
 *  - tx queueing is a simple fifo list
 *  - reassembly is optional and easily done by transports per conn
 *  - no per flow rx lookup at all, straight to the socket
 *  - less per-frag memory and wire overhead
 * Con:
 *  - queued acks can be delayed behind large messages
 * Depends:
 *  - small message latency is higher behind queued large messages
 *  - large message latency isn't starved by intervening small sends
 */
int rds_send_xmit(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct rds_message *rm;
	unsigned long flags;
	unsigned int tmp;
	struct scatterlist *sg;
	int ret = 0;
	LIST_HEAD(to_be_dropped);
	int batch_count;
	unsigned long send_gen = 0;

restart:
	batch_count = 0;

	/*
	 * sendmsg calls here after having queued its message on the send
	 * queue. We only have one task feeding the connection at a time. If
	 * another thread is already feeding the queue then we back off. This
	 * avoids blocking the caller and trading per-connection data between
	 * caches per message.
	 */
	if (!acquire_in_xmit(cp)) {
		rds_stats_inc(s_send_lock_contention);
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * we record the send generation after doing the xmit acquire.
	 * if someone else manages to jump in and do some work, we'll use
	 * this to avoid a goto restart farther down.
	 *
	 * The acquire_in_xmit() check above ensures that only one
	 * caller can increment c_send_gen at any time.
	 */
	cp->cp_send_gen++;
	send_gen = cp->cp_send_gen;

	/*
	 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
	 * we do the opposite to avoid races.
	 */
	if (!rds_conn_path_up(cp)) {
		release_in_xmit(cp);
		ret = 0;
		goto out;
	}

	if (conn->c_trans->xmit_path_prepare)
		conn->c_trans->xmit_path_prepare(cp);

	/*
	 * spin trying to push headers and data down the connection until
	 * the connection doesn't make forward progress.
	 */
	while (1) {

		rm = cp->cp_xmit_rm;

		/*
		 * If between sending messages, we can send a pending congestion
		 * map update.
		 */
		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
			rm = rds_cong_update_alloc(conn);
			if (IS_ERR(rm)) {
				ret = PTR_ERR(rm);
				break;
			}
			rm->data.op_active = 1;
			rm->m_inc.i_conn_path = cp;
			rm->m_inc.i_conn = cp->cp_conn;

			cp->cp_xmit_rm = rm;
		}

		/*
		 * If not already working on one, grab the next message.
		 *
		 * cp_xmit_rm holds a ref while we're sending this message down
		 * the connection. We can use this ref while holding the
		 * send_sem.. rds_send_reset() is serialized with it.
		 */
		if (!rm) {
			unsigned int len;

			batch_count++;

			/* we want to process as big a batch as we can, but
			 * we also want to avoid softlockups. If we've been
			 * through a lot of messages, let's back off and see
			 * if anyone else jumps in
			 */
			if (batch_count >= send_batch_count)
				goto over_batch;

			spin_lock_irqsave(&cp->cp_lock, flags);

			if (!list_empty(&cp->cp_send_queue)) {
				rm = list_entry(cp->cp_send_queue.next,
						struct rds_message,
						m_conn_item);
				rds_message_addref(rm);

				/*
				 * Move the message from the send queue to the retransmit
				 * list right away.
				 */
				list_move_tail(&rm->m_conn_item,
					       &cp->cp_retrans);
			}

			spin_unlock_irqrestore(&cp->cp_lock, flags);

			if (!rm)
				break;

			/* Unfortunately, the way Infiniband deals with
			 * RDMA to a bad MR key is by moving the entire
			 * queue pair to error state. We could possibly
			 * recover from that, but right now we drop the
			 * connection.
			 * Therefore, we never retransmit messages with RDMA ops.
			 */
			if (rm->rdma.op_active &&
			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
				spin_lock_irqsave(&cp->cp_lock, flags);
				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
					list_move(&rm->m_conn_item, &to_be_dropped);
				spin_unlock_irqrestore(&cp->cp_lock, flags);
				continue;
			}

			/* Require an ACK every once in a while */
			len = ntohl(rm->m_inc.i_hdr.h_len);
			if (cp->cp_unacked_packets == 0 ||
			    cp->cp_unacked_bytes < len) {
				__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

				cp->cp_unacked_packets =
					rds_sysctl_max_unacked_packets;
				cp->cp_unacked_bytes =
					rds_sysctl_max_unacked_bytes;
				rds_stats_inc(s_send_ack_required);
			} else {
				cp->cp_unacked_bytes -= len;
				cp->cp_unacked_packets--;
			}

			cp->cp_xmit_rm = rm;
		}

		/* The transport either sends the whole rdma or none of it */
		if (rm->rdma.op_active && !cp->cp_xmit_rdma_sent) {
			rm->m_final_op = &rm->rdma;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue
			 */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
			if (ret) {
				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
				wake_up_interruptible(&rm->m_flush_wait);
				break;
			}
			cp->cp_xmit_rdma_sent = 1;

		}

		if (rm->atomic.op_active && !cp->cp_xmit_atomic_sent) {
			rm->m_final_op = &rm->atomic;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue
			 */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
			if (ret) {
				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
				wake_up_interruptible(&rm->m_flush_wait);
				break;
			}
			cp->cp_xmit_atomic_sent = 1;

		}

		/*
		 * A number of cases require an RDS header to be sent
		 * even if there is no data.
		 * We permit 0-byte sends; rds-ping depends on this.
		 * However, if there are exclusively attached silent ops,
		 * we skip the hdr/data send, to enable silent operation.
		 */
		if (rm->data.op_nents == 0) {
			int ops_present;
			int all_ops_are_silent = 1;

			ops_present = (rm->atomic.op_active || rm->rdma.op_active);
			if (rm->atomic.op_active && !rm->atomic.op_silent)
				all_ops_are_silent = 0;
			if (rm->rdma.op_active && !rm->rdma.op_silent)
				all_ops_are_silent = 0;

			if (ops_present && all_ops_are_silent
			    && !rm->m_rdma_cookie)
				rm->data.op_active = 0;
		}

		if (rm->data.op_active && !cp->cp_xmit_data_sent) {
			rm->m_final_op = &rm->data;

			ret = conn->c_trans->xmit(conn, rm,
						  cp->cp_xmit_hdr_off,
						  cp->cp_xmit_sg,
						  cp->cp_xmit_data_off);
			if (ret <= 0)
				break;

			if (cp->cp_xmit_hdr_off < sizeof(struct rds_header)) {
				tmp = min_t(int, ret,
					    sizeof(struct rds_header) -
					    cp->cp_xmit_hdr_off);
				cp->cp_xmit_hdr_off += tmp;
				ret -= tmp;
			}

			sg = &rm->data.op_sg[cp->cp_xmit_sg];
			while (ret) {
				tmp = min_t(int, ret, sg->length -
						      cp->cp_xmit_data_off);
				cp->cp_xmit_data_off += tmp;
				ret -= tmp;
				if (cp->cp_xmit_data_off == sg->length) {
					cp->cp_xmit_data_off = 0;
					sg++;
					cp->cp_xmit_sg++;
					BUG_ON(ret != 0 && cp->cp_xmit_sg ==
					       rm->data.op_nents);
				}
			}

			if (cp->cp_xmit_hdr_off == sizeof(struct rds_header) &&
			    (cp->cp_xmit_sg == rm->data.op_nents))
				cp->cp_xmit_data_sent = 1;
		}

		/*
		 * A rm will only take multiple times through this loop
		 * if there is a data op. Thus, if the data is sent (or there was
		 * none), then we're done with the rm.
		 */
		if (!rm->data.op_active || cp->cp_xmit_data_sent) {
			cp->cp_xmit_rm = NULL;
			cp->cp_xmit_sg = 0;
			cp->cp_xmit_hdr_off = 0;
			cp->cp_xmit_data_off = 0;
			cp->cp_xmit_rdma_sent = 0;
			cp->cp_xmit_atomic_sent = 0;
			cp->cp_xmit_data_sent = 0;

			rds_message_put(rm);
		}
	}

over_batch:
	if (conn->c_trans->xmit_path_complete)
		conn->c_trans->xmit_path_complete(cp);
	release_in_xmit(cp);

	/* Nuke any messages we decided not to retransmit. */
	if (!list_empty(&to_be_dropped)) {
		/* irqs on here, so we can put(), unlike above */
		list_for_each_entry(rm, &to_be_dropped, m_conn_item)
			rds_message_put(rm);
		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
	}

	/*
	 * Other senders can queue a message after we last test the send queue
	 * but before we clear RDS_IN_XMIT. In that case they'd back off and
	 * not try and send their newly queued message. We need to check the
	 * send queue after having cleared RDS_IN_XMIT so that their message
	 * doesn't get stuck on the send queue.
	 *
	 * If the transport cannot continue (i.e. ret != 0), then it must
	 * call us when more room is available, such as from the tx
	 * completion handler.
	 *
	 * We have an extra generation check here so that if someone manages
	 * to jump in after our release_in_xmit, we'll see that they have done
	 * some work and we will skip our goto
	 */
	if (ret == 0) {
		smp_mb();
		if ((test_bit(0, &conn->c_map_queued) ||
		     !list_empty(&cp->cp_send_queue)) &&
		    send_gen == cp->cp_send_gen) {
			rds_stats_inc(s_send_lock_queue_raced);
			if (batch_count < send_batch_count)
				goto restart;
			queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
		}
	}
out:
	return ret;
}
EXPORT_SYMBOL_GPL(rds_send_xmit);

static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	assert_spin_locked(&rs->rs_lock);

	BUG_ON(rs->rs_snd_bytes < len);
	rs->rs_snd_bytes -= len;

	if (rs->rs_snd_bytes == 0)
		rds_stats_inc(s_send_queue_empty);
}

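/* A transport may supply its own is_acked callback (TCP matches against
 * m_ack_seq, see rds_send_path_drop_acked below); by default a message is
 * considered acked once its header sequence number is <= the acked value.
 */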
static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
				    is_acked_func is_acked)
{
	if (is_acked)
		return is_acked(rm, ack);
	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}

/*
 * This is pretty similar to what happens below in the ACK
 * handling code - except that we call here as soon as we get
 * the IB send completion on the RDMA op and the accompanying
 * message.
 */
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_rdma_op *ro;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ro = &rm->rdma;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
	    ro->op_active && ro->op_notify && ro->op_notifier) {
		notifier = ro->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ro->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);

/*
 * Just like above, except looks at atomic op
 */
void rds_atomic_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_atomic_op *ao;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ao = &rm->atomic;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
	    && ao->op_active && ao->op_notify && ao->op_notifier) {
		notifier = ao->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ao->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_atomic_send_complete);

/*
 * This is the same as rds_rdma_send_complete except we
 * don't do any locking - we have all the ingredients (message,
 * socket, socket lock) and can just move the notifier.
 */
static inline void
__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
	struct rm_rdma_op *ro;
	struct rm_atomic_op *ao;

	ro = &rm->rdma;
	if (ro->op_active && ro->op_notify && ro->op_notifier) {
		ro->op_notifier->n_status = status;
		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
		ro->op_notifier = NULL;
	}

	ao = &rm->atomic;
	if (ao->op_active && ao->op_notify && ao->op_notifier) {
		ao->op_notifier->n_status = status;
		list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
		ao->op_notifier = NULL;
	}

	/* No need to wake the app - caller does this */
}

/*
 * This removes messages from the socket's list if they're on it. The list
 * argument must be private to the caller, we must be able to modify it
 * without locks. The messages must have a reference held for their
 * position on the list. This function will drop that reference after
 * removing the messages from the 'messages' list regardless of if it found
 * the messages on the socket list or not.
 */
static void rds_send_remove_from_sock(struct list_head *messages, int status)
{
	unsigned long flags;
	struct rds_sock *rs = NULL;
	struct rds_message *rm;

	while (!list_empty(messages)) {
		int was_on_sock = 0;

		rm = list_entry(messages->next, struct rds_message,
				m_conn_item);
		list_del_init(&rm->m_conn_item);

		/*
		 * If we see this flag cleared then we're *sure* that someone
		 * else beat us to removing it from the sock. If we race
		 * with their flag update we'll get the lock and then really
		 * see that the flag has been cleared.
		 *
		 * The message spinlock makes sure nobody clears rm->m_rs
		 * while we're messing with it. It does not prevent the
		 * message from being removed from the socket, though.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);
		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
			goto unlock_and_drop;

		if (rs != rm->m_rs) {
			if (rs) {
				rds_wake_sk_sleep(rs);
				sock_put(rds_rs_to_sk(rs));
			}
			rs = rm->m_rs;
			if (rs)
				sock_hold(rds_rs_to_sk(rs));
		}
		if (!rs)
			goto unlock_and_drop;
		spin_lock(&rs->rs_lock);

		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
			struct rm_rdma_op *ro = &rm->rdma;
			struct rds_notifier *notifier;

			list_del_init(&rm->m_sock_item);
			rds_send_sndbuf_remove(rs, rm);

			if (ro->op_active && ro->op_notifier &&
			    (ro->op_notify || (ro->op_recverr && status))) {
				notifier = ro->op_notifier;
				list_add_tail(&notifier->n_list,
						&rs->rs_notify_queue);
				if (!notifier->n_status)
					notifier->n_status = status;
				rm->rdma.op_notifier = NULL;
			}
			was_on_sock = 1;
			rm->m_rs = NULL;
		}
		spin_unlock(&rs->rs_lock);

unlock_and_drop:
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
		rds_message_put(rm);
		if (was_on_sock)
			rds_message_put(rm);
	}

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}

/*
 * Transports call here when they've determined that the receiver queued
 * messages up to, and including, the given sequence number. Messages are
 * moved to the retrans queue when rds_send_xmit picks them off the send
 * queue. This means that in the TCP case, the message may not have been
 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 */
void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
			      is_acked_func is_acked)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&cp->cp_lock, flags);

	list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
		if (!rds_send_is_acked(rm, ack, is_acked))
			break;

		list_move(&rm->m_conn_item, &list);
		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	}

	/* order flag updates with spin locks */
	if (!list_empty(&list))
		smp_mb__after_atomic();

	spin_unlock_irqrestore(&cp->cp_lock, flags);

	/* now remove the messages from the sock list as needed */
	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_path_drop_acked);

void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	rds_send_path_drop_acked(&conn->c_path[0], ack, is_acked);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);

void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
	struct rds_message *rm, *tmp;
	struct rds_connection *conn;
	struct rds_conn_path *cp;
	unsigned long flags;
	LIST_HEAD(list);

	/* get all the messages we're dropping under the rs lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
			continue;

		list_move(&rm->m_sock_item, &list);
		rds_send_sndbuf_remove(rs, rm);
		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
	}

	/* order flag updates with the rs lock */
	smp_mb__after_atomic();

	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (list_empty(&list))
		return;

	/* Remove the messages from the conn */
	list_for_each_entry(rm, &list, m_sock_item) {

		conn = rm->m_inc.i_conn;
		if (conn->c_trans->t_mp_capable)
			cp = rm->m_inc.i_conn_path;
		else
			cp = &conn->c_path[0];

		spin_lock_irqsave(&cp->cp_lock, flags);
		/*
		 * Maybe someone else beat us to removing rm from the conn.
		 * If we race with their flag update we'll get the lock and
		 * then really see that the flag has been cleared.
		 */
		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
			spin_unlock_irqrestore(&cp->cp_lock, flags);
			spin_lock_irqsave(&rm->m_rs_lock, flags);
			rm->m_rs = NULL;
			spin_unlock_irqrestore(&rm->m_rs_lock, flags);
			continue;
		}
		list_del_init(&rm->m_conn_item);
		spin_unlock_irqrestore(&cp->cp_lock, flags);

		/*
		 * Couldn't grab m_rs_lock in top loop (lock ordering),
		 * but we can now.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		rm->m_rs = NULL;
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		rds_message_put(rm);
	}

	rds_wake_sk_sleep(rs);

	while (!list_empty(&list)) {
		rm = list_entry(list.next, struct rds_message, m_sock_item);
		list_del_init(&rm->m_sock_item);
		rds_message_wait(rm);

		/* just in case the code above skipped this message
		 * because RDS_MSG_ON_CONN wasn't set, run it again here
		 * taking m_rs_lock is the only thing that keeps us
		 * from racing with ack processing.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		rm->m_rs = NULL;
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		rds_message_put(rm);
	}
}

/*
 * we only want this to fire once so we use the caller's 'queued'. It's
 * possible that another thread can race with us and remove the
 * message from the flow with RDS_CANCEL_SENT_TO.
 */
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
			     struct rds_conn_path *cp,
			     struct rds_message *rm, __be16 sport,
			     __be16 dport, int *queued)
{
	unsigned long flags;
	u32 len;

	if (*queued)
		goto out;

	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	/* this is the only place which holds both the socket's rs_lock
	 * and the connection's c_lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	/*
	 * If there is a little space in sndbuf, we don't queue anything,
	 * and userspace gets -EAGAIN. But poll() indicates there's send
	 * room. This can lead to bad behavior (spinning) if snd_bytes isn't
	 * freed up by incoming acks. So we check the *old* value of
	 * rs_snd_bytes here to allow the last msg to exceed the buffer,
	 * and poll() now knows no more data can be sent.
	 */
	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
		rs->rs_snd_bytes += len;

		/* let recv side know we are close to send space exhaustion.
		 * This is probably not the optimal way to do it, as this
		 * means we set the flag on *all* messages as soon as our
		 * throughput hits a certain threshold.
		 */
		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
		rds_message_addref(rm);
		rm->m_rs = rs;

		/* The code ordering is a little weird, but we're
		   trying to minimize the time we hold c_lock */
		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
		rm->m_inc.i_conn = conn;
		rm->m_inc.i_conn_path = cp;
		rds_message_addref(rm);

		spin_lock(&cp->cp_lock);
		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(cp->cp_next_tx_seq++);
		list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
		spin_unlock(&cp->cp_lock);

		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
			 rm, len, rs, rs->rs_snd_bytes,
			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));

		*queued = 1;
	}

	spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
	return *queued;
}

/*
 * rds_message is getting to be quite complicated, and we'd like to allocate
 * it all in one go. This figures out how big it needs to be up front.
 */
static int rds_rm_size(struct msghdr *msg, int data_len)
{
	struct cmsghdr *cmsg;
	int size = 0;
	int cmsg_groups = 0;
	int retval;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			cmsg_groups |= 1;
			retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
			if (retval < 0)
				return retval;
			size += retval;

			break;

		case RDS_CMSG_RDMA_DEST:
		case RDS_CMSG_RDMA_MAP:
			cmsg_groups |= 2;
			/* these are valid but do not add any size */
			break;

		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
		case RDS_CMSG_MASKED_ATOMIC_CSWP:
		case RDS_CMSG_MASKED_ATOMIC_FADD:
			cmsg_groups |= 1;
			size += sizeof(struct scatterlist);
			break;

		default:
			return -EINVAL;
		}

	}

	size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);

	/* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
	if (cmsg_groups == 3)
		return -EINVAL;

	return size;
}

static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
			 struct msghdr *msg, int *allocated_mr)
{
	struct cmsghdr *cmsg;
	int ret = 0;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		/* As a side effect, RDMA_DEST and RDMA_MAP will set
		 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
		 */
		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_DEST:
			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_MAP:
			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
			if (!ret)
				*allocated_mr = 1;
			break;
		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
		case RDS_CMSG_MASKED_ATOMIC_CSWP:
		case RDS_CMSG_MASKED_ATOMIC_FADD:
			ret = rds_cmsg_atomic(rs, rm, cmsg);
			break;

		default:
			return -EINVAL;
		}

		if (ret)
			break;
	}

	return ret;
}

static void rds_send_ping(struct rds_connection *conn);

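/* Choose the connection path an outgoing message will use on a
 * multipath-capable transport. Until the peer's path count (c_npaths) is
 * known we hash over RDS_MPATH_WORKERS; a nonzero hash triggers the
 * handshake ping and waits for the reply, and if the peer turns out to
 * support only a single path we fall back to path 0.
 */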
static int rds_send_mprds_hash(struct rds_sock *rs, struct rds_connection *conn)
{
	int hash;

	if (conn->c_npaths == 0)
		hash = RDS_MPATH_HASH(rs, RDS_MPATH_WORKERS);
	else
		hash = RDS_MPATH_HASH(rs, conn->c_npaths);
	if (conn->c_npaths == 0 && hash != 0) {
		rds_send_ping(conn);

		if (conn->c_npaths == 0) {
			wait_event_interruptible(conn->c_hs_waitq,
						 (conn->c_npaths != 0));
		}
		if (conn->c_npaths == 1)
			hash = 0;
	}
	return hash;
}

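/* Top-level sendmsg(): resolve the destination, size and allocate the
 * rds_message, copy in the payload and any control messages, find (or
 * create) the connection and path, queue the message and then try to push
 * it down the transport via rds_send_xmit().
 */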
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
	__be32 daddr;
	__be16 dport;
	struct rds_message *rm = NULL;
	struct rds_connection *conn;
	int ret = 0;
	int queued = 0, allocated_mr = 0;
	int nonblock = msg->msg_flags & MSG_DONTWAIT;
	long timeo = sock_sndtimeo(sk, nonblock);
	struct rds_conn_path *cpath;

	/* Mirror Linux UDP mirror of BSD error message compatibility */
	/* XXX: Perhaps MSG_MORE someday */
	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (msg->msg_namelen) {
		/* XXX fail non-unicast destination IPs? */
		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
			ret = -EINVAL;
			goto out;
		}
		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
	} else {
		/* We only care about consistency with ->connect() */
		lock_sock(sk);
		daddr = rs->rs_conn_addr;
		dport = rs->rs_conn_port;
		release_sock(sk);
	}

	lock_sock(sk);
	if (daddr == 0 || rs->rs_bound_addr == 0) {
		release_sock(sk);
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}
	release_sock(sk);

	if (payload_len > rds_sk_sndbuf(rs)) {
		ret = -EMSGSIZE;
		goto out;
	}

	/* size of rm including all sgs */
	ret = rds_rm_size(msg, payload_len);
	if (ret < 0)
		goto out;

	rm = rds_message_alloc(ret, GFP_KERNEL);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	/* Attach data to the rm */
	if (payload_len) {
		rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
		if (!rm->data.op_sg) {
			ret = -ENOMEM;
			goto out;
		}
		ret = rds_message_copy_from_user(rm, &msg->msg_iter);
		if (ret)
			goto out;
	}
	rm->data.op_active = 1;

	rm->m_daddr = daddr;

	/* rds_conn_create has a spinlock that runs with IRQ off.
	 * Caching the conn in the socket helps a lot. */
	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
		conn = rs->rs_conn;
	else {
		conn = rds_conn_create_outgoing(sock_net(sock->sk),
						rs->rs_bound_addr, daddr,
						rs->rs_transport,
						sock->sk->sk_allocation);
		if (IS_ERR(conn)) {
			ret = PTR_ERR(conn);
			goto out;
		}
		rs->rs_conn = conn;
	}

	/* Parse any control messages the user may have included. */
	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
	if (ret)
		goto out;

	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
		printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
				   &rm->rdma, conn->c_trans->xmit_rdma);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
		printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
				   &rm->atomic, conn->c_trans->xmit_atomic);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (conn->c_trans->t_mp_capable)
		cpath = &conn->c_path[rds_send_mprds_hash(rs, conn)];
	else
		cpath = &conn->c_path[0];

	rds_conn_path_connect_if_down(cpath);

	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
	if (ret) {
		rs->rs_seen_congestion = 1;
		goto out;
	}
	while (!rds_send_queue_rm(rs, conn, cpath, rm, rs->rs_bound_port,
				  dport, &queued)) {
		rds_stats_inc(s_send_queue_full);

		if (nonblock) {
			ret = -EAGAIN;
			goto out;
		}

		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					rds_send_queue_rm(rs, conn, cpath, rm,
							  rs->rs_bound_port,
							  dport,
							  &queued),
					timeo);
		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
			continue;

		ret = timeo;
		if (ret == 0)
			ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * By now we've committed to the send. We reuse rds_send_worker()
	 * to retry sends in the rds thread if the transport asks us to.
	 */
	rds_stats_inc(s_send_queued);

	ret = rds_send_xmit(cpath);
	if (ret == -ENOMEM || ret == -EAGAIN)
		queue_delayed_work(rds_wq, &cpath->cp_send_w, 1);

	rds_message_put(rm);
	return payload_len;

out:
	/* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
	 * or in any other way, we need to destroy the MR again */
	if (allocated_mr)
		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);

	if (rm)
		rds_message_put(rm);
	return ret;
}

/*
 * send out a probe. Can be shared by rds_send_ping,
 * rds_send_pong, rds_send_hb.
 * rds_send_hb should use h_flags
 *   RDS_FLAG_HB_PING|RDS_FLAG_ACK_REQUIRED
 * or
 *   RDS_FLAG_HB_PONG|RDS_FLAG_ACK_REQUIRED
 */
int
rds_send_probe(struct rds_conn_path *cp, __be16 sport,
	       __be16 dport, u8 h_flags)
{
	struct rds_message *rm;
	unsigned long flags;
	int ret = 0;

	rm = rds_message_alloc(0, GFP_ATOMIC);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	rm->m_daddr = cp->cp_conn->c_faddr;
	rm->data.op_active = 1;

	rds_conn_path_connect_if_down(cp);

	ret = rds_cong_wait(cp->cp_conn->c_fcong, dport, 1, NULL);
	if (ret)
		goto out;

	spin_lock_irqsave(&cp->cp_lock, flags);
	list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	rds_message_addref(rm);
	rm->m_inc.i_conn = cp->cp_conn;
	rm->m_inc.i_conn_path = cp;

	rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport,
				    cp->cp_next_tx_seq);
	rm->m_inc.i_hdr.h_flags |= h_flags;
	cp->cp_next_tx_seq++;

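	/* Handshake probes advertise the number of paths we support
	 * (RDS_MPATH_WORKERS) via the RDS_EXTHDR_NPATHS extension header.
	 */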
	if (RDS_HS_PROBE(sport, dport) && cp->cp_conn->c_trans->t_mp_capable) {
		u16 npaths = RDS_MPATH_WORKERS;

		rds_message_add_extension(&rm->m_inc.i_hdr,
					  RDS_EXTHDR_NPATHS, &npaths,
					  sizeof(npaths));
	}
	spin_unlock_irqrestore(&cp->cp_lock, flags);

	rds_stats_inc(s_send_queued);
	rds_stats_inc(s_send_pong);

	/* schedule the send work on rds_wq */
	queue_delayed_work(rds_wq, &cp->cp_send_w, 1);

	rds_message_put(rm);
	return 0;

out:
	if (rm)
		rds_message_put(rm);
	return ret;
}

int
rds_send_pong(struct rds_conn_path *cp, __be16 dport)
{
	return rds_send_probe(cp, 0, dport, 0);
}

void
rds_send_ping(struct rds_connection *conn)
{
	unsigned long flags;
	struct rds_conn_path *cp = &conn->c_path[0];

	spin_lock_irqsave(&cp->cp_lock, flags);
	if (conn->c_ping_triggered) {
		spin_unlock_irqrestore(&cp->cp_lock, flags);
		return;
	}
	conn->c_ping_triggered = 1;
	spin_unlock_irqrestore(&cp->cp_lock, flags);
	rds_send_probe(&conn->c_path[0], RDS_FLAG_PROBE_PORT, 0, 0);
}