/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>
#include <linux/ratelimit.h>
#include <linux/export.h>
#include <linux/sizes.h>

#include "rds_single_path.h"
#include "rds.h"

/* When transmitting messages in rds_send_xmit, we need to emerge from
 * time to time and briefly release the CPU. Otherwise the softlockup
 * watchdog will kick our shin.
 * Also, it seems fairer to not let one busy connection stall all the
 * others.
 *
 * send_batch_count is the number of times we'll loop in send_xmit. Setting
 * it to 0 will restore the old behavior (where we looped until we had
 * drained the queue).
 */
static int send_batch_count = SZ_1K;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
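/* A minimal tuning sketch (assuming this file is built into rds.ko):
 *
 *	modprobe rds send_batch_count=2048
 *
 * With 0444 permissions the current value is readable at
 * /sys/module/rds/parameters/send_batch_count but cannot be changed
 * at runtime.
 */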

static void rds_send_remove_from_sock(struct list_head *messages, int status);

/*
 * Reset the send state. Callers must ensure that this doesn't race with
 * rds_send_xmit().
 */
static void rds_send_path_reset(struct rds_conn_path *cp)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;

	if (cp->cp_xmit_rm) {
		rm = cp->cp_xmit_rm;
		cp->cp_xmit_rm = NULL;
		/* Tell the user the RDMA op is no longer mapped by the
		 * transport. This isn't entirely true (it's flushed out
		 * independently) but as the connection is down, there's
		 * no ongoing RDMA to/from that memory */
		rds_message_unmapped(rm);
		rds_message_put(rm);
	}

	cp->cp_xmit_sg = 0;
	cp->cp_xmit_hdr_off = 0;
	cp->cp_xmit_data_off = 0;
	cp->cp_xmit_atomic_sent = 0;
	cp->cp_xmit_rdma_sent = 0;
	cp->cp_xmit_data_sent = 0;

	cp->cp_conn->c_map_queued = 0;

	cp->cp_unacked_packets = rds_sysctl_max_unacked_packets;
	cp->cp_unacked_bytes = rds_sysctl_max_unacked_bytes;

	/* Mark messages as retransmissions, and move them to the send q */
	spin_lock_irqsave(&cp->cp_lock, flags);
	list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
	}
	list_splice_init(&cp->cp_retrans, &cp->cp_send_queue);
	spin_unlock_irqrestore(&cp->cp_lock, flags);
}

void rds_send_reset(struct rds_connection *conn)
{
	rds_send_path_reset(&conn->c_path[0]);
}
EXPORT_SYMBOL_GPL(rds_send_reset);

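/* RDS_IN_XMIT acts as a simple transmit lock: the first caller to set the
 * bit wins and becomes the lone sender for this connection until it calls
 * release_in_xmit().
 */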
static int acquire_in_xmit(struct rds_connection *conn)
{
	return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
}

static void release_in_xmit(struct rds_connection *conn)
{
	clear_bit(RDS_IN_XMIT, &conn->c_flags);
	smp_mb__after_atomic();
	/*
	 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
	 * hot path and finding waiters is very rare. We don't want to walk
	 * the system-wide hashed waitqueue buckets in the fast path only to
	 * almost never find waiters.
	 */
	if (waitqueue_active(&conn->c_waitq))
		wake_up_all(&conn->c_waitq);
}

/*
 * We're making the conscious trade-off here to only send one message
 * down the connection at a time.
 * Pro:
 *  - tx queueing is a simple fifo list
 *  - reassembly is optional and easily done by transports per conn
 *  - no per flow rx lookup at all, straight to the socket
 *  - less per-frag memory and wire overhead
 * Con:
 *  - queued acks can be delayed behind large messages
 * Depends:
 *  - small message latency is higher behind queued large messages
 *  - large message latency isn't starved by intervening small sends
 */
int rds_send_xmit(struct rds_connection *conn)
{
	struct rds_message *rm;
	unsigned long flags;
	unsigned int tmp;
	struct scatterlist *sg;
	int ret = 0;
	LIST_HEAD(to_be_dropped);
	int batch_count;
	unsigned long send_gen = 0;

restart:
	batch_count = 0;

	/*
	 * sendmsg calls here after having queued its message on the send
	 * queue. We only have one task feeding the connection at a time. If
	 * another thread is already feeding the queue then we back off. This
	 * avoids blocking the caller and trading per-connection data between
	 * caches per message.
	 */
	if (!acquire_in_xmit(conn)) {
		rds_stats_inc(s_send_lock_contention);
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * we record the send generation after doing the xmit acquire.
	 * if someone else manages to jump in and do some work, we'll use
	 * this to avoid a goto restart farther down.
	 *
	 * The acquire_in_xmit() check above ensures that only one
	 * caller can increment c_send_gen at any time.
	 */
	conn->c_send_gen++;
	send_gen = conn->c_send_gen;

	/*
	 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
	 * we do the opposite to avoid races.
	 */
	if (!rds_conn_up(conn)) {
		release_in_xmit(conn);
		ret = 0;
		goto out;
	}

	if (conn->c_trans->xmit_prepare)
		conn->c_trans->xmit_prepare(conn);

	/*
	 * spin trying to push headers and data down the connection until
	 * the connection doesn't make forward progress.
	 */
	while (1) {

		rm = conn->c_xmit_rm;

		/*
		 * If between sending messages, we can send a pending congestion
		 * map update.
		 */
		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
			rm = rds_cong_update_alloc(conn);
			if (IS_ERR(rm)) {
				ret = PTR_ERR(rm);
				break;
			}
			rm->data.op_active = 1;

			conn->c_xmit_rm = rm;
		}

		/*
		 * If not already working on one, grab the next message.
		 *
		 * c_xmit_rm holds a ref while we're sending this message down
		 * the connection. We can use this ref while holding
		 * RDS_IN_XMIT; rds_send_reset() is serialized with it.
		 */
		if (!rm) {
			unsigned int len;

			batch_count++;

			/* we want to process as big a batch as we can, but
			 * we also want to avoid softlockups. If we've been
			 * through a lot of messages, let's back off and see
			 * if anyone else jumps in
			 */
			if (batch_count >= send_batch_count)
				goto over_batch;

			spin_lock_irqsave(&conn->c_lock, flags);

			if (!list_empty(&conn->c_send_queue)) {
				rm = list_entry(conn->c_send_queue.next,
						struct rds_message,
						m_conn_item);
				rds_message_addref(rm);

				/*
				 * Move the message from the send queue to the retransmit
				 * list right away.
				 */
				list_move_tail(&rm->m_conn_item, &conn->c_retrans);
			}

			spin_unlock_irqrestore(&conn->c_lock, flags);

			if (!rm)
				break;

			/* Unfortunately, the way InfiniBand deals with
			 * RDMA to a bad MR key is by moving the entire
			 * queue pair to error state. We could possibly
			 * recover from that, but right now we drop the
			 * connection.
			 * Therefore, we never retransmit messages with RDMA ops.
			 */
			if (rm->rdma.op_active &&
			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
				spin_lock_irqsave(&conn->c_lock, flags);
				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
					list_move(&rm->m_conn_item, &to_be_dropped);
				spin_unlock_irqrestore(&conn->c_lock, flags);
				continue;
			}

			/* Require an ACK every once in a while */
			len = ntohl(rm->m_inc.i_hdr.h_len);
			if (conn->c_unacked_packets == 0 ||
			    conn->c_unacked_bytes < len) {
				__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

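				/* We just demanded an ACK, so refill the
				 * unacked budget from the sysctl limits; the
				 * next demand comes after another full window
				 * of packets/bytes.
				 */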
				conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
				conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
				rds_stats_inc(s_send_ack_required);
			} else {
				conn->c_unacked_bytes -= len;
				conn->c_unacked_packets--;
			}

			conn->c_xmit_rm = rm;
		}

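		/* From here each message moves through a fixed pipeline:
		 * the rdma op, then the atomic op, then the header and data.
		 * The c_xmit_*_sent flags make every stage idempotent when
		 * we come back through this loop for the same rm.
		 */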
		/* The transport either sends the whole rdma or none of it */
		if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
			rm->m_final_op = &rm->rdma;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue
			 */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
			if (ret) {
				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
				wake_up_interruptible(&rm->m_flush_wait);
				break;
			}
			conn->c_xmit_rdma_sent = 1;

		}

		if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
			rm->m_final_op = &rm->atomic;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue
			 */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
			if (ret) {
				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
				wake_up_interruptible(&rm->m_flush_wait);
				break;
			}
			conn->c_xmit_atomic_sent = 1;

		}

		/*
		 * A number of cases require an RDS header to be sent
		 * even if there is no data.
		 * We permit 0-byte sends; rds-ping depends on this.
		 * However, if there are exclusively attached silent ops,
		 * we skip the hdr/data send, to enable silent operation.
		 */
		if (rm->data.op_nents == 0) {
			int ops_present;
			int all_ops_are_silent = 1;

			ops_present = (rm->atomic.op_active || rm->rdma.op_active);
			if (rm->atomic.op_active && !rm->atomic.op_silent)
				all_ops_are_silent = 0;
			if (rm->rdma.op_active && !rm->rdma.op_silent)
				all_ops_are_silent = 0;

			if (ops_present && all_ops_are_silent
			    && !rm->m_rdma_cookie)
				rm->data.op_active = 0;
		}

		if (rm->data.op_active && !conn->c_xmit_data_sent) {
			rm->m_final_op = &rm->data;
			ret = conn->c_trans->xmit(conn, rm,
						  conn->c_xmit_hdr_off,
						  conn->c_xmit_sg,
						  conn->c_xmit_data_off);
			if (ret <= 0)
				break;

			if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
				tmp = min_t(int, ret,
					    sizeof(struct rds_header) -
					    conn->c_xmit_hdr_off);
				conn->c_xmit_hdr_off += tmp;
				ret -= tmp;
			}
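			/* Whatever is left of ret is payload: walk the
			 * scatterlist, advancing c_xmit_data_off within a
			 * fragment and c_xmit_sg across fragments.
			 */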

			sg = &rm->data.op_sg[conn->c_xmit_sg];
			while (ret) {
				tmp = min_t(int, ret, sg->length -
						      conn->c_xmit_data_off);
				conn->c_xmit_data_off += tmp;
				ret -= tmp;
				if (conn->c_xmit_data_off == sg->length) {
					conn->c_xmit_data_off = 0;
					sg++;
					conn->c_xmit_sg++;
					BUG_ON(ret != 0 &&
					       conn->c_xmit_sg == rm->data.op_nents);
				}
			}

			if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
			    (conn->c_xmit_sg == rm->data.op_nents))
				conn->c_xmit_data_sent = 1;
		}

		/*
		 * An rm will only take multiple trips through this loop
		 * if there is a data op. Thus, if the data is sent (or there was
		 * none), then we're done with the rm.
		 */
		if (!rm->data.op_active || conn->c_xmit_data_sent) {
			conn->c_xmit_rm = NULL;
			conn->c_xmit_sg = 0;
			conn->c_xmit_hdr_off = 0;
			conn->c_xmit_data_off = 0;
			conn->c_xmit_rdma_sent = 0;
			conn->c_xmit_atomic_sent = 0;
			conn->c_xmit_data_sent = 0;

			rds_message_put(rm);
		}
	}

over_batch:
	if (conn->c_trans->xmit_complete)
		conn->c_trans->xmit_complete(conn);
	release_in_xmit(conn);

	/* Nuke any messages we decided not to retransmit. */
	if (!list_empty(&to_be_dropped)) {
		/* irqs on here, so we can put(), unlike above */
		list_for_each_entry(rm, &to_be_dropped, m_conn_item)
			rds_message_put(rm);
		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
	}

	/*
	 * Other senders can queue a message after we last test the send queue
	 * but before we clear RDS_IN_XMIT. In that case they'd back off and
	 * not try and send their newly queued message. We need to check the
	 * send queue after having cleared RDS_IN_XMIT so that their message
	 * doesn't get stuck on the send queue.
	 *
	 * If the transport cannot continue (i.e. ret != 0), then it must
	 * call us when more room is available, such as from the tx
	 * completion handler.
	 *
	 * We have an extra generation check here so that if someone manages
	 * to jump in after our release_in_xmit, we'll see that they have done
	 * some work and we will skip our goto.
	 */
	if (ret == 0) {
		smp_mb();
		if ((test_bit(0, &conn->c_map_queued) ||
		     !list_empty(&conn->c_send_queue)) &&
		    send_gen == conn->c_send_gen) {
			rds_stats_inc(s_send_lock_queue_raced);
			if (batch_count < send_batch_count)
				goto restart;
			queue_delayed_work(rds_wq, &conn->c_send_w, 1);
		}
	}
out:
	return ret;
}
EXPORT_SYMBOL_GPL(rds_send_xmit);

static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	assert_spin_locked(&rs->rs_lock);

	BUG_ON(rs->rs_snd_bytes < len);
	rs->rs_snd_bytes -= len;

	if (rs->rs_snd_bytes == 0)
		rds_stats_inc(s_send_queue_empty);
}

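/* With no transport-specific is_acked callback we fall back to comparing
 * header sequence numbers directly against the acked sequence.
 */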
static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
				    is_acked_func is_acked)
{
	if (is_acked)
		return is_acked(rm, ack);
	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}

/*
 * This is pretty similar to what happens below in the ACK
 * handling code - except that we call here as soon as we get
 * the IB send completion on the RDMA op and the accompanying
 * message.
 */
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_rdma_op *ro;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ro = &rm->rdma;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
	    ro->op_active && ro->op_notify && ro->op_notifier) {
		notifier = ro->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ro->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);

/*
 * Just like above, except it looks at the atomic op.
 */
void rds_atomic_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_atomic_op *ao;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ao = &rm->atomic;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
	    && ao->op_active && ao->op_notify && ao->op_notifier) {
		notifier = ao->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ao->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_atomic_send_complete);

/*
 * This is the same as rds_rdma_send_complete except we
 * don't do any locking - we have all the ingredients (message,
 * socket, socket lock) and can just move the notifier.
 */
static inline void
__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
	struct rm_rdma_op *ro;
	struct rm_atomic_op *ao;

	ro = &rm->rdma;
	if (ro->op_active && ro->op_notify && ro->op_notifier) {
		ro->op_notifier->n_status = status;
		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
		ro->op_notifier = NULL;
	}

	ao = &rm->atomic;
	if (ao->op_active && ao->op_notify && ao->op_notifier) {
		ao->op_notifier->n_status = status;
		list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
		ao->op_notifier = NULL;
	}

	/* No need to wake the app - caller does this */
}

/*
 * This removes messages from the socket's list if they're on it. The list
 * argument must be private to the caller, we must be able to modify it
 * without locks. The messages must have a reference held for their
 * position on the list. This function will drop that reference after
 * removing the messages from the 'messages' list regardless of if it found
 * the messages on the socket list or not.
 */
static void rds_send_remove_from_sock(struct list_head *messages, int status)
{
	unsigned long flags;
	struct rds_sock *rs = NULL;
	struct rds_message *rm;

	while (!list_empty(messages)) {
		int was_on_sock = 0;

		rm = list_entry(messages->next, struct rds_message,
				m_conn_item);
		list_del_init(&rm->m_conn_item);

		/*
		 * If we see this flag cleared then we're *sure* that someone
		 * else beat us to removing it from the sock. If we race
		 * with their flag update we'll get the lock and then really
		 * see that the flag has been cleared.
		 *
		 * The message spinlock makes sure nobody clears rm->m_rs
		 * while we're messing with it. It does not prevent the
		 * message from being removed from the socket, though.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);
		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
			goto unlock_and_drop;

		if (rs != rm->m_rs) {
			if (rs) {
				rds_wake_sk_sleep(rs);
				sock_put(rds_rs_to_sk(rs));
			}
			rs = rm->m_rs;
			if (rs)
				sock_hold(rds_rs_to_sk(rs));
		}
		if (!rs)
			goto unlock_and_drop;
		spin_lock(&rs->rs_lock);

		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
			struct rm_rdma_op *ro = &rm->rdma;
			struct rds_notifier *notifier;

			list_del_init(&rm->m_sock_item);
			rds_send_sndbuf_remove(rs, rm);

			if (ro->op_active && ro->op_notifier &&
			    (ro->op_notify || (ro->op_recverr && status))) {
				notifier = ro->op_notifier;
				list_add_tail(&notifier->n_list,
						&rs->rs_notify_queue);
				if (!notifier->n_status)
					notifier->n_status = status;
				rm->rdma.op_notifier = NULL;
			}
			was_on_sock = 1;
			rm->m_rs = NULL;
		}
		spin_unlock(&rs->rs_lock);

unlock_and_drop:
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
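		/* Drop the ref held for the caller's private list; when the
		 * message was also found on the socket queue, drop the ref
		 * that position held too.
		 */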
		rds_message_put(rm);
		if (was_on_sock)
			rds_message_put(rm);
	}

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}

/*
 * Transports call here when they've determined that the receiver queued
 * messages up to, and including, the given sequence number. Messages are
 * moved to the retrans queue when rds_send_xmit picks them off the send
 * queue. This means that in the TCP case, the message may not have been
 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 */
void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
			      is_acked_func is_acked)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&cp->cp_lock, flags);

	list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
		if (!rds_send_is_acked(rm, ack, is_acked))
			break;

		list_move(&rm->m_conn_item, &list);
		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	}

	/* order flag updates with spin locks */
	if (!list_empty(&list))
		smp_mb__after_atomic();

	spin_unlock_irqrestore(&cp->cp_lock, flags);

	/* now remove the messages from the sock list as needed */
	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_path_drop_acked);

void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	rds_send_path_drop_acked(&conn->c_path[0], ack, is_acked);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);

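/* Drop every message queued on this socket, or only those addressed to
 * *dest when dest is non-NULL. Used by the RDS_CANCEL_SENT_TO socket
 * option and on socket teardown.
 */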
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
	struct rds_message *rm, *tmp;
	struct rds_connection *conn;
	unsigned long flags;
	LIST_HEAD(list);

	/* get all the messages we're dropping under the rs lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
			continue;

		list_move(&rm->m_sock_item, &list);
		rds_send_sndbuf_remove(rs, rm);
		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
	}

	/* order flag updates with the rs lock */
	smp_mb__after_atomic();

	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (list_empty(&list))
		return;

	/* Remove the messages from the conn */
	list_for_each_entry(rm, &list, m_sock_item) {

		conn = rm->m_inc.i_conn;

		spin_lock_irqsave(&conn->c_lock, flags);
		/*
		 * Maybe someone else beat us to removing rm from the conn.
		 * If we race with their flag update we'll get the lock and
		 * then really see that the flag has been cleared.
		 */
		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
			spin_unlock_irqrestore(&conn->c_lock, flags);
			spin_lock_irqsave(&rm->m_rs_lock, flags);
			rm->m_rs = NULL;
			spin_unlock_irqrestore(&rm->m_rs_lock, flags);
			continue;
		}
		list_del_init(&rm->m_conn_item);
		spin_unlock_irqrestore(&conn->c_lock, flags);

		/*
		 * Couldn't grab m_rs_lock in the top loop (lock ordering),
		 * but we can now.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		rm->m_rs = NULL;
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		rds_message_put(rm);
	}

	rds_wake_sk_sleep(rs);

	while (!list_empty(&list)) {
		rm = list_entry(list.next, struct rds_message, m_sock_item);
		list_del_init(&rm->m_sock_item);
		rds_message_wait(rm);

		/* just in case the code above skipped this message
		 * because RDS_MSG_ON_CONN wasn't set, run it again here;
		 * taking m_rs_lock is the only thing that keeps us
		 * from racing with ack processing.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		rm->m_rs = NULL;
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		rds_message_put(rm);
	}
}

/*
 * we only want this to fire once so we use the caller's 'queued'. It's
 * possible that another thread can race with us and remove the
 * message from the flow with RDS_CANCEL_SENT_TO.
 */
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
			     struct rds_message *rm, __be16 sport,
			     __be16 dport, int *queued)
{
	unsigned long flags;
	u32 len;

	if (*queued)
		goto out;

	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	/* this is the only place which holds both the socket's rs_lock
	 * and the connection's c_lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	/*
	 * If there is a little space in sndbuf, we don't queue anything,
	 * and userspace gets -EAGAIN. But poll() indicates there's send
	 * room. This can lead to bad behavior (spinning) if snd_bytes isn't
	 * freed up by incoming acks. So we check the *old* value of
	 * rs_snd_bytes here to allow the last msg to exceed the buffer,
	 * and poll() now knows no more data can be sent.
	 */
	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
		rs->rs_snd_bytes += len;

		/* let recv side know we are close to send space exhaustion.
		 * This is probably not the optimal way to do it, as this
		 * means we set the flag on *all* messages as soon as our
		 * throughput hits a certain threshold.
		 */
		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
		rds_message_addref(rm);
		rm->m_rs = rs;

		/* The code ordering is a little weird, but we're
		   trying to minimize the time we hold c_lock */
		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
		rm->m_inc.i_conn = conn;
		rds_message_addref(rm);

		spin_lock(&conn->c_lock);
		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
		list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
		spin_unlock(&conn->c_lock);

		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
			 rm, len, rs, rs->rs_snd_bytes,
			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));

		*queued = 1;
	}

	spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
	return *queued;
}

/*
 * rds_message is getting to be quite complicated, and we'd like to allocate
 * it all in one go. This figures out how big it needs to be up front.
 */
static int rds_rm_size(struct msghdr *msg, int data_len)
{
	struct cmsghdr *cmsg;
	int size = 0;
	int cmsg_groups = 0;
	int retval;
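	/* cmsg_groups is a small bitmask: bit 0 marks cmsgs that add to the
	 * allocation (RDMA args, atomics), bit 1 marks the cookie-only ones
	 * (RDMA_DEST, RDMA_MAP). The two classes are mutually exclusive,
	 * which is enforced below.
	 */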
Andy Grover | fc44508 | 2010-01-12 12:56:06 -0800 | [diff] [blame] | 863 | |
Gu Zheng | f95b414 | 2014-12-11 11:22:04 +0800 | [diff] [blame] | 864 | for_each_cmsghdr(cmsg, msg) { |
Andy Grover | ff87e97 | 2010-01-12 14:13:15 -0800 | [diff] [blame] | 865 | if (!CMSG_OK(msg, cmsg)) |
| 866 | return -EINVAL; |
| 867 | |
| 868 | if (cmsg->cmsg_level != SOL_RDS) |
| 869 | continue; |
| 870 | |
| 871 | switch (cmsg->cmsg_type) { |
| 872 | case RDS_CMSG_RDMA_ARGS: |
Andy Grover | aa0a4ef | 2010-04-13 12:00:35 -0700 | [diff] [blame] | 873 | cmsg_groups |= 1; |
Andy Grover | ff87e97 | 2010-01-12 14:13:15 -0800 | [diff] [blame] | 874 | retval = rds_rdma_extra_size(CMSG_DATA(cmsg)); |
| 875 | if (retval < 0) |
| 876 | return retval; |
| 877 | size += retval; |
Andy Grover | aa0a4ef | 2010-04-13 12:00:35 -0700 | [diff] [blame] | 878 | |
Andy Grover | ff87e97 | 2010-01-12 14:13:15 -0800 | [diff] [blame] | 879 | break; |
| 880 | |
| 881 | case RDS_CMSG_RDMA_DEST: |
| 882 | case RDS_CMSG_RDMA_MAP: |
Andy Grover | aa0a4ef | 2010-04-13 12:00:35 -0700 | [diff] [blame] | 883 | cmsg_groups |= 2; |
Andy Grover | ff87e97 | 2010-01-12 14:13:15 -0800 | [diff] [blame] | 884 | /* these are valid but do no add any size */ |
| 885 | break; |
| 886 | |
Andy Grover | 15133f6 | 2010-01-12 14:33:38 -0800 | [diff] [blame] | 887 | case RDS_CMSG_ATOMIC_CSWP: |
| 888 | case RDS_CMSG_ATOMIC_FADD: |
Andy Grover | 20c72bd | 2010-08-25 05:51:28 -0700 | [diff] [blame] | 889 | case RDS_CMSG_MASKED_ATOMIC_CSWP: |
| 890 | case RDS_CMSG_MASKED_ATOMIC_FADD: |
Andy Grover | aa0a4ef | 2010-04-13 12:00:35 -0700 | [diff] [blame] | 891 | cmsg_groups |= 1; |
Andy Grover | 15133f6 | 2010-01-12 14:33:38 -0800 | [diff] [blame] | 892 | size += sizeof(struct scatterlist); |
| 893 | break; |
| 894 | |
Andy Grover | ff87e97 | 2010-01-12 14:13:15 -0800 | [diff] [blame] | 895 | default: |
| 896 | return -EINVAL; |
| 897 | } |
| 898 | |
| 899 | } |
| 900 | |
| 901 | size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist); |
Andy Grover | fc44508 | 2010-01-12 12:56:06 -0800 | [diff] [blame] | 902 | |
Andy Grover | aa0a4ef | 2010-04-13 12:00:35 -0700 | [diff] [blame] | 903 | /* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */ |
| 904 | if (cmsg_groups == 3) |
| 905 | return -EINVAL; |
| 906 | |
Andy Grover | fc44508 | 2010-01-12 12:56:06 -0800 | [diff] [blame] | 907 | return size; |
| 908 | } |
| 909 | |
static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
			 struct msghdr *msg, int *allocated_mr)
{
	struct cmsghdr *cmsg;
	int ret = 0;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		/* As a side effect, RDMA_DEST and RDMA_MAP will set
		 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
		 */
		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_DEST:
			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_MAP:
			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
			if (!ret)
				*allocated_mr = 1;
			break;
		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
		case RDS_CMSG_MASKED_ATOMIC_CSWP:
		case RDS_CMSG_MASKED_ATOMIC_FADD:
			ret = rds_cmsg_atomic(rs, rm, cmsg);
			break;

		default:
			return -EINVAL;
		}

		if (ret)
			break;
	}

	return ret;
}

Ying Xue | 1b78414 | 2015-03-02 15:37:48 +0800 | [diff] [blame] | 958 | int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) |
Andy Grover | 5c11559 | 2009-02-24 15:30:27 +0000 | [diff] [blame] | 959 | { |
| 960 | struct sock *sk = sock->sk; |
| 961 | struct rds_sock *rs = rds_sk_to_rs(sk); |
Steffen Hurrle | 342dfc3 | 2014-01-17 22:53:15 +0100 | [diff] [blame] | 962 | DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name); |
Andy Grover | 5c11559 | 2009-02-24 15:30:27 +0000 | [diff] [blame] | 963 | __be32 daddr; |
| 964 | __be16 dport; |
| 965 | struct rds_message *rm = NULL; |
| 966 | struct rds_connection *conn; |
| 967 | int ret = 0; |
| 968 | int queued = 0, allocated_mr = 0; |
| 969 | int nonblock = msg->msg_flags & MSG_DONTWAIT; |
Andy Grover | 1123fd7 | 2010-03-11 13:49:56 +0000 | [diff] [blame] | 970 | long timeo = sock_sndtimeo(sk, nonblock); |
Andy Grover | 5c11559 | 2009-02-24 15:30:27 +0000 | [diff] [blame] | 971 | |
| 972 | /* Mirror Linux UDP mirror of BSD error message compatibility */ |
| 973 | /* XXX: Perhaps MSG_MORE someday */ |
| 974 | if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) { |
Andy Grover | 5c11559 | 2009-02-24 15:30:27 +0000 | [diff] [blame] | 975 | ret = -EOPNOTSUPP; |
| 976 | goto out; |
| 977 | } |
| 978 | |
| 979 | if (msg->msg_namelen) { |
| 980 | /* XXX fail non-unicast destination IPs? */ |
| 981 | if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) { |
| 982 | ret = -EINVAL; |
| 983 | goto out; |
| 984 | } |
| 985 | daddr = usin->sin_addr.s_addr; |
| 986 | dport = usin->sin_port; |
| 987 | } else { |
| 988 | /* We only care about consistency with ->connect() */ |
| 989 | lock_sock(sk); |
| 990 | daddr = rs->rs_conn_addr; |
| 991 | dport = rs->rs_conn_port; |
| 992 | release_sock(sk); |
| 993 | } |
| 994 | |
Quentin Casasnovas | 8c7188b | 2015-11-24 17:13:21 -0500 | [diff] [blame] | 995 | lock_sock(sk); |
Andy Grover | 5c11559 | 2009-02-24 15:30:27 +0000 | [diff] [blame] | 996 | if (daddr == 0 || rs->rs_bound_addr == 0) { |
Quentin Casasnovas | 8c7188b | 2015-11-24 17:13:21 -0500 | [diff] [blame] | 997 | release_sock(sk); |
Andy Grover | 5c11559 | 2009-02-24 15:30:27 +0000 | [diff] [blame] | 998 | ret = -ENOTCONN; /* XXX not a great errno */ |
| 999 | goto out; |
| 1000 | } |
Quentin Casasnovas | 8c7188b | 2015-11-24 17:13:21 -0500 | [diff] [blame] | 1001 | release_sock(sk); |

	if (payload_len > rds_sk_sndbuf(rs)) {
		ret = -EMSGSIZE;
		goto out;
	}

	/* size of rm including all sgs */
	ret = rds_rm_size(msg, payload_len);
	if (ret < 0)
		goto out;

	rm = rds_message_alloc(ret, GFP_KERNEL);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	/* Attach data to the rm */
	if (payload_len) {
		/* one scatterlist entry per page of payload */
		rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
		if (!rm->data.op_sg) {
			ret = -ENOMEM;
			goto out;
		}
		ret = rds_message_copy_from_user(rm, &msg->msg_iter);
		if (ret)
			goto out;
	}
	rm->data.op_active = 1;

	rm->m_daddr = daddr;

	/* rds_conn_create has a spinlock that runs with IRQ off.
	 * Caching the conn in the socket helps a lot. */
	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr) {
		conn = rs->rs_conn;
	} else {
		conn = rds_conn_create_outgoing(sock_net(sock->sk),
						rs->rs_bound_addr, daddr,
						rs->rs_transport,
						sock->sk->sk_allocation);
		if (IS_ERR(conn)) {
			ret = PTR_ERR(conn);
			goto out;
		}
		rs->rs_conn = conn;
	}

	/* Parse any control messages the user may have included. */
	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
	if (ret)
		goto out;

	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
		printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
				   &rm->rdma, conn->c_trans->xmit_rdma);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
		printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
				   &rm->atomic, conn->c_trans->xmit_atomic);
		ret = -EOPNOTSUPP;
		goto out;
	}

	rds_conn_connect_if_down(conn);

	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
	if (ret) {
		rs->rs_seen_congestion = 1;
		goto out;
	}

	/* Try to queue the message; if the send queue is full, wait for
	 * space unless the caller asked not to block. */
	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
				  dport, &queued)) {
		rds_stats_inc(s_send_queue_full);

		if (nonblock) {
			ret = -EAGAIN;
			goto out;
		}

		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					rds_send_queue_rm(rs, conn, rm,
							  rs->rs_bound_port,
							  dport,
							  &queued),
					timeo);
		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
		/* positive or infinite timeout: retest the queue; zero means
		 * we timed out, negative means a signal interrupted us. */
		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
			continue;

		ret = timeo;
		if (ret == 0)
			ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * By now we've committed to the send.  We reuse rds_send_worker()
	 * to retry sends in the rds thread if the transport asks us to.
	 */
	rds_stats_inc(s_send_queued);

	ret = rds_send_xmit(conn);
	if (ret == -ENOMEM || ret == -EAGAIN)
		queue_delayed_work(rds_wq, &conn->c_send_w, 1);

	rds_message_put(rm);
	return payload_len;

out:
	/* If the user included an RDMA_MAP cmsg, we allocated an MR on the fly.
	 * If the sendmsg goes through, we keep the MR.  If it fails with EAGAIN
	 * or in any other way, we need to destroy the MR again. */
	if (allocated_mr)
		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);

	if (rm)
		rds_message_put(rm);
	return ret;
}

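/*
 * Usage sketch (illustrative, not part of this file): rds_sendmsg() is
 * reached through the normal socket calls, so a userspace sender looks
 * roughly like this (laddr/faddr are hypothetical sockaddr_in values,
 * error handling omitted):
 *
 *	int fd = socket(PF_RDS, SOCK_SEQPACKET, 0);
 *
 *	bind(fd, (struct sockaddr *) &laddr, sizeof(laddr));
 *	sendto(fd, payload, payload_len, 0,
 *	       (struct sockaddr *) &faddr, sizeof(faddr));
 *
 * With no destination address, the code above falls back to the address
 * cached by a prior connect() and fails with -ENOTCONN if there is none.
 */
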
/*
 * Reply to a ping packet (a message addressed to port zero).
 */
int
rds_send_pong(struct rds_connection *conn, __be16 dport)
{
	struct rds_message *rm;
	unsigned long flags;
	int ret = 0;

	rm = rds_message_alloc(0, GFP_ATOMIC);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	rm->m_daddr = conn->c_faddr;
	rm->data.op_active = 1;

	rds_conn_connect_if_down(conn);

	ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
	if (ret)
		goto out;

	spin_lock_irqsave(&conn->c_lock, flags);
	list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	rds_message_addref(rm);
	rm->m_inc.i_conn = conn;

	rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
				    conn->c_next_tx_seq);
	conn->c_next_tx_seq++;
	spin_unlock_irqrestore(&conn->c_lock, flags);

	rds_stats_inc(s_send_queued);
	rds_stats_inc(s_send_pong);

	/* schedule the send work on rds_wq */
	queue_delayed_work(rds_wq, &conn->c_send_w, 1);

	rds_message_put(rm);
	return 0;

out:
	if (rm)
		rds_message_put(rm);
	return ret;
}
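
/*
 * Caller-side sketch (illustrative; based on the receive path's convention
 * that an incoming message whose destination port is zero is a ping, which
 * is answered from the sender's source port):
 *
 *	if (inc->i_hdr.h_dport == 0) {
 *		rds_send_pong(conn, inc->i_hdr.h_sport);
 *		goto out;
 *	}
 */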