/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <net/tcp.h>

#include "rds.h"
#include "tcp.h"

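/*
 * Set or clear TCP_CORK on the underlying kernel socket.  The setsockopt
 * handler expects a user-space pointer, so temporarily widen the address
 * limit with set_fs(KERNEL_DS) to hand it a kernel buffer.
 */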
static void rds_tcp_cork(struct socket *sock, int val)
{
        mm_segment_t oldfs;

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
                              sizeof(val));
        set_fs(oldfs);
}

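/* Cork the socket so the header and data we queue go out as full segments. */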
void rds_tcp_xmit_prepare(struct rds_connection *conn)
{
        struct rds_tcp_connection *tc = conn->c_transport_data;

        rds_tcp_cork(tc->t_sock, 1);
}

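/* Uncork once the message is queued so TCP pushes out anything still buffered. */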
void rds_tcp_xmit_complete(struct rds_connection *conn)
{
        struct rds_tcp_connection *tc = conn->c_transport_data;

        rds_tcp_cork(tc->t_sock, 0);
}

/* the core send_sem serializes this with other xmit and shutdown */
int rds_tcp_sendmsg(struct socket *sock, void *data, unsigned int len)
{
        struct kvec vec = {
                .iov_base = data,
                .iov_len = len,
        };
        struct msghdr msg = {
                .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL,
        };

        return kernel_sendmsg(sock, &msg, &vec, 1, vec.iov_len);
}

/* the core send_sem serializes this with other xmit and shutdown */
int rds_tcp_xmit_cong_map(struct rds_connection *conn,
                          struct rds_cong_map *map, unsigned long offset)
{
        static struct rds_header rds_tcp_map_header = {
                .h_flags = RDS_FLAG_CONG_BITMAP,
        };
        struct rds_tcp_connection *tc = conn->c_transport_data;
        unsigned long i;
        int ret;
        int copied = 0;

        /* Some problem claims cpu_to_be32(constant) isn't a constant. */
        rds_tcp_map_header.h_len = cpu_to_be32(RDS_CONG_MAP_BYTES);

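        /*
         * Send whatever is left of the congestion map header first.  On a
         * short write we return what was sent and the caller calls back in
         * with an updated offset.
         */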
        if (offset < sizeof(struct rds_header)) {
                ret = rds_tcp_sendmsg(tc->t_sock,
                                      (void *)&rds_tcp_map_header + offset,
                                      sizeof(struct rds_header) - offset);
                if (ret <= 0)
                        return ret;
                offset += ret;
                copied = ret;
                if (offset < sizeof(struct rds_header))
                        return ret;
        }

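        /*
         * The header is out; translate the remaining offset into a page
         * index plus an offset within that page, then stream the bitmap
         * pages with sendpage().
         */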
        offset -= sizeof(struct rds_header);
        i = offset / PAGE_SIZE;
        offset = offset % PAGE_SIZE;
        BUG_ON(i >= RDS_CONG_MAP_PAGES);

        do {
                ret = tc->t_sock->ops->sendpage(tc->t_sock,
                                        virt_to_page(map->m_page_addrs[i]),
                                        offset, PAGE_SIZE - offset,
                                        MSG_DONTWAIT);
                if (ret <= 0)
                        break;
                copied += ret;
                offset += ret;
                if (offset == PAGE_SIZE) {
                        offset = 0;
                        i++;
                }
        } while (i < RDS_CONG_MAP_PAGES);

        return copied ? copied : ret;
}

/* the core send_sem serializes this with other xmit and shutdown */
int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
                 unsigned int hdr_off, unsigned int sg, unsigned int off)
{
        struct rds_tcp_connection *tc = conn->c_transport_data;
        int done = 0;
        int ret = 0;

        if (hdr_off == 0) {
                /*
                 * m_ack_seq is set to the sequence number of the last byte of
                 * header and data.  see rds_tcp_is_acked().
                 */
                tc->t_last_sent_nxt = rds_tcp_snd_nxt(tc);
                rm->m_ack_seq = tc->t_last_sent_nxt +
                                sizeof(struct rds_header) +
                                be32_to_cpu(rm->m_inc.i_hdr.h_len) - 1;
                smp_mb__before_clear_bit();
                set_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags);
                tc->t_last_expected_una = rm->m_ack_seq + 1;

                rdsdebug("rm %p tcp nxt %u ack_seq %llu\n",
                         rm, rds_tcp_snd_nxt(tc),
                         (unsigned long long)rm->m_ack_seq);
        }

        if (hdr_off < sizeof(struct rds_header)) {
                /* see rds_tcp_write_space() */
                set_bit(SOCK_NOSPACE, &tc->t_sock->sk->sk_socket->flags);

                ret = rds_tcp_sendmsg(tc->t_sock,
                                      (void *)&rm->m_inc.i_hdr + hdr_off,
                                      sizeof(rm->m_inc.i_hdr) - hdr_off);
                if (ret < 0)
                        goto out;
                done += ret;
                if (hdr_off + done != sizeof(struct rds_header))
                        goto out;
        }

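        /*
         * Stream the payload scatterlist with sendpage().  "off" tracks our
         * position within the current entry so a partial send can resume
         * exactly where it left off.
         */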
        while (sg < rm->m_nents) {
                ret = tc->t_sock->ops->sendpage(tc->t_sock,
                                                sg_page(&rm->m_sg[sg]),
                                                rm->m_sg[sg].offset + off,
                                                rm->m_sg[sg].length - off,
                                                MSG_DONTWAIT|MSG_NOSIGNAL);
                rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->m_sg[sg]),
                         rm->m_sg[sg].offset + off, rm->m_sg[sg].length - off,
                         ret);
                if (ret <= 0)
                        break;

                off += ret;
                done += ret;
                if (off == rm->m_sg[sg].length) {
                        off = 0;
                        sg++;
                }
        }

out:
        if (ret <= 0) {
                /* write_space will hit after EAGAIN, all else fatal */
                if (ret == -EAGAIN) {
                        rds_tcp_stats_inc(s_tcp_sndbuf_full);
                        ret = 0;
                } else {
                        printk(KERN_WARNING "RDS/tcp: send to %pI4 "
                               "returned %d, disconnecting and reconnecting\n",
                               &conn->c_faddr, ret);
                        rds_conn_drop(conn);
                }
        }
        if (done == 0)
                done = ret;
        return done;
}

/*
 * rm->m_ack_seq is set to the tcp sequence number that corresponds to the
 * last byte of the message, including the header.  This means that the
 * entire message has been received if rm->m_ack_seq is "before" the next
 * unacked byte of the TCP sequence space.  We have to do very careful
 * wrapping 32bit comparisons here.
 */
static int rds_tcp_is_acked(struct rds_message *rm, uint64_t ack)
{
        if (!test_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags))
                return 0;
        return (__s32)((u32)rm->m_ack_seq - (u32)ack) < 0;
}

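/*
 * TCP's write_space callback, invoked as send queue space frees up.  We use
 * it both to retire RDS messages whose bytes TCP has now fully acked and to
 * kick the send worker when there is room to transmit again.
 */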
void rds_tcp_write_space(struct sock *sk)
{
        void (*write_space)(struct sock *sk);
        struct rds_connection *conn;
        struct rds_tcp_connection *tc;

        read_lock(&sk->sk_callback_lock);
        conn = sk->sk_user_data;
        if (conn == NULL) {
                write_space = sk->sk_write_space;
                goto out;
        }

        tc = conn->c_transport_data;
        rdsdebug("write_space for tc %p\n", tc);
        write_space = tc->t_orig_write_space;
        rds_tcp_stats_inc(s_tcp_write_space_calls);

        rdsdebug("tcp una %u\n", rds_tcp_snd_una(tc));
        tc->t_last_seen_una = rds_tcp_snd_una(tc);
        rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked);

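        /* Kick the send worker once at least half of the send buffer is free. */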
        if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
                queue_delayed_work(rds_wq, &conn->c_send_w, 0);

out:
        read_unlock(&sk->sk_callback_lock);

        /*
         * write_space is only called when data leaves tcp's send queue if
         * SOCK_NOSPACE is set.  We set SOCK_NOSPACE every time we put
         * data in tcp's send queue because we use write_space to parse the
         * sequence numbers and notice that rds messages have been fully
         * received.
         *
         * tcp's write_space clears SOCK_NOSPACE if the send queue has more
         * than a certain amount of space.  So we need to set it again *after*
         * we call tcp's write_space or else we might only get called on the
         * first of a series of incoming tcp acks.
         */
        write_space(sk);

        if (sk->sk_socket)
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
}