/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/tcp.h>

#include "rds_single_path.h"
#include "rds.h"
#include "tcp.h"

static struct kmem_cache *rds_tcp_incoming_slab;

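/*
 * A struct rds_tcp_incoming tracks one inbound RDS message while it is
 * reassembled from the TCP byte stream; private copies of the data skbs
 * are parked on ti_skb_list until the message is complete.
 */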
static void rds_tcp_inc_purge(struct rds_incoming *inc)
{
	struct rds_tcp_incoming *tinc;
	tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
	rdsdebug("purging tinc %p inc %p\n", tinc, inc);
	skb_queue_purge(&tinc->ti_skb_list);
}

void rds_tcp_inc_free(struct rds_incoming *inc)
{
	struct rds_tcp_incoming *tinc;
	tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
	rds_tcp_inc_purge(inc);
	rdsdebug("freeing tinc %p inc %p\n", tinc, inc);
	kmem_cache_free(rds_tcp_incoming_slab, tinc);
}

/*
 * Copy the payload of a reassembled message out to userspace, walking the
 * queued skb fragments one at a time.
 */
int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
{
	struct rds_tcp_incoming *tinc;
	struct sk_buff *skb;
	int ret = 0;

	if (!iov_iter_count(to))
		goto out;

	tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);

	skb_queue_walk(&tinc->ti_skb_list, skb) {
		unsigned long to_copy, skb_off;
		for (skb_off = 0; skb_off < skb->len; skb_off += to_copy) {
			to_copy = iov_iter_count(to);
			to_copy = min(to_copy, skb->len - skb_off);

			if (skb_copy_datagram_iter(skb, skb_off, to, to_copy))
				return -EFAULT;

			rds_stats_add(s_copy_to_user, to_copy);
			ret += to_copy;

			if (!iov_iter_count(to))
				goto out;
		}
	}
out:
	return ret;
}
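
/*
 * A sketch of the assumed wiring (it lives in tcp.c, not here): the RDS
 * core dispatches recvmsg() through the transport ops, so this runs with
 * the iov_iter backing the application's receive buffer, roughly:
 *
 *	struct rds_transport rds_tcp_transport = {
 *		...
 *		.inc_copy_to_user	= rds_tcp_inc_copy_to_user,
 *		...
 *	};
 *
 * The snippet above is illustrative, not a verbatim copy of that
 * initializer.
 */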

/*
 * We have a series of skbs that have fragmented pieces of the congestion
 * bitmap.  They must add up to the exact size of the congestion bitmap.  We
 * use the skb helpers to copy those into the pages that make up the in-memory
 * congestion bitmap for the remote address of this connection.  We then tell
 * the congestion core that the bitmap has been changed so that it can wake up
 * sleepers.
 *
 * This is racing with sending paths which are using test_bit to see if the
 * bitmap indicates that their recipient is congested.
 */
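/*
 * For scale (an assumption based on the constants in rds.h, not something
 * defined in this file): the map holds one bit per RDS port, 64K bits ==
 * 8KB == RDS_CONG_MAP_BYTES, so with 4KB pages it spans
 * RDS_CONG_MAP_PAGES == 2 entries of m_page_addrs.
 */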

static void rds_tcp_cong_recv(struct rds_connection *conn,
			      struct rds_tcp_incoming *tinc)
{
	struct sk_buff *skb;
	unsigned int to_copy, skb_off;
	unsigned int map_off;
	unsigned int map_page;
	struct rds_cong_map *map;
	int ret;

	/* catch completely corrupt packets */
	if (be32_to_cpu(tinc->ti_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
		return;

	map_page = 0;
	map_off = 0;
	map = conn->c_fcong;

	skb_queue_walk(&tinc->ti_skb_list, skb) {
		skb_off = 0;
		while (skb_off < skb->len) {
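			/* copy whichever is smaller: the bytes left in this
			 * skb or the room left in the current bitmap page
			 */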
			to_copy = min_t(unsigned int, PAGE_SIZE - map_off,
					skb->len - skb_off);

			BUG_ON(map_page >= RDS_CONG_MAP_PAGES);

			/* only returns 0 or -error */
			ret = skb_copy_bits(skb, skb_off,
				(void *)map->m_page_addrs[map_page] + map_off,
				to_copy);
			BUG_ON(ret != 0);

			skb_off += to_copy;
			map_off += to_copy;
			if (map_off == PAGE_SIZE) {
				map_off = 0;
				map_page++;
			}
		}
	}

	rds_cong_map_updated(map, ~(u64) 0);
}

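/*
 * tcp_read_sock() hands our actor only the read_descriptor_t, so the
 * connection and the allocation mode for this receive pass are smuggled
 * through desc->arg.data in this little argument struct.
 */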
struct rds_tcp_desc_arg {
	struct rds_connection *conn;
	gfp_t gfp;
};

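/*
 * Actor for tcp_read_sock(): carves RDS messages out of the TCP byte
 * stream.  tc->t_tinc_hdr_rem and tc->t_tinc_data_rem track how much of
 * the current message's header and payload are still outstanding, so a
 * message may be assembled across many calls and many skbs.
 */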
static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
			     unsigned int offset, size_t len)
{
	struct rds_tcp_desc_arg *arg = desc->arg.data;
	struct rds_connection *conn = arg->conn;
	struct rds_tcp_connection *tc = conn->c_transport_data;
	struct rds_tcp_incoming *tinc = tc->t_tinc;
	struct sk_buff *clone;
	size_t left = len, to_copy;

	rdsdebug("tcp data tc %p skb %p offset %u len %zu\n", tc, skb, offset,
		 len);

	/*
	 * tcp_read_sock() interprets partial progress as an indication to stop
	 * processing.
	 */
	while (left) {
		if (!tinc) {
			tinc = kmem_cache_alloc(rds_tcp_incoming_slab,
						arg->gfp);
			if (!tinc) {
				desc->error = -ENOMEM;
				goto out;
			}
			tc->t_tinc = tinc;
			rdsdebug("alloced tinc %p\n", tinc);
			rds_inc_init(&tinc->ti_inc, conn, conn->c_faddr);
			/*
			 * XXX we might be able to use the __ variants when
			 * we've already serialized at a higher level.
			 */
			skb_queue_head_init(&tinc->ti_skb_list);
		}

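		/* fill in the rds_header first; i_hdr doubles as the receive
		 * buffer, so resume at however many of its bytes have
		 * already arrived
		 */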
		if (left && tc->t_tinc_hdr_rem) {
			to_copy = min(tc->t_tinc_hdr_rem, left);
			rdsdebug("copying %zu header from skb %p\n", to_copy,
				 skb);
			skb_copy_bits(skb, offset,
				      (char *)&tinc->ti_inc.i_hdr +
						sizeof(struct rds_header) -
						tc->t_tinc_hdr_rem,
				      to_copy);
			tc->t_tinc_hdr_rem -= to_copy;
			left -= to_copy;
			offset += to_copy;

			if (tc->t_tinc_hdr_rem == 0) {
				/* could be 0 for a 0-length message */
				tc->t_tinc_data_rem =
					be32_to_cpu(tinc->ti_inc.i_hdr.h_len);
			}
		}

		if (left && tc->t_tinc_data_rem) {
			to_copy = min(tc->t_tinc_data_rem, left);

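			/* queue a private copy of just this message's bytes;
			 * the stream skb may also carry bytes of the next
			 * message and stays owned by TCP
			 */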
			clone = pskb_extract(skb, offset, to_copy, arg->gfp);
			if (!clone) {
				desc->error = -ENOMEM;
				goto out;
			}

			skb_queue_tail(&tinc->ti_skb_list, clone);

			rdsdebug("skb %p data %p len %d off %u to_copy %zu -> "
				 "clone %p data %p len %d\n",
				 skb, skb->data, skb->len, offset, to_copy,
				 clone, clone->data, clone->len);

			tc->t_tinc_data_rem -= to_copy;
			left -= to_copy;
			offset += to_copy;
		}

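		/* header and payload are complete: hand the message to the
		 * congestion code or the receive core, then reset the
		 * reassembly state for the next message
		 */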
		if (tc->t_tinc_hdr_rem == 0 && tc->t_tinc_data_rem == 0) {
			if (tinc->ti_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
				rds_tcp_cong_recv(conn, tinc);
			else
				rds_recv_incoming(conn, conn->c_faddr,
						  conn->c_laddr, &tinc->ti_inc,
						  arg->gfp);

			tc->t_tinc_hdr_rem = sizeof(struct rds_header);
			tc->t_tinc_data_rem = 0;
			tc->t_tinc = NULL;
			rds_inc_put(&tinc->ti_inc);
			tinc = NULL;
		}
	}
out:
	rdsdebug("returning len %zu left %zu skb len %d rx queue depth %d\n",
		 len, left, skb->len,
		 skb_queue_len(&tc->t_sock->sk->sk_receive_queue));
	return len - left;
}

/* the caller has to hold the sock lock */
static int rds_tcp_read_sock(struct rds_connection *conn, gfp_t gfp)
{
	struct rds_tcp_connection *tc = conn->c_transport_data;
	struct socket *sock = tc->t_sock;
	read_descriptor_t desc;
	struct rds_tcp_desc_arg arg;

	/* It's like glib in the kernel! */
	arg.conn = conn;
	arg.gfp = gfp;
	desc.arg.data = &arg;
	desc.error = 0;
	desc.count = 1; /* non-zero lets tcp_read_sock() hand us more than
			 * one skb per call
			 */

	tcp_read_sock(sock->sk, &desc, rds_tcp_data_recv);
	rdsdebug("tcp_read_sock for tc %p gfp 0x%x returned %d\n", tc, gfp,
		 desc.error);

	return desc.error;
}

/*
 * We hold the sock lock to serialize our rds_tcp_recv->tcp_read_sock from
 * data_ready.
 *
 * If we fail to allocate we're in trouble: blindly wait some time before
 * trying again to see if the VM can free up something for us.
 */
int rds_tcp_recv(struct rds_connection *conn)
{
	struct rds_tcp_connection *tc = conn->c_transport_data;
	struct socket *sock = tc->t_sock;
	int ret = 0;

	rdsdebug("recv worker conn %p tc %p sock %p\n", conn, tc, sock);

	lock_sock(sock->sk);
	ret = rds_tcp_read_sock(conn, GFP_KERNEL);
	release_sock(sock->sk);

	return ret;
}

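/*
 * Installed in place of the socket's original sk_data_ready callback (the
 * original is stashed in tc->t_orig_data_ready), so it typically runs in
 * softirq context whenever new data lands on the socket; hence GFP_ATOMIC
 * below and the fallback to the recv worker when an allocation fails.
 */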
void rds_tcp_data_ready(struct sock *sk)
{
	void (*ready)(struct sock *sk);
	struct rds_connection *conn;
	struct rds_tcp_connection *tc;

	rdsdebug("data ready sk %p\n", sk);

	read_lock_bh(&sk->sk_callback_lock);
	conn = sk->sk_user_data;
	if (!conn) { /* check for teardown race */
		ready = sk->sk_data_ready;
		goto out;
	}

	tc = conn->c_transport_data;
	ready = tc->t_orig_data_ready;
	rds_tcp_stats_inc(s_tcp_data_ready_calls);

	if (rds_tcp_read_sock(conn, GFP_ATOMIC) == -ENOMEM)
		queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
out:
	read_unlock_bh(&sk->sk_callback_lock);
	ready(sk);
}

int rds_tcp_recv_init(void)
{
	rds_tcp_incoming_slab = kmem_cache_create("rds_tcp_incoming",
					sizeof(struct rds_tcp_incoming),
					0, 0, NULL);
	if (!rds_tcp_incoming_slab)
		return -ENOMEM;
	return 0;
}

void rds_tcp_recv_exit(void)
{
	kmem_cache_destroy(rds_tcp_incoming_slab);
}