/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/tcp.h>

#include "rds.h"
#include "tcp.h"

static struct kmem_cache *rds_tcp_incoming_slab;

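/*
 * struct rds_tcp_incoming wraps the generic struct rds_incoming and adds
 * ti_skb_list, the chain of cloned skbs that holds the message payload.
 * rds_tcp_inc_purge() drops those skbs; rds_tcp_inc_free() then returns
 * the wrapper to the slab cache above.
 */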
static void rds_tcp_inc_purge(struct rds_incoming *inc)
{
        struct rds_tcp_incoming *tinc;
        tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
        rdsdebug("purging tinc %p inc %p\n", tinc, inc);
        skb_queue_purge(&tinc->ti_skb_list);
}

void rds_tcp_inc_free(struct rds_incoming *inc)
{
        struct rds_tcp_incoming *tinc;
        tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
        rds_tcp_inc_purge(inc);
        rdsdebug("freeing tinc %p inc %p\n", tinc, inc);
        kmem_cache_free(rds_tcp_incoming_slab, tinc);
}

/*
 * this is pretty lame, but, whatever.
 */
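/*
 * Copy the message payload out to the receiving socket's iov_iter: walk
 * the queued skb clones and copy from each in turn until either the
 * message or the iterator is exhausted.  Returns the number of bytes
 * copied, or -EFAULT if the copy to userspace faults.
 */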
int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
{
        struct rds_tcp_incoming *tinc;
        struct sk_buff *skb;
        int ret = 0;

        if (!iov_iter_count(to))
                goto out;

        tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);

        skb_queue_walk(&tinc->ti_skb_list, skb) {
                unsigned long to_copy, skb_off;
                for (skb_off = 0; skb_off < skb->len; skb_off += to_copy) {
                        to_copy = iov_iter_count(to);
                        to_copy = min(to_copy, skb->len - skb_off);

                        if (skb_copy_datagram_iter(skb, skb_off, to, to_copy))
                                return -EFAULT;

                        rds_stats_add(s_copy_to_user, to_copy);
                        ret += to_copy;

                        if (!iov_iter_count(to))
                                goto out;
                }
        }
out:
        return ret;
}

/*
 * We have a series of skbs that have fragmented pieces of the congestion
 * bitmap.  They must add up to the exact size of the congestion bitmap.  We
 * use the skb helpers to copy those into the pages that make up the in-memory
 * congestion bitmap for the remote address of this connection.  We then tell
 * the congestion core that the bitmap has been changed so that it can wake up
 * sleepers.
 *
 * This is racing with sending paths which are using test_bit to see if the
 * bitmap indicates that their recipient is congested.
 */

static void rds_tcp_cong_recv(struct rds_connection *conn,
                              struct rds_tcp_incoming *tinc)
{
        struct sk_buff *skb;
        unsigned int to_copy, skb_off;
        unsigned int map_off;
        unsigned int map_page;
        struct rds_cong_map *map;
        int ret;

        /* catch completely corrupt packets */
        if (be32_to_cpu(tinc->ti_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
                return;

        map_page = 0;
        map_off = 0;
        map = conn->c_fcong;

        skb_queue_walk(&tinc->ti_skb_list, skb) {
                skb_off = 0;
                while (skb_off < skb->len) {
                        to_copy = min_t(unsigned int, PAGE_SIZE - map_off,
                                        skb->len - skb_off);

                        BUG_ON(map_page >= RDS_CONG_MAP_PAGES);

                        /* only returns 0 or -error */
                        ret = skb_copy_bits(skb, skb_off,
                                (void *)map->m_page_addrs[map_page] + map_off,
                                to_copy);
                        BUG_ON(ret != 0);

                        skb_off += to_copy;
                        map_off += to_copy;
                        if (map_off == PAGE_SIZE) {
                                map_off = 0;
                                map_page++;
                        }
                }
        }

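        /*
         * The all-ones portion mask tells the congestion core that every
         * 64-bit chunk of the map may have changed, so any sender sleeping
         * on a destination covered by this map gets woken.
         */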
        rds_cong_map_updated(map, ~(u64) 0);
}

struct rds_tcp_desc_arg {
        struct rds_conn_path *conn_path;
        gfp_t gfp;
};

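/*
 * tcp_read_sock() callback.  Reassemble the RDS wire format out of the TCP
 * byte stream: t_tinc_hdr_rem counts the header bytes still owed to the
 * current message and t_tinc_data_rem the payload bytes.  Both live in the
 * connection state, so a message may straddle any number of skbs and calls.
 */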
static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
                             unsigned int offset, size_t len)
{
        struct rds_tcp_desc_arg *arg = desc->arg.data;
        struct rds_conn_path *cp = arg->conn_path;
        struct rds_tcp_connection *tc = cp->cp_transport_data;
        struct rds_tcp_incoming *tinc = tc->t_tinc;
        struct sk_buff *clone;
        size_t left = len, to_copy;

        rdsdebug("tcp data tc %p skb %p offset %u len %zu\n", tc, skb, offset,
                 len);

        /*
         * tcp_read_sock() interprets partial progress as an indication to stop
         * processing.
         */
        while (left) {
                if (!tinc) {
                        tinc = kmem_cache_alloc(rds_tcp_incoming_slab,
                                                arg->gfp);
                        if (!tinc) {
                                desc->error = -ENOMEM;
                                goto out;
                        }
                        tc->t_tinc = tinc;
                        rdsdebug("alloced tinc %p\n", tinc);
                        rds_inc_path_init(&tinc->ti_inc, cp,
                                          cp->cp_conn->c_faddr);
                        tinc->ti_inc.i_rx_lat_trace[RDS_MSG_RX_HDR] =
                                        local_clock();

                        /*
                         * XXX we might be able to use the __ variants when
                         * we've already serialized at a higher level.
                         */
                        skb_queue_head_init(&tinc->ti_skb_list);
                }

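                /* First finish accumulating the fixed-size rds_header. */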
                if (left && tc->t_tinc_hdr_rem) {
                        to_copy = min(tc->t_tinc_hdr_rem, left);
                        rdsdebug("copying %zu header from skb %p\n", to_copy,
                                 skb);
                        skb_copy_bits(skb, offset,
                                      (char *)&tinc->ti_inc.i_hdr +
                                                sizeof(struct rds_header) -
                                                tc->t_tinc_hdr_rem,
                                      to_copy);
                        tc->t_tinc_hdr_rem -= to_copy;
                        left -= to_copy;
                        offset += to_copy;

                        if (tc->t_tinc_hdr_rem == 0) {
                                /* could be 0 for a 0 len message */
                                tc->t_tinc_data_rem =
                                        be32_to_cpu(tinc->ti_inc.i_hdr.h_len);
                                tinc->ti_inc.i_rx_lat_trace[RDS_MSG_RX_START] =
                                        local_clock();
                        }
                }

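                /*
                 * Then queue the payload: pskb_extract() carves the
                 * [offset, offset + to_copy) region of the skb out into a
                 * new skb, which stays on ti_skb_list until the application
                 * reads the message.
                 */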
                if (left && tc->t_tinc_data_rem) {
                        to_copy = min(tc->t_tinc_data_rem, left);

                        clone = pskb_extract(skb, offset, to_copy, arg->gfp);
                        if (!clone) {
                                desc->error = -ENOMEM;
                                goto out;
                        }

                        skb_queue_tail(&tinc->ti_skb_list, clone);

                        rdsdebug("skb %p data %p len %d off %u to_copy %zu -> "
                                 "clone %p data %p len %d\n",
                                 skb, skb->data, skb->len, offset, to_copy,
                                 clone, clone->data, clone->len);

                        tc->t_tinc_data_rem -= to_copy;
                        left -= to_copy;
                        offset += to_copy;
                }

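                /*
                 * Header and payload are complete: hand the message to the
                 * congestion code or the generic receive path, then drop
                 * our reference and start over on the next message.
                 */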
                if (tc->t_tinc_hdr_rem == 0 && tc->t_tinc_data_rem == 0) {
                        struct rds_connection *conn = cp->cp_conn;

                        if (tinc->ti_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
                                rds_tcp_cong_recv(conn, tinc);
                        else
                                rds_recv_incoming(conn, conn->c_faddr,
                                                  conn->c_laddr, &tinc->ti_inc,
                                                  arg->gfp);

                        tc->t_tinc_hdr_rem = sizeof(struct rds_header);
                        tc->t_tinc_data_rem = 0;
                        tc->t_tinc = NULL;
                        rds_inc_put(&tinc->ti_inc);
                        tinc = NULL;
                }
        }
out:
        rdsdebug("returning len %zu left %zu skb len %d rx queue depth %d\n",
                 len, left, skb->len,
                 skb_queue_len(&tc->t_sock->sk->sk_receive_queue));
        return len - left;
}

/*
 * Drain the socket's receive queue with tcp_read_sock(), which calls
 * rds_tcp_data_recv() for each queued skb.  The caller has to hold the
 * sock lock.
 */
static int rds_tcp_read_sock(struct rds_conn_path *cp, gfp_t gfp)
{
        struct rds_tcp_connection *tc = cp->cp_transport_data;
        struct socket *sock = tc->t_sock;
        read_descriptor_t desc;
        struct rds_tcp_desc_arg arg;

        /* It's like glib in the kernel! */
        arg.conn_path = cp;
        arg.gfp = gfp;
        desc.arg.data = &arg;
        desc.error = 0;
        desc.count = 1; /* give more than one skb per call */

        tcp_read_sock(sock->sk, &desc, rds_tcp_data_recv);
        rdsdebug("tcp_read_sock for tc %p gfp 0x%x returned %d\n", tc, gfp,
                 desc.error);

        return desc.error;
}

/*
 * We hold the sock lock to serialize our rds_tcp_recv_path->tcp_read_sock
 * from data_ready.
 *
 * If we fail to allocate, we're in trouble: blindly wait some time before
 * trying again to see if the VM can free up something for us.
 */
int rds_tcp_recv_path(struct rds_conn_path *cp)
{
        struct rds_tcp_connection *tc = cp->cp_transport_data;
        struct socket *sock = tc->t_sock;
        int ret = 0;

        rdsdebug("recv worker path [%d] tc %p sock %p\n",
                 cp->cp_index, tc, sock);

        lock_sock(sock->sk);
        ret = rds_tcp_read_sock(cp, GFP_KERNEL);
        release_sock(sock->sk);

        return ret;
}

void rds_tcp_data_ready(struct sock *sk)
{
        void (*ready)(struct sock *sk);
        struct rds_conn_path *cp;
        struct rds_tcp_connection *tc;

        rdsdebug("data ready sk %p\n", sk);

        read_lock_bh(&sk->sk_callback_lock);
        cp = sk->sk_user_data;
        if (!cp) { /* check for teardown race */
                ready = sk->sk_data_ready;
                goto out;
        }

        tc = cp->cp_transport_data;
        ready = tc->t_orig_data_ready;
        rds_tcp_stats_inc(s_tcp_data_ready_calls);

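        /*
         * We are called in softirq context, so read with GFP_ATOMIC.  If an
         * allocation fails, punt to the recv worker, which retries with
         * GFP_KERNEL via rds_tcp_recv_path() above.
         */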
        if (rds_tcp_read_sock(cp, GFP_ATOMIC) == -ENOMEM) {
                rcu_read_lock();
                if (!rds_destroy_pending(cp->cp_conn))
                        queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
                rcu_read_unlock();
        }
out:
        read_unlock_bh(&sk->sk_callback_lock);
        ready(sk);
}

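/*
 * Set up and tear down the slab cache for struct rds_tcp_incoming; these
 * are expected to be called once each from the transport's module init
 * and exit paths in tcp.c.
 */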
int rds_tcp_recv_init(void)
{
        rds_tcp_incoming_slab = kmem_cache_create("rds_tcp_incoming",
                                        sizeof(struct rds_tcp_incoming),
                                        0, 0, NULL);
        if (!rds_tcp_incoming_slab)
                return -ENOMEM;
        return 0;
}

void rds_tcp_recv_exit(void)
{
        kmem_cache_destroy(rds_tcp_incoming_slab);
}