/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/tcp.h>

#include "rds.h"
#include "tcp.h"

static struct kmem_cache *rds_tcp_incoming_slab;

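/* Drop any skbs still queued on an incoming message. */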
void rds_tcp_inc_purge(struct rds_incoming *inc)
{
	struct rds_tcp_incoming *tinc;
	tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
	rdsdebug("purging tinc %p inc %p\n", tinc, inc);
	skb_queue_purge(&tinc->ti_skb_list);
}

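/* Purge an incoming message and return it to its slab. */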
void rds_tcp_inc_free(struct rds_incoming *inc)
{
	struct rds_tcp_incoming *tinc;
	tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
	rds_tcp_inc_purge(inc);
	rdsdebug("freeing tinc %p inc %p\n", tinc, inc);
	kmem_cache_free(rds_tcp_incoming_slab, tinc);
}

/*
 * Copy received payload out to userspace by walking the message's queued
 * skbs one iovec at a time.  This linear copy is pretty lame, but,
 * whatever.
 */
int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
			     size_t size)
{
	struct rds_tcp_incoming *tinc;
	struct iovec *iov, tmp;
	struct sk_buff *skb;
	unsigned long to_copy, skb_off;
	int ret = 0;

	if (size == 0)
		goto out;

	tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
	iov = first_iov;
	tmp = *iov;

	skb_queue_walk(&tinc->ti_skb_list, skb) {
		skb_off = 0;
		while (skb_off < skb->len) {
			while (tmp.iov_len == 0) {
				iov++;
				tmp = *iov;
			}

			to_copy = min(tmp.iov_len, size);
			to_copy = min(to_copy, skb->len - skb_off);

			rdsdebug("ret %d size %zu skb %p skb_off %lu "
				 "skblen %d iov_base %p iov_len %zu cpy %lu\n",
				 ret, size, skb, skb_off, skb->len,
				 tmp.iov_base, tmp.iov_len, to_copy);

			/* modifies tmp as it copies */
			if (skb_copy_datagram_iovec(skb, skb_off, &tmp,
						    to_copy)) {
				ret = -EFAULT;
				goto out;
			}

			rds_stats_add(s_copy_to_user, to_copy);
			size -= to_copy;
			ret += to_copy;
			skb_off += to_copy;
			if (size == 0)
				goto out;
		}
	}
out:
	return ret;
}

/*
 * We have a series of skbs that have fragmented pieces of the congestion
 * bitmap.  They must add up to the exact size of the congestion bitmap.  We
 * use the skb helpers to copy those into the pages that make up the in-memory
 * congestion bitmap for the remote address of this connection.  We then tell
 * the congestion core that the bitmap has been changed so that it can wake up
 * sleepers.
 *
 * This is racing with sending paths which are using test_bit to see if the
 * bitmap indicates that their recipient is congested.
 */

static void rds_tcp_cong_recv(struct rds_connection *conn,
			      struct rds_tcp_incoming *tinc)
{
	struct sk_buff *skb;
	unsigned int to_copy, skb_off;
	unsigned int map_off;
	unsigned int map_page;
	struct rds_cong_map *map;
	int ret;

	/* catch completely corrupt packets */
	if (be32_to_cpu(tinc->ti_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
		return;

	map_page = 0;
	map_off = 0;
	map = conn->c_fcong;

	skb_queue_walk(&tinc->ti_skb_list, skb) {
		skb_off = 0;
		while (skb_off < skb->len) {
			to_copy = min_t(unsigned int, PAGE_SIZE - map_off,
					skb->len - skb_off);

			BUG_ON(map_page >= RDS_CONG_MAP_PAGES);

			/* only returns 0 or -error */
			ret = skb_copy_bits(skb, skb_off,
				(void *)map->m_page_addrs[map_page] + map_off,
				to_copy);
			BUG_ON(ret != 0);

			skb_off += to_copy;
			map_off += to_copy;
			if (map_off == PAGE_SIZE) {
				map_off = 0;
				map_page++;
			}
		}
	}

	rds_cong_map_updated(map, ~(u64) 0);
}

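/*
 * Argument bundle threaded through tcp_read_sock()'s read_descriptor_t so
 * that the read actor below can see the connection and the allocation
 * context it is running under.
 */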
struct rds_tcp_desc_arg {
	struct rds_connection *conn;
	gfp_t gfp;
	enum km_type km;
};

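/*
 * Read actor called by tcp_read_sock() for each skb on the receive queue.
 * It reassembles the byte stream into rds_tcp_incoming messages: a
 * rds_header first, then h_len bytes of data held as a list of skb clones.
 */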
static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
			     unsigned int offset, size_t len)
{
	struct rds_tcp_desc_arg *arg = desc->arg.data;
	struct rds_connection *conn = arg->conn;
	struct rds_tcp_connection *tc = conn->c_transport_data;
	struct rds_tcp_incoming *tinc = tc->t_tinc;
	struct sk_buff *clone;
	size_t left = len, to_copy;

	rdsdebug("tcp data tc %p skb %p offset %u len %zu\n", tc, skb, offset,
		 len);

	/*
	 * tcp_read_sock() interprets partial progress as an indication to stop
	 * processing.
	 */
	while (left) {
		if (!tinc) {
			tinc = kmem_cache_alloc(rds_tcp_incoming_slab,
						arg->gfp);
			if (!tinc) {
				desc->error = -ENOMEM;
				goto out;
			}
			tc->t_tinc = tinc;
			rdsdebug("alloced tinc %p\n", tinc);
			rds_inc_init(&tinc->ti_inc, conn, conn->c_faddr);
			/*
			 * XXX we might be able to use the __ variants when
			 * we've already serialized at a higher level.
			 */
			skb_queue_head_init(&tinc->ti_skb_list);
		}

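		/*
		 * Copy in as much of the rds header as this skb provides;
		 * once the header is complete, h_len tells us how much
		 * message data to expect.
		 */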
		if (left && tc->t_tinc_hdr_rem) {
			to_copy = min(tc->t_tinc_hdr_rem, left);
			rdsdebug("copying %zu header from skb %p\n", to_copy,
				 skb);
			skb_copy_bits(skb, offset,
				      (char *)&tinc->ti_inc.i_hdr +
						sizeof(struct rds_header) -
						tc->t_tinc_hdr_rem,
				      to_copy);
			tc->t_tinc_hdr_rem -= to_copy;
			left -= to_copy;
			offset += to_copy;

			if (tc->t_tinc_hdr_rem == 0) {
				/* could be 0 for a 0 len message */
				tc->t_tinc_data_rem =
					be32_to_cpu(tinc->ti_inc.i_hdr.h_len);
			}
		}

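		/*
		 * Clone the skb and trim the clone down to just the message
		 * data it contributes, then queue it on the incoming message.
		 */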
		if (left && tc->t_tinc_data_rem) {
			clone = skb_clone(skb, arg->gfp);
			if (!clone) {
				desc->error = -ENOMEM;
				goto out;
			}

			to_copy = min(tc->t_tinc_data_rem, left);
			pskb_pull(clone, offset);
			pskb_trim(clone, to_copy);
			skb_queue_tail(&tinc->ti_skb_list, clone);

			rdsdebug("skb %p data %p len %d off %u to_copy %zu -> "
				 "clone %p data %p len %d\n",
				 skb, skb->data, skb->len, offset, to_copy,
				 clone, clone->data, clone->len);

			tc->t_tinc_data_rem -= to_copy;
			left -= to_copy;
			offset += to_copy;
		}

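		/*
		 * Message complete: congestion bitmaps are consumed here,
		 * everything else is handed up to the generic recv path.
		 */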
		if (tc->t_tinc_hdr_rem == 0 && tc->t_tinc_data_rem == 0) {
			if (tinc->ti_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
				rds_tcp_cong_recv(conn, tinc);
			else
				rds_recv_incoming(conn, conn->c_faddr,
						  conn->c_laddr, &tinc->ti_inc,
						  arg->gfp, arg->km);

			tc->t_tinc_hdr_rem = sizeof(struct rds_header);
			tc->t_tinc_data_rem = 0;
			tc->t_tinc = NULL;
			rds_inc_put(&tinc->ti_inc);
			tinc = NULL;
		}
	}
out:
	rdsdebug("returning len %zu left %zu skb len %d rx queue depth %d\n",
		 len, left, skb->len,
		 skb_queue_len(&tc->t_sock->sk->sk_receive_queue));
	return len - left;
}

/* the caller has to hold the sock lock */
int rds_tcp_read_sock(struct rds_connection *conn, gfp_t gfp, enum km_type km)
{
	struct rds_tcp_connection *tc = conn->c_transport_data;
	struct socket *sock = tc->t_sock;
	read_descriptor_t desc;
	struct rds_tcp_desc_arg arg;

	/* It's like glib in the kernel! */
	arg.conn = conn;
	arg.gfp = gfp;
	arg.km = km;
	desc.arg.data = &arg;
	desc.error = 0;
	desc.count = 1; /* give more than one skb per call */

	tcp_read_sock(sock->sk, &desc, rds_tcp_data_recv);
	rdsdebug("tcp_read_sock for tc %p gfp 0x%x returned %d\n", tc, gfp,
		 desc.error);

	return desc.error;
}

/*
 * We hold the sock lock to serialize our rds_tcp_recv->tcp_read_sock from
 * data_ready.
 *
 * If we fail to allocate we're in trouble; blindly wait some time before
 * trying again to see if the VM can free up something for us.
 */
int rds_tcp_recv(struct rds_connection *conn)
{
	struct rds_tcp_connection *tc = conn->c_transport_data;
	struct socket *sock = tc->t_sock;
	int ret = 0;

	rdsdebug("recv worker conn %p tc %p sock %p\n", conn, tc, sock);

	lock_sock(sock->sk);
	ret = rds_tcp_read_sock(conn, GFP_KERNEL, KM_USER0);
	release_sock(sock->sk);

	return ret;
}

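/*
 * Installed in place of the socket's original sk_data_ready callback.
 * This runs in softirq context, so it reads with GFP_ATOMIC and kicks
 * the recv worker to retry if an allocation fails.
 */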
void rds_tcp_data_ready(struct sock *sk, int bytes)
{
	void (*ready)(struct sock *sk, int bytes);
	struct rds_connection *conn;
	struct rds_tcp_connection *tc;

	rdsdebug("data ready sk %p bytes %d\n", sk, bytes);

	read_lock(&sk->sk_callback_lock);
	conn = sk->sk_user_data;
	if (!conn) { /* check for teardown race */
		ready = sk->sk_data_ready;
		goto out;
	}

	tc = conn->c_transport_data;
	ready = tc->t_orig_data_ready;
	rds_tcp_stats_inc(s_tcp_data_ready_calls);

	if (rds_tcp_read_sock(conn, GFP_ATOMIC, KM_SOFTIRQ0) == -ENOMEM)
		queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
out:
	read_unlock(&sk->sk_callback_lock);
	ready(sk, bytes);
}

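/* Create the slab cache that rds_tcp_incoming structs come from. */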
int __init rds_tcp_recv_init(void)
{
	rds_tcp_incoming_slab = kmem_cache_create("rds_tcp_incoming",
					sizeof(struct rds_tcp_incoming),
					0, 0, NULL);
	if (!rds_tcp_incoming_slab)
		return -ENOMEM;
	return 0;
}

void rds_tcp_recv_exit(void)
{
	kmem_cache_destroy(rds_tcp_incoming_slab);
}