/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/module.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "rds.h"
#include "tcp.h"

/* only for info exporting */
static DEFINE_SPINLOCK(rds_tcp_tc_list_lock);
static LIST_HEAD(rds_tcp_tc_list);
static unsigned int rds_tcp_tc_count;

/* Track rds_tcp_connection structs so they can be cleaned up */
static DEFINE_SPINLOCK(rds_tcp_conn_lock);
static LIST_HEAD(rds_tcp_conn_list);

static struct kmem_cache *rds_tcp_conn_slab;

#define RDS_TCP_DEFAULT_BUFSIZE (128 * 1024)

/* doing it this way avoids calling tcp_sk() */
void rds_tcp_nonagle(struct socket *sock)
{
	mm_segment_t oldfs = get_fs();
	int val = 1;

	set_fs(KERNEL_DS);
	sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
			      sizeof(val));
	set_fs(oldfs);
}

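/* Tune a connected socket for RDS: disable Nagle and pin the send and
 * receive buffers at RDS_TCP_DEFAULT_BUFSIZE so TCP autotuning won't
 * resize them later.
 */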
void rds_tcp_tune(struct socket *sock)
{
	struct sock *sk = sock->sk;

	rds_tcp_nonagle(sock);

	/*
	 * We're trying to saturate gigabit with the default,
	 * see svc_sock_setbufsize().
	 */
	lock_sock(sk);
	sk->sk_sndbuf = RDS_TCP_DEFAULT_BUFSIZE;
	sk->sk_rcvbuf = RDS_TCP_DEFAULT_BUFSIZE;
	sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
	release_sock(sk);
}

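/* Raw TCP sequence-number accessors for the send path: snd_nxt marks
 * where newly queued RDS data will start, snd_una tracks how far the
 * peer has acked.
 */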
u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc)
{
	return tcp_sk(tc->t_sock->sk)->snd_nxt;
}

u32 rds_tcp_snd_una(struct rds_tcp_connection *tc)
{
	return tcp_sk(tc->t_sock->sk)->snd_una;
}

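/* Undo rds_tcp_set_callbacks(): unhook the connection from the global
 * info list and hand the socket's callbacks back to their original
 * owners.
 */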
void rds_tcp_restore_callbacks(struct socket *sock,
			       struct rds_tcp_connection *tc)
{
	rdsdebug("restoring sock %p callbacks from tc %p\n", sock, tc);
	write_lock_bh(&sock->sk->sk_callback_lock);

	/* done under the callback_lock to serialize with write_space */
	spin_lock(&rds_tcp_tc_list_lock);
	list_del_init(&tc->t_list_item);
	rds_tcp_tc_count--;
	spin_unlock(&rds_tcp_tc_list_lock);

	tc->t_sock = NULL;

	sock->sk->sk_write_space = tc->t_orig_write_space;
	sock->sk->sk_data_ready = tc->t_orig_data_ready;
	sock->sk->sk_state_change = tc->t_orig_state_change;
	sock->sk->sk_user_data = NULL;

	write_unlock_bh(&sock->sk->sk_callback_lock);
}

/*
 * This is the only path that sets tc->t_sock. Send and receive trust that
 * it is set. The RDS_CONN_CONNECTED bit protects those paths from being
 * called while it isn't set.
 */
void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn)
{
	struct rds_tcp_connection *tc = conn->c_transport_data;

	rdsdebug("setting sock %p callbacks to tc %p\n", sock, tc);
	write_lock_bh(&sock->sk->sk_callback_lock);

	/* done under the callback_lock to serialize with write_space */
	spin_lock(&rds_tcp_tc_list_lock);
	list_add_tail(&tc->t_list_item, &rds_tcp_tc_list);
	rds_tcp_tc_count++;
	spin_unlock(&rds_tcp_tc_list_lock);

	/* accepted sockets need our listen data ready undone */
	if (sock->sk->sk_data_ready == rds_tcp_listen_data_ready)
		sock->sk->sk_data_ready = sock->sk->sk_user_data;

	tc->t_sock = sock;
	tc->conn = conn;
	tc->t_orig_data_ready = sock->sk->sk_data_ready;
	tc->t_orig_write_space = sock->sk->sk_write_space;
	tc->t_orig_state_change = sock->sk->sk_state_change;

	sock->sk->sk_user_data = conn;
	sock->sk->sk_data_ready = rds_tcp_data_ready;
	sock->sk->sk_write_space = rds_tcp_write_space;
	sock->sk->sk_state_change = rds_tcp_state_change;

	write_unlock_bh(&sock->sk->sk_callback_lock);
}

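/* Export a snapshot of every RDS/TCP socket via the rds-info interface,
 * one struct rds_info_tcp_socket per connection.
 */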
static void rds_tcp_tc_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens)
{
	struct rds_info_tcp_socket tsinfo;
	struct rds_tcp_connection *tc;
	unsigned long flags;
	struct sockaddr_in sin;
	int sinlen;

	spin_lock_irqsave(&rds_tcp_tc_list_lock, flags);

	if (len / sizeof(tsinfo) < rds_tcp_tc_count)
		goto out;

	list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) {
		/* query the connection's own socket, not the info socket
		 * that was handed to us */
		tc->t_sock->ops->getname(tc->t_sock, (struct sockaddr *)&sin,
					 &sinlen, 0);
		tsinfo.local_addr = sin.sin_addr.s_addr;
		tsinfo.local_port = sin.sin_port;
		tc->t_sock->ops->getname(tc->t_sock, (struct sockaddr *)&sin,
					 &sinlen, 1);
		tsinfo.peer_addr = sin.sin_addr.s_addr;
		tsinfo.peer_port = sin.sin_port;

		tsinfo.hdr_rem = tc->t_tinc_hdr_rem;
		tsinfo.data_rem = tc->t_tinc_data_rem;
		tsinfo.last_sent_nxt = tc->t_last_sent_nxt;
		tsinfo.last_expected_una = tc->t_last_expected_una;
		tsinfo.last_seen_una = tc->t_last_seen_una;

		rds_info_copy(iter, &tsinfo, sizeof(tsinfo));
	}

out:
	lens->nr = rds_tcp_tc_count;
	lens->each = sizeof(tsinfo);

	spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags);
}

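/* A candidate local address is only usable if it is assigned to an
 * interface in this netns (inet_addr_type() reports RTN_LOCAL).
 */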
static int rds_tcp_laddr_check(struct net *net, __be32 addr)
{
	if (inet_addr_type(net, addr) == RTN_LOCAL)
		return 0;
	return -EADDRNOTAVAIL;
}

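/* Allocate per-connection transport state and link it on the global
 * list so module unload can find and destroy it.
 */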
static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
	struct rds_tcp_connection *tc;

	tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp);
	if (!tc)
		return -ENOMEM;

	tc->t_sock = NULL;
	tc->t_tinc = NULL;
	tc->t_tinc_hdr_rem = sizeof(struct rds_header);
	tc->t_tinc_data_rem = 0;

	conn->c_transport_data = tc;

	spin_lock_irq(&rds_tcp_conn_lock);
	list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
	spin_unlock_irq(&rds_tcp_conn_lock);

	rdsdebug("alloced tc %p\n", conn->c_transport_data);
	return 0;
}

static void rds_tcp_conn_free(void *arg)
{
	struct rds_tcp_connection *tc = arg;
	unsigned long flags;

	rdsdebug("freeing tc %p\n", tc);

	spin_lock_irqsave(&rds_tcp_conn_lock, flags);
	list_del(&tc->t_tcp_node);
	spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);

	kmem_cache_free(rds_tcp_conn_slab, tc);
}

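/* Module-unload teardown: splice the connection list away under the
 * lock, then destroy each connection with irqs enabled.
 */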
static void rds_tcp_destroy_conns(void)
{
	struct rds_tcp_connection *tc, *_tc;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(&rds_tcp_conn_lock);
	list_splice(&rds_tcp_conn_list, &tmp_list);
	INIT_LIST_HEAD(&rds_tcp_conn_list);
	spin_unlock_irq(&rds_tcp_conn_lock);

	list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) {
		if (tc->conn->c_passive)
			rds_conn_destroy(tc->conn->c_passive);
		rds_conn_destroy(tc->conn);
	}
}

static void rds_tcp_exit(void);

struct rds_transport rds_tcp_transport = {
	.laddr_check = rds_tcp_laddr_check,
	.xmit_prepare = rds_tcp_xmit_prepare,
	.xmit_complete = rds_tcp_xmit_complete,
	.xmit = rds_tcp_xmit,
	.recv = rds_tcp_recv,
	.conn_alloc = rds_tcp_conn_alloc,
	.conn_free = rds_tcp_conn_free,
	.conn_connect = rds_tcp_conn_connect,
	.conn_shutdown = rds_tcp_conn_shutdown,
	.inc_copy_to_user = rds_tcp_inc_copy_to_user,
	.inc_free = rds_tcp_inc_free,
	.stats_info_copy = rds_tcp_stats_info_copy,
	.exit = rds_tcp_exit,
	.t_owner = THIS_MODULE,
	.t_name = "tcp",
	.t_type = RDS_TRANS_TCP,
	.t_prefer_loopback = 1,
};

static int rds_tcp_netid;

/* per-network namespace private data for this module */
struct rds_tcp_net {
	struct socket *rds_tcp_listen_sock;
	struct work_struct rds_tcp_accept_w;
};

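/* Drain this namespace's listen socket, accepting connections until
 * rds_tcp_accept_one() reports no more are pending.
 */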
static void rds_tcp_accept_worker(struct work_struct *work)
{
	struct rds_tcp_net *rtn = container_of(work,
					       struct rds_tcp_net,
					       rds_tcp_accept_w);

	while (rds_tcp_accept_one(rtn->rds_tcp_listen_sock) == 0)
		cond_resched();
}

void rds_tcp_accept_work(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

	queue_work(rds_wq, &rtn->rds_tcp_accept_w);
}

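/* Per-netns setup: create the listen socket and the work struct that
 * accepts connections on it.
 */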
static __net_init int rds_tcp_init_net(struct net *net)
{
	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

	rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net);
	if (!rtn->rds_tcp_listen_sock) {
		pr_warn("could not set up listen sock\n");
		return -EAFNOSUPPORT;
	}
	INIT_WORK(&rtn->rds_tcp_accept_w, rds_tcp_accept_worker);
	return 0;
}

static void __net_exit rds_tcp_exit_net(struct net *net)
{
	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

	/* If rds_tcp_exit_net() is called as a result of netns deletion,
	 * the rds_tcp_kill_sock() device notifier would already have cleaned
	 * up the listen socket, thus there is no work to do in this function.
	 *
	 * If rds_tcp_exit_net() is called as a result of module unload,
	 * i.e., due to rds_tcp_exit() -> unregister_pernet_subsys(), then
	 * we do need to clean up the listen socket here.
	 */
	if (rtn->rds_tcp_listen_sock) {
		rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
		rtn->rds_tcp_listen_sock = NULL;
		flush_work(&rtn->rds_tcp_accept_w);
	}
}

static struct pernet_operations rds_tcp_net_ops = {
	.init = rds_tcp_init_net,
	.exit = rds_tcp_exit_net,
	.id = &rds_tcp_netid,
	.size = sizeof(struct rds_tcp_net),
};

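/* Tear down all RDS/TCP state belonging to a dying netns: stop the
 * listener, then disconnect and destroy every connection in that netns.
 */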
static void rds_tcp_kill_sock(struct net *net)
{
	struct rds_tcp_connection *tc, *_tc;
	struct sock *sk;
	LIST_HEAD(tmp_list);
	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

	rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
	rtn->rds_tcp_listen_sock = NULL;
	flush_work(&rtn->rds_tcp_accept_w);
	spin_lock_irq(&rds_tcp_conn_lock);
	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
		struct net *c_net = read_pnet(&tc->conn->c_net);

		if (net != c_net || !tc->t_sock)
			continue;
		list_move_tail(&tc->t_tcp_node, &tmp_list);
	}
	spin_unlock_irq(&rds_tcp_conn_lock);
	list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) {
		sk = tc->t_sock->sk;
		sk->sk_prot->disconnect(sk, 0);
		tcp_done(sk);
		if (tc->conn->c_passive)
			rds_conn_destroy(tc->conn->c_passive);
		rds_conn_destroy(tc->conn);
	}
}

static int rds_tcp_dev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* rds-tcp registers as a pernet subsys, so the ->exit will only
	 * get invoked after network activity has quiesced. We need to
	 * clean up all sockets to quiesce network activity, and use
	 * the unregistration of the per-net loopback device as a trigger
	 * to start that cleanup.
	 */
	if (event == NETDEV_UNREGISTER_FINAL &&
	    dev->ifindex == LOOPBACK_IFINDEX)
		rds_tcp_kill_sock(dev_net(dev));

	return NOTIFY_DONE;
}

static struct notifier_block rds_tcp_dev_notifier = {
	.notifier_call = rds_tcp_dev_event,
	.priority = -10, /* must be called after other network notifiers */
};

static void rds_tcp_exit(void)
{
	rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
	unregister_pernet_subsys(&rds_tcp_net_ops);
	if (unregister_netdevice_notifier(&rds_tcp_dev_notifier))
		pr_warn("could not unregister rds_tcp_dev_notifier\n");
	rds_tcp_destroy_conns();
	rds_trans_unregister(&rds_tcp_transport);
	rds_tcp_recv_exit();
	kmem_cache_destroy(rds_tcp_conn_slab);
}
module_exit(rds_tcp_exit);

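/* Module init.  Registrations are unwound in reverse order on failure,
 * so each error label below undoes everything set up before the step
 * that failed.
 */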
static int rds_tcp_init(void)
{
	int ret;

	rds_tcp_conn_slab = kmem_cache_create("rds_tcp_connection",
					      sizeof(struct rds_tcp_connection),
					      0, 0, NULL);
	if (!rds_tcp_conn_slab) {
		ret = -ENOMEM;
		goto out;
	}

	ret = register_netdevice_notifier(&rds_tcp_dev_notifier);
	if (ret) {
		pr_warn("could not register rds_tcp_dev_notifier\n");
		goto out_slab;
	}

	ret = register_pernet_subsys(&rds_tcp_net_ops);
	if (ret)
		goto out_notifier;

	ret = rds_tcp_recv_init();
	if (ret)
		goto out_pernet;

	ret = rds_trans_register(&rds_tcp_transport);
	if (ret)
		goto out_recv;

	rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);

	goto out;

out_recv:
	rds_tcp_recv_exit();
out_pernet:
	unregister_pernet_subsys(&rds_tcp_net_ops);
out_notifier:
	unregister_netdevice_notifier(&rds_tcp_dev_notifier);
out_slab:
	kmem_cache_destroy(rds_tcp_conn_slab);
out:
	return ret;
}
module_init(rds_tcp_init);

MODULE_AUTHOR("Oracle Corporation <rds-devel@oss.oracle.com>");
MODULE_DESCRIPTION("RDS: TCP transport");
MODULE_LICENSE("Dual BSD/GPL");