blob: 130713869a1681827487eaa4fa9afdac872c22a3 [file] [log] [blame]
/* RxRPC virtual connection handler
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
11
Joe Perches9b6d5392016-06-02 12:08:52 -070012#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
David Howells17926a72007-04-26 15:48:28 -070014#include <linux/module.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090015#include <linux/slab.h>
David Howells17926a72007-04-26 15:48:28 -070016#include <linux/net.h>
17#include <linux/skbuff.h>
18#include <linux/crypto.h>
19#include <net/sock.h>
20#include <net/af_rxrpc.h>
21#include "ar-internal.h"
22
David Howells5873c082014-02-07 18:58:44 +000023/*
24 * Time till a connection expires after last use (in seconds).
25 */
David Howellsdad8aff2016-03-09 23:22:56 +000026unsigned int rxrpc_connection_expiry = 10 * 60;
David Howells5873c082014-02-07 18:58:44 +000027
David Howells17926a72007-04-26 15:48:28 -070028static void rxrpc_connection_reaper(struct work_struct *work);
29
30LIST_HEAD(rxrpc_connections);
31DEFINE_RWLOCK(rxrpc_connection_lock);
David Howells17926a72007-04-26 15:48:28 -070032static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);
33
34/*
David Howells17926a72007-04-26 15:48:28 -070035 * allocate a new connection
36 */
David Howellsc6d2b8d2016-04-04 14:00:40 +010037struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
David Howells17926a72007-04-26 15:48:28 -070038{
39 struct rxrpc_connection *conn;
40
41 _enter("");
42
43 conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
44 if (conn) {
David Howells999b69f2016-06-17 15:42:35 +010045 spin_lock_init(&conn->channel_lock);
46 init_waitqueue_head(&conn->channel_wq);
David Howells17926a72007-04-26 15:48:28 -070047 INIT_WORK(&conn->processor, &rxrpc_process_connection);
David Howells999b69f2016-06-17 15:42:35 +010048 INIT_LIST_HEAD(&conn->link);
David Howells17926a72007-04-26 15:48:28 -070049 skb_queue_head_init(&conn->rx_queue);
David Howellse0e4d822016-04-07 17:23:58 +010050 conn->security = &rxrpc_no_security;
David Howells17926a72007-04-26 15:48:28 -070051 spin_lock_init(&conn->state_lock);
David Howells001c1122016-06-30 10:45:22 +010052 /* We maintain an extra ref on the connection whilst it is
53 * on the rxrpc_connections list.
54 */
55 atomic_set(&conn->usage, 2);
David Howells17926a72007-04-26 15:48:28 -070056 conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
David Howells999b69f2016-06-17 15:42:35 +010057 atomic_set(&conn->avail_chans, RXRPC_MAXCALLS);
David Howells17926a72007-04-26 15:48:28 -070058 conn->size_align = 4;
David Howells0d12f8a2016-03-04 15:53:46 +000059 conn->header_size = sizeof(struct rxrpc_wire_header);
David Howells17926a72007-04-26 15:48:28 -070060 }
61
Adrian Bunk16c61ad2007-06-15 15:15:43 -070062 _leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
David Howells17926a72007-04-26 15:48:28 -070063 return conn;
64}
65
66/*
David Howells17926a72007-04-26 15:48:28 -070067 * find a connection based on transport and RxRPC connection ID for an incoming
68 * packet
69 */
David Howellsaa390bb2016-06-17 10:06:56 +010070struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_local *local,
David Howells42886ff2016-06-16 13:31:07 +010071 struct sk_buff *skb)
David Howells17926a72007-04-26 15:48:28 -070072{
73 struct rxrpc_connection *conn;
David Howells1291e9d2016-06-30 12:02:53 +010074 struct rxrpc_conn_proto k;
David Howells42886ff2016-06-16 13:31:07 +010075 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
David Howells1291e9d2016-06-30 12:02:53 +010076 struct sockaddr_rxrpc srx;
77 struct rxrpc_peer *peer;
David Howells17926a72007-04-26 15:48:28 -070078 struct rb_node *p;
David Howells17926a72007-04-26 15:48:28 -070079
David Howells42886ff2016-06-16 13:31:07 +010080 _enter(",{%x,%x}", sp->hdr.cid, sp->hdr.flags);
David Howells17926a72007-04-26 15:48:28 -070081
David Howells1291e9d2016-06-30 12:02:53 +010082 if (rxrpc_extract_addr_from_skb(&srx, skb) < 0)
83 goto not_found;
David Howells17926a72007-04-26 15:48:28 -070084
David Howells1291e9d2016-06-30 12:02:53 +010085 /* We may have to handle mixing IPv4 and IPv6 */
86 if (srx.transport.family != local->srx.transport.family) {
87 pr_warn_ratelimited("AF_RXRPC: Protocol mismatch %u not %u\n",
88 srx.transport.family,
89 local->srx.transport.family);
90 goto not_found;
91 }
92
93 k.epoch = sp->hdr.epoch;
94 k.cid = sp->hdr.cid & RXRPC_CIDMASK;
David Howells17926a72007-04-26 15:48:28 -070095
David Howells4a3388c2016-04-04 14:00:37 +010096 if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) {
David Howells1291e9d2016-06-30 12:02:53 +010097 /* We need to look up service connections by the full protocol
98 * parameter set. We look up the peer first as an intermediate
99 * step and then the connection from the peer's tree.
100 */
101 peer = rxrpc_lookup_peer_rcu(local, &srx);
102 if (!peer)
103 goto not_found;
104
105 read_lock_bh(&peer->conn_lock);
106
David Howellsaa390bb2016-06-17 10:06:56 +0100107 p = peer->service_conns.rb_node;
David Howells4a3388c2016-04-04 14:00:37 +0100108 while (p) {
David Howells999b69f2016-06-17 15:42:35 +0100109 conn = rb_entry(p, struct rxrpc_connection, service_node);
David Howells17926a72007-04-26 15:48:28 -0700110
David Howells4a3388c2016-04-04 14:00:37 +0100111 _debug("maybe %x", conn->proto.cid);
David Howells17926a72007-04-26 15:48:28 -0700112
David Howells1291e9d2016-06-30 12:02:53 +0100113 if (k.epoch < conn->proto.epoch)
David Howells4a3388c2016-04-04 14:00:37 +0100114 p = p->rb_left;
David Howells1291e9d2016-06-30 12:02:53 +0100115 else if (k.epoch > conn->proto.epoch)
David Howells4a3388c2016-04-04 14:00:37 +0100116 p = p->rb_right;
David Howells1291e9d2016-06-30 12:02:53 +0100117 else if (k.cid < conn->proto.cid)
David Howells4a3388c2016-04-04 14:00:37 +0100118 p = p->rb_left;
David Howells1291e9d2016-06-30 12:02:53 +0100119 else if (k.cid > conn->proto.cid)
David Howells4a3388c2016-04-04 14:00:37 +0100120 p = p->rb_right;
121 else
David Howells1291e9d2016-06-30 12:02:53 +0100122 goto found_service_conn;
David Howells4a3388c2016-04-04 14:00:37 +0100123 }
David Howells1291e9d2016-06-30 12:02:53 +0100124 read_unlock_bh(&peer->conn_lock);
David Howells4a3388c2016-04-04 14:00:37 +0100125 } else {
David Howells1291e9d2016-06-30 12:02:53 +0100126 conn = idr_find(&rxrpc_client_conn_ids,
127 k.cid >> RXRPC_CIDSHIFT);
128 if (!conn ||
129 conn->proto.epoch != k.epoch ||
130 conn->params.local != local)
131 goto not_found;
132
133 peer = conn->params.peer;
134 switch (srx.transport.family) {
135 case AF_INET:
136 if (peer->srx.transport.sin.sin_port !=
137 srx.transport.sin.sin_port ||
138 peer->srx.transport.sin.sin_addr.s_addr !=
139 srx.transport.sin.sin_addr.s_addr)
140 goto not_found;
141 break;
142 default:
143 BUG();
144 }
145
146 conn = rxrpc_get_connection_maybe(conn);
147 _leave(" = %p", conn);
148 return conn;
David Howells17926a72007-04-26 15:48:28 -0700149 }
150
David Howells1291e9d2016-06-30 12:02:53 +0100151not_found:
David Howells17926a72007-04-26 15:48:28 -0700152 _leave(" = NULL");
153 return NULL;
154
David Howells1291e9d2016-06-30 12:02:53 +0100155found_service_conn:
David Howells001c1122016-06-30 10:45:22 +0100156 conn = rxrpc_get_connection_maybe(conn);
David Howellsaa390bb2016-06-17 10:06:56 +0100157 read_unlock_bh(&peer->conn_lock);
David Howells17926a72007-04-26 15:48:28 -0700158 _leave(" = %p", conn);
159 return conn;
160}
161
162/*
David Howells999b69f2016-06-17 15:42:35 +0100163 * Disconnect a call and clear any channel it occupies when that call
David Howellsa1399f82016-06-27 14:39:44 +0100164 * terminates. The caller must hold the channel_lock and must release the
165 * call's ref on the connection.
166 */
167void __rxrpc_disconnect_call(struct rxrpc_call *call)
168{
169 struct rxrpc_connection *conn = call->conn;
170 struct rxrpc_channel *chan = &conn->channels[call->channel];
171
172 _enter("%d,%d", conn->debug_id, call->channel);
173
174 if (rcu_access_pointer(chan->call) == call) {
175 /* Save the result of the call so that we can repeat it if necessary
176 * through the channel, whilst disposing of the actual call record.
177 */
178 chan->last_result = call->local_abort;
179 smp_wmb();
180 chan->last_call = chan->call_id;
181 chan->call_id = chan->call_counter;
182
183 rcu_assign_pointer(chan->call, NULL);
184 atomic_inc(&conn->avail_chans);
185 wake_up(&conn->channel_wq);
186 }
187
188 _leave("");
189}
190
191/*
192 * Disconnect a call and clear any channel it occupies when that call
David Howells999b69f2016-06-17 15:42:35 +0100193 * terminates.
194 */
195void rxrpc_disconnect_call(struct rxrpc_call *call)
196{
197 struct rxrpc_connection *conn = call->conn;
David Howells999b69f2016-06-17 15:42:35 +0100198
David Howellse653cfe2016-04-04 14:00:38 +0100199 spin_lock(&conn->channel_lock);
David Howellsa1399f82016-06-27 14:39:44 +0100200 __rxrpc_disconnect_call(call);
David Howellse653cfe2016-04-04 14:00:38 +0100201 spin_unlock(&conn->channel_lock);
202
203 call->conn = NULL;
204 rxrpc_put_connection(conn);
David Howells999b69f2016-06-17 15:42:35 +0100205}
206
207/*
David Howells17926a72007-04-26 15:48:28 -0700208 * release a virtual connection
209 */
210void rxrpc_put_connection(struct rxrpc_connection *conn)
211{
David Howells999b69f2016-06-17 15:42:35 +0100212 if (!conn)
213 return;
214
David Howells17926a72007-04-26 15:48:28 -0700215 _enter("%p{u=%d,d=%d}",
216 conn, atomic_read(&conn->usage), conn->debug_id);
217
David Howells001c1122016-06-30 10:45:22 +0100218 ASSERTCMP(atomic_read(&conn->usage), >, 1);
David Howells17926a72007-04-26 15:48:28 -0700219
Ksenija Stanojevic22a3f9a2015-09-17 18:12:53 +0200220 conn->put_time = ktime_get_seconds();
David Howells001c1122016-06-30 10:45:22 +0100221 if (atomic_dec_return(&conn->usage) == 1) {
David Howells17926a72007-04-26 15:48:28 -0700222 _debug("zombie");
David Howells651350d2007-04-26 15:50:17 -0700223 rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
David Howells17926a72007-04-26 15:48:28 -0700224 }
225
226 _leave("");
227}
228
229/*
230 * destroy a virtual connection
231 */
David Howellsdee46362016-06-27 17:11:19 +0100232static void rxrpc_destroy_connection(struct rcu_head *rcu)
David Howells17926a72007-04-26 15:48:28 -0700233{
David Howellsdee46362016-06-27 17:11:19 +0100234 struct rxrpc_connection *conn =
235 container_of(rcu, struct rxrpc_connection, rcu);
236
237 _enter("{%d,u=%d}", conn->debug_id, atomic_read(&conn->usage));
David Howells17926a72007-04-26 15:48:28 -0700238
239 ASSERTCMP(atomic_read(&conn->usage), ==, 0);
240
241 _net("DESTROY CONN %d", conn->debug_id);
242
David Howells17926a72007-04-26 15:48:28 -0700243 rxrpc_purge_queue(&conn->rx_queue);
244
David Howellse0e4d822016-04-07 17:23:58 +0100245 conn->security->clear(conn);
David Howells19ffa012016-04-04 14:00:36 +0100246 key_put(conn->params.key);
David Howellse0e4d822016-04-07 17:23:58 +0100247 key_put(conn->server_key);
David Howellsaa390bb2016-06-17 10:06:56 +0100248 rxrpc_put_peer(conn->params.peer);
249 rxrpc_put_local(conn->params.local);
David Howellse0e4d822016-04-07 17:23:58 +0100250
David Howells17926a72007-04-26 15:48:28 -0700251 kfree(conn);
252 _leave("");
253}
254
255/*
256 * reap dead connections
257 */
Roel Kluin5eaa65b2008-12-10 15:18:31 -0800258static void rxrpc_connection_reaper(struct work_struct *work)
David Howells17926a72007-04-26 15:48:28 -0700259{
260 struct rxrpc_connection *conn, *_p;
David Howells001c1122016-06-30 10:45:22 +0100261 unsigned long reap_older_than, earliest, put_time, now;
David Howells17926a72007-04-26 15:48:28 -0700262
263 LIST_HEAD(graveyard);
264
265 _enter("");
266
Ksenija Stanojevic22a3f9a2015-09-17 18:12:53 +0200267 now = ktime_get_seconds();
David Howells001c1122016-06-30 10:45:22 +0100268 reap_older_than = now - rxrpc_connection_expiry;
David Howells17926a72007-04-26 15:48:28 -0700269 earliest = ULONG_MAX;
270
David Howellsb3f57502016-06-21 16:10:03 +0100271 write_lock(&rxrpc_connection_lock);
David Howells17926a72007-04-26 15:48:28 -0700272 list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
David Howells001c1122016-06-30 10:45:22 +0100273 ASSERTCMP(atomic_read(&conn->usage), >, 0);
274 if (likely(atomic_read(&conn->usage) > 1))
David Howells17926a72007-04-26 15:48:28 -0700275 continue;
276
David Howells001c1122016-06-30 10:45:22 +0100277 put_time = READ_ONCE(conn->put_time);
278 if (time_after(put_time, reap_older_than)) {
279 if (time_before(put_time, earliest))
280 earliest = put_time;
281 continue;
David Howells999b69f2016-06-17 15:42:35 +0100282 }
David Howells001c1122016-06-30 10:45:22 +0100283
284 /* The usage count sits at 1 whilst the object is unused on the
285 * list; we reduce that to 0 to make the object unavailable.
286 */
287 if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
288 continue;
289
290 if (rxrpc_conn_is_client(conn))
291 rxrpc_unpublish_client_conn(conn);
292 else
293 rxrpc_unpublish_service_conn(conn);
294
295 list_move_tail(&conn->link, &graveyard);
David Howells17926a72007-04-26 15:48:28 -0700296 }
David Howellsb3f57502016-06-21 16:10:03 +0100297 write_unlock(&rxrpc_connection_lock);
David Howells17926a72007-04-26 15:48:28 -0700298
299 if (earliest != ULONG_MAX) {
300 _debug("reschedule reaper %ld", (long) earliest - now);
301 ASSERTCMP(earliest, >, now);
David Howells651350d2007-04-26 15:50:17 -0700302 rxrpc_queue_delayed_work(&rxrpc_connection_reap,
303 (earliest - now) * HZ);
David Howells17926a72007-04-26 15:48:28 -0700304 }
305
David Howells17926a72007-04-26 15:48:28 -0700306 while (!list_empty(&graveyard)) {
307 conn = list_entry(graveyard.next, struct rxrpc_connection,
308 link);
309 list_del_init(&conn->link);
310
311 ASSERTCMP(atomic_read(&conn->usage), ==, 0);
David Howellsdee46362016-06-27 17:11:19 +0100312 skb_queue_purge(&conn->rx_queue);
313 call_rcu(&conn->rcu, rxrpc_destroy_connection);
David Howells17926a72007-04-26 15:48:28 -0700314 }
315
316 _leave("");
317}
318
319/*
320 * preemptively destroy all the connection records rather than waiting for them
321 * to time out
322 */
323void __exit rxrpc_destroy_all_connections(void)
324{
David Howellsdee46362016-06-27 17:11:19 +0100325 struct rxrpc_connection *conn, *_p;
326 bool leak = false;
327
David Howells17926a72007-04-26 15:48:28 -0700328 _enter("");
329
David Howells5873c082014-02-07 18:58:44 +0000330 rxrpc_connection_expiry = 0;
David Howells17926a72007-04-26 15:48:28 -0700331 cancel_delayed_work(&rxrpc_connection_reap);
David Howells651350d2007-04-26 15:50:17 -0700332 rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
David Howellsdee46362016-06-27 17:11:19 +0100333 flush_workqueue(rxrpc_workqueue);
334
335 write_lock(&rxrpc_connection_lock);
336 list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
337 pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
338 conn, atomic_read(&conn->usage));
339 leak = true;
340 }
341 write_unlock(&rxrpc_connection_lock);
342 BUG_ON(leak);
343
344 /* Make sure the local and peer records pinned by any dying connections
345 * are released.
346 */
347 rcu_barrier();
348 rxrpc_destroy_client_conn_ids();
David Howells17926a72007-04-26 15:48:28 -0700349
350 _leave("");
351}