/* RxRPC virtual connection handler
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/crypto.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Time till a connection expires after last use (in seconds).
 */
unsigned int rxrpc_connection_expiry = 10 * 60;

static void rxrpc_connection_reaper(struct work_struct *work);

LIST_HEAD(rxrpc_connections);
DEFINE_RWLOCK(rxrpc_connection_lock);
static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);

/*
 * allocate a new connection
 */
struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
{
	struct rxrpc_connection *conn;

	_enter("");

	conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
	if (conn) {
		spin_lock_init(&conn->channel_lock);
		init_waitqueue_head(&conn->channel_wq);
		INIT_WORK(&conn->processor, &rxrpc_process_connection);
		INIT_LIST_HEAD(&conn->link);
		skb_queue_head_init(&conn->rx_queue);
		conn->security = &rxrpc_no_security;
		spin_lock_init(&conn->state_lock);
		/* We maintain an extra ref on the connection whilst it is
		 * on the rxrpc_connections list.
		 */
		atomic_set(&conn->usage, 2);
		conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
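		/* All RXRPC_MAXCALLS call channels start out free. */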
		atomic_set(&conn->avail_chans, RXRPC_MAXCALLS);
		conn->size_align = 4;
		conn->header_size = sizeof(struct rxrpc_wire_header);
	}

	_leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
	return conn;
}

/*
 * find a connection based on transport and RxRPC connection ID for an incoming
 * packet
 */
struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_local *local,
					       struct rxrpc_peer *peer,
					       struct sk_buff *skb)
{
	struct rxrpc_connection *conn;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rb_node *p;
	u32 epoch, cid;

	_enter(",{%x,%x}", sp->hdr.cid, sp->hdr.flags);

	read_lock_bh(&peer->conn_lock);

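	/* The bottom bits of the CID carry the channel number; mask them off
	 * to get the connection ID proper.
	 */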
	cid = sp->hdr.cid & RXRPC_CIDMASK;
	epoch = sp->hdr.epoch;

	if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) {
		p = peer->service_conns.rb_node;
		while (p) {
			conn = rb_entry(p, struct rxrpc_connection, service_node);

			_debug("maybe %x", conn->proto.cid);

			if (epoch < conn->proto.epoch)
				p = p->rb_left;
			else if (epoch > conn->proto.epoch)
				p = p->rb_right;
			else if (cid < conn->proto.cid)
				p = p->rb_left;
			else if (cid > conn->proto.cid)
				p = p->rb_right;
			else
				goto found;
		}
	} else {
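		/* Locally-initiated (client) connections are indexed by
		 * connection ID in a global IDR rather than in the peer's
		 * service tree.
		 */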
		conn = idr_find(&rxrpc_client_conn_ids, cid >> RXRPC_CIDSHIFT);
		if (conn &&
		    conn->proto.epoch == epoch &&
		    conn->params.peer == peer)
			goto found;
	}

	read_unlock_bh(&peer->conn_lock);
	_leave(" = NULL");
	return NULL;

found:
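	/* The reaper may already have dropped the usage count to zero, so
	 * only take a ref if the connection is still live.
	 */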
	conn = rxrpc_get_connection_maybe(conn);
	read_unlock_bh(&peer->conn_lock);
	_leave(" = %p", conn);
	return conn;
}

/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.  The caller must hold the channel_lock and must release the
 * call's ref on the connection.
 */
void __rxrpc_disconnect_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = &conn->channels[call->channel];

	_enter("%d,%d", conn->debug_id, call->channel);

	if (rcu_access_pointer(chan->call) == call) {
		/* Save the result of the call so that we can repeat it if necessary
		 * through the channel, whilst disposing of the actual call record.
		 */
		chan->last_result = call->local_abort;
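		/* Write barrier: commit last_result before last_call is
		 * updated, so that a reader seeing the new last_call also
		 * sees the matching result (the consumer is assumed to order
		 * its reads accordingly).
		 */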
		smp_wmb();
		chan->last_call = chan->call_id;
		chan->call_id = chan->call_counter;

		rcu_assign_pointer(chan->call, NULL);
		atomic_inc(&conn->avail_chans);
		wake_up(&conn->channel_wq);
	}

	_leave("");
}

/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.
 */
void rxrpc_disconnect_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;

	spin_lock(&conn->channel_lock);
	__rxrpc_disconnect_call(call);
	spin_unlock(&conn->channel_lock);

	call->conn = NULL;
	rxrpc_put_connection(conn);
}

/*
 * release a virtual connection
 */
void rxrpc_put_connection(struct rxrpc_connection *conn)
{
	if (!conn)
		return;

	_enter("%p{u=%d,d=%d}",
	       conn, atomic_read(&conn->usage), conn->debug_id);

	ASSERTCMP(atomic_read(&conn->usage), >, 1);

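	/* Record the drop time before letting go of our ref so that the
	 * reaper computes the expiry from a fresh timestamp.
	 */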
	conn->put_time = ktime_get_seconds();
	if (atomic_dec_return(&conn->usage) == 1) {
		_debug("zombie");
		rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
	}

	_leave("");
}

/*
 * destroy a virtual connection
 */
static void rxrpc_destroy_connection(struct rcu_head *rcu)
{
	struct rxrpc_connection *conn =
		container_of(rcu, struct rxrpc_connection, rcu);

	_enter("{%d,u=%d}", conn->debug_id, atomic_read(&conn->usage));

	ASSERTCMP(atomic_read(&conn->usage), ==, 0);

	_net("DESTROY CONN %d", conn->debug_id);

	rxrpc_purge_queue(&conn->rx_queue);

	conn->security->clear(conn);
	key_put(conn->params.key);
	key_put(conn->server_key);
	rxrpc_put_peer(conn->params.peer);
	rxrpc_put_local(conn->params.local);

	kfree(conn);
	_leave("");
}

/*
 * reap dead connections
 */
static void rxrpc_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	unsigned long reap_older_than, earliest, put_time, now;

	LIST_HEAD(graveyard);

	_enter("");

	now = ktime_get_seconds();
	reap_older_than = now - rxrpc_connection_expiry;
	earliest = ULONG_MAX;

	write_lock(&rxrpc_connection_lock);
	list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
		ASSERTCMP(atomic_read(&conn->usage), >, 0);
		if (likely(atomic_read(&conn->usage) > 1))
			continue;

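		/* put_time may be updated concurrently by
		 * rxrpc_put_connection(); READ_ONCE() takes a single
		 * consistent snapshot of it.
		 */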
		put_time = READ_ONCE(conn->put_time);
		if (time_after(put_time, reap_older_than)) {
			if (time_before(put_time, earliest))
				earliest = put_time;
			continue;
		}

		/* The usage count sits at 1 whilst the object is unused on the
		 * list; we reduce that to 0 to make the object unavailable.
		 */
		if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
			continue;

		if (rxrpc_conn_is_client(conn))
			rxrpc_unpublish_client_conn(conn);
		else
			rxrpc_unpublish_service_conn(conn);

		list_move_tail(&conn->link, &graveyard);
	}
	write_unlock(&rxrpc_connection_lock);

	if (earliest != ULONG_MAX) {
		_debug("reschedule reaper %ld", (long) earliest - now);
		ASSERTCMP(earliest, >, now);
		rxrpc_queue_delayed_work(&rxrpc_connection_reap,
					 (earliest - now) * HZ);
	}

	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(atomic_read(&conn->usage), ==, 0);
		skb_queue_purge(&conn->rx_queue);
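		/* Defer freeing until after an RCU grace period so that
		 * anyone still dereferencing the connection under RCU can
		 * finish first.
		 */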
		call_rcu(&conn->rcu, rxrpc_destroy_connection);
	}

	_leave("");
}

/*
 * preemptively destroy all the connection records rather than waiting for them
 * to time out
 */
void __exit rxrpc_destroy_all_connections(void)
{
	struct rxrpc_connection *conn, *_p;
	bool leak = false;

	_enter("");

	rxrpc_connection_expiry = 0;
	cancel_delayed_work(&rxrpc_connection_reap);
	rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
	flush_workqueue(rxrpc_workqueue);

	write_lock(&rxrpc_connection_lock);
	list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
		pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
		       conn, atomic_read(&conn->usage));
		leak = true;
	}
	write_unlock(&rxrpc_connection_lock);
	BUG_ON(leak);

	/* Make sure the local and peer records pinned by any dying connections
	 * are released.
	 */
	rcu_barrier();
	rxrpc_destroy_client_conn_ids();

	_leave("");
}