/* RxRPC virtual connection handler
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
11
Joe Perches9b6d5392016-06-02 12:08:52 -070012#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
David Howells17926a72007-04-26 15:48:28 -070014#include <linux/module.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090015#include <linux/slab.h>
David Howells17926a72007-04-26 15:48:28 -070016#include <linux/net.h>
17#include <linux/skbuff.h>
18#include <linux/crypto.h>
19#include <net/sock.h>
20#include <net/af_rxrpc.h>
21#include "ar-internal.h"
22
/*
 * Time till a connection expires after last use (in seconds).  Reset to 0 at
 * module unload so the reaper discards everything immediately.
 */
unsigned int rxrpc_connection_expiry = 10 * 60;

/* Reap worker; forward-declared for the delayed-work definition below. */
static void rxrpc_connection_reaper(struct work_struct *work);

/* List of all extant connections, and the rwlock guarding it. */
LIST_HEAD(rxrpc_connections);
DEFINE_RWLOCK(rxrpc_connection_lock);
static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);
33
34/*
David Howells17926a72007-04-26 15:48:28 -070035 * allocate a new connection
36 */
David Howellsc6d2b8d2016-04-04 14:00:40 +010037struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
David Howells17926a72007-04-26 15:48:28 -070038{
39 struct rxrpc_connection *conn;
40
41 _enter("");
42
43 conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
44 if (conn) {
David Howells999b69f2016-06-17 15:42:35 +010045 spin_lock_init(&conn->channel_lock);
46 init_waitqueue_head(&conn->channel_wq);
David Howells17926a72007-04-26 15:48:28 -070047 INIT_WORK(&conn->processor, &rxrpc_process_connection);
David Howells999b69f2016-06-17 15:42:35 +010048 INIT_LIST_HEAD(&conn->link);
David Howells17926a72007-04-26 15:48:28 -070049 skb_queue_head_init(&conn->rx_queue);
David Howellse0e4d822016-04-07 17:23:58 +010050 conn->security = &rxrpc_no_security;
David Howells17926a72007-04-26 15:48:28 -070051 spin_lock_init(&conn->state_lock);
52 atomic_set(&conn->usage, 1);
53 conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
David Howells999b69f2016-06-17 15:42:35 +010054 atomic_set(&conn->avail_chans, RXRPC_MAXCALLS);
David Howells17926a72007-04-26 15:48:28 -070055 conn->size_align = 4;
David Howells0d12f8a2016-03-04 15:53:46 +000056 conn->header_size = sizeof(struct rxrpc_wire_header);
David Howells17926a72007-04-26 15:48:28 -070057 }
58
Adrian Bunk16c61ad2007-06-15 15:15:43 -070059 _leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
David Howells17926a72007-04-26 15:48:28 -070060 return conn;
61}
62
63/*
David Howells17926a72007-04-26 15:48:28 -070064 * find a connection based on transport and RxRPC connection ID for an incoming
65 * packet
66 */
David Howellsaa390bb2016-06-17 10:06:56 +010067struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_local *local,
68 struct rxrpc_peer *peer,
David Howells42886ff2016-06-16 13:31:07 +010069 struct sk_buff *skb)
David Howells17926a72007-04-26 15:48:28 -070070{
71 struct rxrpc_connection *conn;
David Howells42886ff2016-06-16 13:31:07 +010072 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
David Howells17926a72007-04-26 15:48:28 -070073 struct rb_node *p;
David Howells0d12f8a2016-03-04 15:53:46 +000074 u32 epoch, cid;
David Howells17926a72007-04-26 15:48:28 -070075
David Howells42886ff2016-06-16 13:31:07 +010076 _enter(",{%x,%x}", sp->hdr.cid, sp->hdr.flags);
David Howells17926a72007-04-26 15:48:28 -070077
David Howellsaa390bb2016-06-17 10:06:56 +010078 read_lock_bh(&peer->conn_lock);
David Howells17926a72007-04-26 15:48:28 -070079
David Howells42886ff2016-06-16 13:31:07 +010080 cid = sp->hdr.cid & RXRPC_CIDMASK;
81 epoch = sp->hdr.epoch;
David Howells17926a72007-04-26 15:48:28 -070082
David Howells4a3388c2016-04-04 14:00:37 +010083 if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) {
David Howellsaa390bb2016-06-17 10:06:56 +010084 p = peer->service_conns.rb_node;
David Howells4a3388c2016-04-04 14:00:37 +010085 while (p) {
David Howells999b69f2016-06-17 15:42:35 +010086 conn = rb_entry(p, struct rxrpc_connection, service_node);
David Howells17926a72007-04-26 15:48:28 -070087
David Howells4a3388c2016-04-04 14:00:37 +010088 _debug("maybe %x", conn->proto.cid);
David Howells17926a72007-04-26 15:48:28 -070089
David Howells4a3388c2016-04-04 14:00:37 +010090 if (epoch < conn->proto.epoch)
91 p = p->rb_left;
92 else if (epoch > conn->proto.epoch)
93 p = p->rb_right;
94 else if (cid < conn->proto.cid)
95 p = p->rb_left;
96 else if (cid > conn->proto.cid)
97 p = p->rb_right;
98 else
99 goto found;
100 }
101 } else {
102 conn = idr_find(&rxrpc_client_conn_ids, cid >> RXRPC_CIDSHIFT);
David Howells689f4c62016-06-30 11:34:30 +0100103 if (conn &&
104 conn->proto.epoch == epoch &&
105 conn->params.peer == peer)
David Howells17926a72007-04-26 15:48:28 -0700106 goto found;
107 }
108
David Howellsaa390bb2016-06-17 10:06:56 +0100109 read_unlock_bh(&peer->conn_lock);
David Howells17926a72007-04-26 15:48:28 -0700110 _leave(" = NULL");
111 return NULL;
112
113found:
David Howells5627cc82016-04-04 14:00:38 +0100114 rxrpc_get_connection(conn);
David Howellsaa390bb2016-06-17 10:06:56 +0100115 read_unlock_bh(&peer->conn_lock);
David Howells17926a72007-04-26 15:48:28 -0700116 _leave(" = %p", conn);
117 return conn;
118}
119
120/*
David Howells999b69f2016-06-17 15:42:35 +0100121 * Disconnect a call and clear any channel it occupies when that call
David Howellsa1399f82016-06-27 14:39:44 +0100122 * terminates. The caller must hold the channel_lock and must release the
123 * call's ref on the connection.
124 */
125void __rxrpc_disconnect_call(struct rxrpc_call *call)
126{
127 struct rxrpc_connection *conn = call->conn;
128 struct rxrpc_channel *chan = &conn->channels[call->channel];
129
130 _enter("%d,%d", conn->debug_id, call->channel);
131
132 if (rcu_access_pointer(chan->call) == call) {
133 /* Save the result of the call so that we can repeat it if necessary
134 * through the channel, whilst disposing of the actual call record.
135 */
136 chan->last_result = call->local_abort;
137 smp_wmb();
138 chan->last_call = chan->call_id;
139 chan->call_id = chan->call_counter;
140
141 rcu_assign_pointer(chan->call, NULL);
142 atomic_inc(&conn->avail_chans);
143 wake_up(&conn->channel_wq);
144 }
145
146 _leave("");
147}
148
149/*
150 * Disconnect a call and clear any channel it occupies when that call
David Howells999b69f2016-06-17 15:42:35 +0100151 * terminates.
152 */
153void rxrpc_disconnect_call(struct rxrpc_call *call)
154{
155 struct rxrpc_connection *conn = call->conn;
David Howells999b69f2016-06-17 15:42:35 +0100156
David Howellse653cfe2016-04-04 14:00:38 +0100157 spin_lock(&conn->channel_lock);
David Howellsa1399f82016-06-27 14:39:44 +0100158 __rxrpc_disconnect_call(call);
David Howellse653cfe2016-04-04 14:00:38 +0100159 spin_unlock(&conn->channel_lock);
160
161 call->conn = NULL;
162 rxrpc_put_connection(conn);
David Howells999b69f2016-06-17 15:42:35 +0100163}
164
165/*
David Howells17926a72007-04-26 15:48:28 -0700166 * release a virtual connection
167 */
168void rxrpc_put_connection(struct rxrpc_connection *conn)
169{
David Howells999b69f2016-06-17 15:42:35 +0100170 if (!conn)
171 return;
172
David Howells17926a72007-04-26 15:48:28 -0700173 _enter("%p{u=%d,d=%d}",
174 conn, atomic_read(&conn->usage), conn->debug_id);
175
176 ASSERTCMP(atomic_read(&conn->usage), >, 0);
177
Ksenija Stanojevic22a3f9a2015-09-17 18:12:53 +0200178 conn->put_time = ktime_get_seconds();
David Howells17926a72007-04-26 15:48:28 -0700179 if (atomic_dec_and_test(&conn->usage)) {
180 _debug("zombie");
David Howells651350d2007-04-26 15:50:17 -0700181 rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
David Howells17926a72007-04-26 15:48:28 -0700182 }
183
184 _leave("");
185}
186
187/*
188 * destroy a virtual connection
189 */
David Howellsdee46362016-06-27 17:11:19 +0100190static void rxrpc_destroy_connection(struct rcu_head *rcu)
David Howells17926a72007-04-26 15:48:28 -0700191{
David Howellsdee46362016-06-27 17:11:19 +0100192 struct rxrpc_connection *conn =
193 container_of(rcu, struct rxrpc_connection, rcu);
194
195 _enter("{%d,u=%d}", conn->debug_id, atomic_read(&conn->usage));
David Howells17926a72007-04-26 15:48:28 -0700196
197 ASSERTCMP(atomic_read(&conn->usage), ==, 0);
198
199 _net("DESTROY CONN %d", conn->debug_id);
200
David Howells17926a72007-04-26 15:48:28 -0700201 rxrpc_purge_queue(&conn->rx_queue);
202
David Howellse0e4d822016-04-07 17:23:58 +0100203 conn->security->clear(conn);
David Howells19ffa012016-04-04 14:00:36 +0100204 key_put(conn->params.key);
David Howellse0e4d822016-04-07 17:23:58 +0100205 key_put(conn->server_key);
David Howellsaa390bb2016-06-17 10:06:56 +0100206 rxrpc_put_peer(conn->params.peer);
207 rxrpc_put_local(conn->params.local);
David Howellse0e4d822016-04-07 17:23:58 +0100208
David Howells17926a72007-04-26 15:48:28 -0700209 kfree(conn);
210 _leave("");
211}
212
213/*
214 * reap dead connections
215 */
Roel Kluin5eaa65b2008-12-10 15:18:31 -0800216static void rxrpc_connection_reaper(struct work_struct *work)
David Howells17926a72007-04-26 15:48:28 -0700217{
218 struct rxrpc_connection *conn, *_p;
David Howellsaa390bb2016-06-17 10:06:56 +0100219 struct rxrpc_peer *peer;
David Howells17926a72007-04-26 15:48:28 -0700220 unsigned long now, earliest, reap_time;
221
222 LIST_HEAD(graveyard);
223
224 _enter("");
225
Ksenija Stanojevic22a3f9a2015-09-17 18:12:53 +0200226 now = ktime_get_seconds();
David Howells17926a72007-04-26 15:48:28 -0700227 earliest = ULONG_MAX;
228
David Howellsb3f57502016-06-21 16:10:03 +0100229 write_lock(&rxrpc_connection_lock);
David Howells17926a72007-04-26 15:48:28 -0700230 list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
231 _debug("reap CONN %d { u=%d,t=%ld }",
232 conn->debug_id, atomic_read(&conn->usage),
233 (long) now - (long) conn->put_time);
234
235 if (likely(atomic_read(&conn->usage) > 0))
236 continue;
237
David Howells999b69f2016-06-17 15:42:35 +0100238 if (rxrpc_conn_is_client(conn)) {
239 struct rxrpc_local *local = conn->params.local;
240 spin_lock(&local->client_conns_lock);
241 reap_time = conn->put_time + rxrpc_connection_expiry;
David Howells17926a72007-04-26 15:48:28 -0700242
David Howells999b69f2016-06-17 15:42:35 +0100243 if (atomic_read(&conn->usage) > 0) {
244 ;
245 } else if (reap_time <= now) {
246 list_move_tail(&conn->link, &graveyard);
David Howells4a3388c2016-04-04 14:00:37 +0100247 rxrpc_put_client_connection_id(conn);
David Howells999b69f2016-06-17 15:42:35 +0100248 rb_erase(&conn->client_node,
249 &local->client_conns);
250 } else if (reap_time < earliest) {
251 earliest = reap_time;
David Howells17926a72007-04-26 15:48:28 -0700252 }
253
David Howells999b69f2016-06-17 15:42:35 +0100254 spin_unlock(&local->client_conns_lock);
255 } else {
David Howellsaa390bb2016-06-17 10:06:56 +0100256 peer = conn->params.peer;
257 write_lock_bh(&peer->conn_lock);
David Howells999b69f2016-06-17 15:42:35 +0100258 reap_time = conn->put_time + rxrpc_connection_expiry;
David Howells17926a72007-04-26 15:48:28 -0700259
David Howells999b69f2016-06-17 15:42:35 +0100260 if (atomic_read(&conn->usage) > 0) {
261 ;
262 } else if (reap_time <= now) {
263 list_move_tail(&conn->link, &graveyard);
264 rb_erase(&conn->service_node,
David Howellsaa390bb2016-06-17 10:06:56 +0100265 &peer->service_conns);
David Howells999b69f2016-06-17 15:42:35 +0100266 } else if (reap_time < earliest) {
267 earliest = reap_time;
268 }
269
David Howellsaa390bb2016-06-17 10:06:56 +0100270 write_unlock_bh(&peer->conn_lock);
David Howells999b69f2016-06-17 15:42:35 +0100271 }
David Howells17926a72007-04-26 15:48:28 -0700272 }
David Howellsb3f57502016-06-21 16:10:03 +0100273 write_unlock(&rxrpc_connection_lock);
David Howells17926a72007-04-26 15:48:28 -0700274
275 if (earliest != ULONG_MAX) {
276 _debug("reschedule reaper %ld", (long) earliest - now);
277 ASSERTCMP(earliest, >, now);
David Howells651350d2007-04-26 15:50:17 -0700278 rxrpc_queue_delayed_work(&rxrpc_connection_reap,
279 (earliest - now) * HZ);
David Howells17926a72007-04-26 15:48:28 -0700280 }
281
282 /* then destroy all those pulled out */
283 while (!list_empty(&graveyard)) {
284 conn = list_entry(graveyard.next, struct rxrpc_connection,
285 link);
286 list_del_init(&conn->link);
287
288 ASSERTCMP(atomic_read(&conn->usage), ==, 0);
David Howellsdee46362016-06-27 17:11:19 +0100289 skb_queue_purge(&conn->rx_queue);
290 call_rcu(&conn->rcu, rxrpc_destroy_connection);
David Howells17926a72007-04-26 15:48:28 -0700291 }
292
293 _leave("");
294}
295
/*
 * preemptively destroy all the connection records rather than waiting for them
 * to time out
 *
 * Module-unload path: zero the expiry interval so the reaper treats every
 * unused connection as expired, run the reaper synchronously, then verify the
 * global list is empty (BUG if any connection leaked).  rcu_barrier() waits
 * for the deferred rxrpc_destroy_connection() callbacks so the peer/local
 * records they pin are released before the caller proceeds.
 */
void __exit rxrpc_destroy_all_connections(void)
{
	struct rxrpc_connection *conn, *_p;
	bool leak = false;

	_enter("");

	rxrpc_connection_expiry = 0;
	cancel_delayed_work(&rxrpc_connection_reap);
	rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
	flush_workqueue(rxrpc_workqueue);

	/* Anything still on the list after the reap is a refcount leak. */
	write_lock(&rxrpc_connection_lock);
	list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
		pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
		       conn, atomic_read(&conn->usage));
		leak = true;
	}
	write_unlock(&rxrpc_connection_lock);
	BUG_ON(leak);

	/* Make sure the local and peer records pinned by any dying connections
	 * are released.
	 */
	rcu_barrier();
	rxrpc_destroy_client_conn_ids();

	_leave("");
}