/* Service connection management
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
11
12#include <linux/slab.h>
13#include "ar-internal.h"
14
15/*
David Howells8496af52016-07-01 07:51:50 +010016 * Find a service connection under RCU conditions.
17 *
18 * We could use a hash table, but that is subject to bucket stuffing by an
19 * attacker as the client gets to pick the epoch and cid values and would know
20 * the hash function. So, instead, we use a hash table for the peer and from
21 * that an rbtree to find the service connection. Under ordinary circumstances
22 * it might be slower than a large hash table, but it is at least limited in
23 * depth.
24 */
25struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *peer,
26 struct sk_buff *skb)
27{
28 struct rxrpc_connection *conn = NULL;
29 struct rxrpc_conn_proto k;
30 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
31 struct rb_node *p;
32 unsigned int seq = 0;
33
34 k.epoch = sp->hdr.epoch;
35 k.cid = sp->hdr.cid & RXRPC_CIDMASK;
36
37 do {
38 /* Unfortunately, rbtree walking doesn't give reliable results
39 * under just the RCU read lock, so we have to check for
40 * changes.
41 */
42 read_seqbegin_or_lock(&peer->service_conn_lock, &seq);
43
44 p = rcu_dereference_raw(peer->service_conns.rb_node);
45 while (p) {
46 conn = rb_entry(p, struct rxrpc_connection, service_node);
47
48 if (conn->proto.index_key < k.index_key)
49 p = rcu_dereference_raw(p->rb_left);
50 else if (conn->proto.index_key > k.index_key)
51 p = rcu_dereference_raw(p->rb_right);
52 else
53 goto done;
54 conn = NULL;
55 }
56 } while (need_seqretry(&peer->service_conn_lock, seq));
57
58done:
59 done_seqretry(&peer->service_conn_lock, seq);
60 _leave(" = %d", conn ? conn->debug_id : -1);
61 return conn;
62}
63
64/*
65 * Insert a service connection into a peer's tree, thereby making it a target
66 * for incoming packets.
67 */
68static struct rxrpc_connection *
69rxrpc_publish_service_conn(struct rxrpc_peer *peer,
70 struct rxrpc_connection *conn)
71{
72 struct rxrpc_connection *cursor = NULL;
73 struct rxrpc_conn_proto k = conn->proto;
74 struct rb_node **pp, *parent;
75
76 write_seqlock_bh(&peer->service_conn_lock);
77
78 pp = &peer->service_conns.rb_node;
79 parent = NULL;
80 while (*pp) {
81 parent = *pp;
82 cursor = rb_entry(parent,
83 struct rxrpc_connection, service_node);
84
85 if (cursor->proto.index_key < k.index_key)
86 pp = &(*pp)->rb_left;
87 else if (cursor->proto.index_key > k.index_key)
88 pp = &(*pp)->rb_right;
89 else
90 goto found_extant_conn;
91 }
92
93 rb_link_node_rcu(&conn->service_node, parent, pp);
94 rb_insert_color(&conn->service_node, &peer->service_conns);
95conn_published:
96 set_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags);
97 write_sequnlock_bh(&peer->service_conn_lock);
98 _leave(" = %d [new]", conn->debug_id);
99 return conn;
100
101found_extant_conn:
102 if (atomic_read(&cursor->usage) == 0)
103 goto replace_old_connection;
104 write_sequnlock_bh(&peer->service_conn_lock);
105 /* We should not be able to get here. rxrpc_incoming_connection() is
106 * called in a non-reentrant context, so there can't be a race to
107 * insert a new connection.
108 */
109 BUG();
110
111replace_old_connection:
112 /* The old connection is from an outdated epoch. */
113 _debug("replace conn");
114 rb_replace_node_rcu(&cursor->service_node,
115 &conn->service_node,
116 &peer->service_conns);
117 clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &cursor->flags);
118 goto conn_published;
119}
120
121/*
David Howells00e90712016-09-08 11:10:12 +0100122 * Preallocate a service connection. The connection is placed on the proc and
123 * reap lists so that we don't have to get the lock from BH context.
124 */
125struct rxrpc_connection *rxrpc_prealloc_service_connection(gfp_t gfp)
126{
127 struct rxrpc_connection *conn = rxrpc_alloc_connection(gfp);
128
129 if (conn) {
130 /* We maintain an extra ref on the connection whilst it is on
131 * the rxrpc_connections list.
132 */
133 conn->state = RXRPC_CONN_SERVICE_PREALLOC;
134 atomic_set(&conn->usage, 2);
135
136 write_lock(&rxrpc_connection_lock);
137 list_add_tail(&conn->link, &rxrpc_connections);
138 list_add_tail(&conn->proc_link, &rxrpc_connection_proc_list);
139 write_unlock(&rxrpc_connection_lock);
140 }
141
142 return conn;
143}
144
145/*
David Howells7877a4a2016-04-04 14:00:40 +0100146 * get a record of an incoming connection
147 */
148struct rxrpc_connection *rxrpc_incoming_connection(struct rxrpc_local *local,
David Howellsd991b4a2016-06-29 14:40:39 +0100149 struct sockaddr_rxrpc *srx,
David Howells7877a4a2016-04-04 14:00:40 +0100150 struct sk_buff *skb)
151{
David Howells8496af52016-07-01 07:51:50 +0100152 struct rxrpc_connection *conn;
David Howells7877a4a2016-04-04 14:00:40 +0100153 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
David Howellsd991b4a2016-06-29 14:40:39 +0100154 struct rxrpc_peer *peer;
David Howells7877a4a2016-04-04 14:00:40 +0100155 const char *new = "old";
David Howells7877a4a2016-04-04 14:00:40 +0100156
157 _enter("");
158
David Howellsd991b4a2016-06-29 14:40:39 +0100159 peer = rxrpc_lookup_peer(local, srx, GFP_NOIO);
160 if (!peer) {
161 _debug("no peer");
162 return ERR_PTR(-EBUSY);
163 }
164
David Howells7877a4a2016-04-04 14:00:40 +0100165 ASSERT(sp->hdr.flags & RXRPC_CLIENT_INITIATED);
166
David Howells8496af52016-07-01 07:51:50 +0100167 rcu_read_lock();
168 peer = rxrpc_lookup_peer_rcu(local, srx);
169 if (peer) {
170 conn = rxrpc_find_service_conn_rcu(peer, skb);
171 if (conn) {
172 if (sp->hdr.securityIndex != conn->security_ix)
173 goto security_mismatch_rcu;
174 if (rxrpc_get_connection_maybe(conn))
175 goto found_extant_connection_rcu;
David Howells7877a4a2016-04-04 14:00:40 +0100176
David Howells8496af52016-07-01 07:51:50 +0100177 /* The conn has expired but we can't remove it without
178 * the appropriate lock, so we attempt to replace it
179 * when we have a new candidate.
180 */
181 }
David Howells7877a4a2016-04-04 14:00:40 +0100182
David Howells8496af52016-07-01 07:51:50 +0100183 if (!rxrpc_get_peer_maybe(peer))
184 peer = NULL;
David Howells7877a4a2016-04-04 14:00:40 +0100185 }
David Howells8496af52016-07-01 07:51:50 +0100186 rcu_read_unlock();
David Howells7877a4a2016-04-04 14:00:40 +0100187
David Howells8496af52016-07-01 07:51:50 +0100188 if (!peer) {
189 peer = rxrpc_lookup_peer(local, srx, GFP_NOIO);
Dan Carpenter7acef602016-07-14 15:47:01 +0100190 if (!peer)
David Howells8496af52016-07-01 07:51:50 +0100191 goto enomem;
David Howells7877a4a2016-04-04 14:00:40 +0100192 }
193
David Howells8496af52016-07-01 07:51:50 +0100194 /* We don't have a matching record yet. */
195 conn = rxrpc_alloc_connection(GFP_NOIO);
196 if (!conn)
197 goto enomem_peer;
David Howells7877a4a2016-04-04 14:00:40 +0100198
David Howells8496af52016-07-01 07:51:50 +0100199 conn->proto.epoch = sp->hdr.epoch;
200 conn->proto.cid = sp->hdr.cid & RXRPC_CIDMASK;
201 conn->params.local = local;
202 conn->params.peer = peer;
203 conn->params.service_id = sp->hdr.serviceId;
204 conn->security_ix = sp->hdr.securityIndex;
205 conn->out_clientflag = 0;
206 conn->state = RXRPC_CONN_SERVICE;
207 if (conn->params.service_id)
208 conn->state = RXRPC_CONN_SERVICE_UNSECURED;
David Howells7877a4a2016-04-04 14:00:40 +0100209
David Howells7877a4a2016-04-04 14:00:40 +0100210 rxrpc_get_local(local);
211
David Howells45025bc2016-08-24 07:30:52 +0100212 /* We maintain an extra ref on the connection whilst it is on
213 * the rxrpc_connections list.
214 */
215 atomic_set(&conn->usage, 2);
216
David Howells7877a4a2016-04-04 14:00:40 +0100217 write_lock(&rxrpc_connection_lock);
218 list_add_tail(&conn->link, &rxrpc_connections);
David Howells4d028b22016-08-24 07:30:52 +0100219 list_add_tail(&conn->proc_link, &rxrpc_connection_proc_list);
David Howells7877a4a2016-04-04 14:00:40 +0100220 write_unlock(&rxrpc_connection_lock);
221
David Howells8496af52016-07-01 07:51:50 +0100222 /* Make the connection a target for incoming packets. */
223 rxrpc_publish_service_conn(peer, conn);
224
David Howells7877a4a2016-04-04 14:00:40 +0100225 new = "new";
226
227success:
228 _net("CONNECTION %s %d {%x}", new, conn->debug_id, conn->proto.cid);
David Howells7877a4a2016-04-04 14:00:40 +0100229 _leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
230 return conn;
231
David Howells8496af52016-07-01 07:51:50 +0100232found_extant_connection_rcu:
233 rcu_read_unlock();
David Howells7877a4a2016-04-04 14:00:40 +0100234 goto success;
235
David Howells8496af52016-07-01 07:51:50 +0100236security_mismatch_rcu:
237 rcu_read_unlock();
David Howells7877a4a2016-04-04 14:00:40 +0100238 _leave(" = -EKEYREJECTED");
239 return ERR_PTR(-EKEYREJECTED);
David Howells8496af52016-07-01 07:51:50 +0100240
241enomem_peer:
242 rxrpc_put_peer(peer);
243enomem:
244 _leave(" = -ENOMEM");
245 return ERR_PTR(-ENOMEM);
David Howells7877a4a2016-04-04 14:00:40 +0100246}
David Howells001c1122016-06-30 10:45:22 +0100247
248/*
249 * Remove the service connection from the peer's tree, thereby removing it as a
250 * target for incoming packets.
251 */
252void rxrpc_unpublish_service_conn(struct rxrpc_connection *conn)
253{
254 struct rxrpc_peer *peer = conn->params.peer;
255
David Howells8496af52016-07-01 07:51:50 +0100256 write_seqlock_bh(&peer->service_conn_lock);
David Howells001c1122016-06-30 10:45:22 +0100257 if (test_and_clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags))
258 rb_erase(&conn->service_node, &peer->service_conns);
David Howells8496af52016-07-01 07:51:50 +0100259 write_sequnlock_bh(&peer->service_conn_lock);
David Howells001c1122016-06-30 10:45:22 +0100260}