blob: 6e1099ed1dbd9279f0e0c710e60a3ec5d12873d9 [file] [log] [blame]
/* Client connection-specific management code.
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
11
/* Prefix all pr_*() output from this file with the module name. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/timer.h>
#include "ar-internal.h"
18
/*
 * We use machine-unique IDs for our client connections.
 *
 * rxrpc_client_conn_ids maps a client connection ID (conn->proto.cid >>
 * RXRPC_CIDSHIFT) to its rxrpc_connection.  Insertion, removal and epoch
 * advancement are all serialised by rxrpc_conn_id_lock.
 */
DEFINE_IDR(rxrpc_client_conn_ids);
/* Guards ID allocation/removal in the IDR above and rxrpc_epoch updates. */
static DEFINE_SPINLOCK(rxrpc_conn_id_lock);
24
/*
 * Get a connection ID and epoch for a client connection from the global pool.
 * The connection struct pointer is then recorded in the idr radix tree.  The
 * epoch is changed if this wraps.
 *
 * Returns 0 on success, or a negative errno from idr_alloc() on failure.
 *
 * TODO: The IDR tree gets very expensive on memory if the connection IDs are
 * widely scattered throughout the number space, so we shall need to retire
 * connections that have, say, an ID more than four times the maximum number of
 * client conns away from the current allocation point to try and keep the IDs
 * concentrated.  We will also need to retire connections from an old epoch.
 */
static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
					  gfp_t gfp)
{
	u32 epoch;
	int id;

	_enter("");

	/* Preload with the caller's gfp flags outside the lock so that the
	 * idr_alloc() calls below can safely use GFP_NOWAIT.
	 */
	idr_preload(gfp);
	spin_lock(&rxrpc_conn_id_lock);

	epoch = rxrpc_epoch;

	/* We could use idr_alloc_cyclic() here, but we really need to know
	 * when the thing wraps so that we can advance the epoch.
	 */
	if (rxrpc_client_conn_ids.cur == 0)
		rxrpc_client_conn_ids.cur = 1;	/* ID 0 is never handed out */
	id = idr_alloc(&rxrpc_client_conn_ids, conn,
		       rxrpc_client_conn_ids.cur, 0x40000000, GFP_NOWAIT);
	if (id < 0) {
		if (id != -ENOSPC)
			goto error;
		/* -ENOSPC means the space above ->cur is exhausted: wrap
		 * around to 1 and bump the epoch so that (epoch, ID) pairs
		 * remain unique.
		 */
		id = idr_alloc(&rxrpc_client_conn_ids, conn,
			       1, 0x40000000, GFP_NOWAIT);
		if (id < 0)
			goto error;
		epoch++;
		rxrpc_epoch = epoch;
	}
	rxrpc_client_conn_ids.cur = id + 1;

	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();

	conn->proto.epoch = epoch;
	conn->proto.cid = id << RXRPC_CIDSHIFT;
	/* Mark the conn so that rxrpc_put_client_connection_id() knows the
	 * ID must be removed from the IDR again.
	 */
	set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
	_leave(" [CID %x:%x]", epoch, conn->proto.cid);
	return 0;

error:
	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();
	_leave(" = %d", id);
	return id;
}
83
84/*
85 * Release a connection ID for a client connection from the global pool.
86 */
David Howells001c1122016-06-30 10:45:22 +010087static void rxrpc_put_client_connection_id(struct rxrpc_connection *conn)
David Howells4a3388c2016-04-04 14:00:37 +010088{
89 if (test_bit(RXRPC_CONN_HAS_IDR, &conn->flags)) {
90 spin_lock(&rxrpc_conn_id_lock);
91 idr_remove(&rxrpc_client_conn_ids,
92 conn->proto.cid >> RXRPC_CIDSHIFT);
93 spin_unlock(&rxrpc_conn_id_lock);
94 }
95}
David Howellseb9b9d22016-06-27 10:32:02 +010096
97/*
98 * Destroy the client connection ID tree.
99 */
100void rxrpc_destroy_client_conn_ids(void)
101{
102 struct rxrpc_connection *conn;
103 int id;
104
105 if (!idr_is_empty(&rxrpc_client_conn_ids)) {
106 idr_for_each_entry(&rxrpc_client_conn_ids, conn, id) {
107 pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
108 conn, atomic_read(&conn->usage));
109 }
110 BUG();
111 }
112
113 idr_destroy(&rxrpc_client_conn_ids);
114}
David Howellsc6d2b8d2016-04-04 14:00:40 +0100115
/*
 * Allocate a client connection.  The caller must take care to clear any
 * padding bytes in *cp.
 *
 * On success the new connection steals the caller's ref on cp->peer (cp->peer
 * is zapped) and takes additional refs on the local endpoint and security key.
 *
 * Returns the new connection or an ERR_PTR() - NEVER NULL; callers must test
 * with IS_ERR(), not a NULL check.
 */
static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
{
	struct rxrpc_connection *conn;
	int ret;

	_enter("");

	conn = rxrpc_alloc_connection(gfp);
	if (!conn) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	conn->params = *cp;
	conn->out_clientflag = RXRPC_CLIENT_INITIATED;
	conn->state = RXRPC_CONN_CLIENT;

	/* Each of the three steps below is unwound by the matching label in
	 * the reverse-order goto-cleanup ladder at the bottom.
	 */
	ret = rxrpc_get_client_connection_id(conn, gfp);
	if (ret < 0)
		goto error_0;

	ret = rxrpc_init_client_conn_security(conn);
	if (ret < 0)
		goto error_1;

	ret = conn->security->prime_packet_security(conn);
	if (ret < 0)
		goto error_2;

	/* Publish the connection on the global lists. */
	write_lock(&rxrpc_connection_lock);
	list_add_tail(&conn->link, &rxrpc_connections);
	list_add_tail(&conn->proc_link, &rxrpc_connection_proc_list);
	write_unlock(&rxrpc_connection_lock);

	/* We steal the caller's peer ref. */
	cp->peer = NULL;
	rxrpc_get_local(conn->params.local);
	key_get(conn->params.key);

	_leave(" = %p", conn);
	return conn;

error_2:
	conn->security->clear(conn);
error_1:
	rxrpc_put_client_connection_id(conn);
error_0:
	kfree(conn);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
172
173/*
174 * find a connection for a call
175 * - called in process context with IRQs enabled
176 */
177int rxrpc_connect_call(struct rxrpc_call *call,
178 struct rxrpc_conn_parameters *cp,
179 struct sockaddr_rxrpc *srx,
180 gfp_t gfp)
181{
182 struct rxrpc_connection *conn, *candidate = NULL;
183 struct rxrpc_local *local = cp->local;
184 struct rb_node *p, **pp, *parent;
185 long diff;
186 int chan;
187
188 DECLARE_WAITQUEUE(myself, current);
189
190 _enter("{%d,%lx},", call->debug_id, call->user_call_ID);
191
192 cp->peer = rxrpc_lookup_peer(cp->local, srx, gfp);
193 if (!cp->peer)
194 return -ENOMEM;
195
196 if (!cp->exclusive) {
197 /* Search for a existing client connection unless this is going
198 * to be a connection that's used exclusively for a single call.
199 */
200 _debug("search 1");
201 spin_lock(&local->client_conns_lock);
202 p = local->client_conns.rb_node;
203 while (p) {
204 conn = rb_entry(p, struct rxrpc_connection, client_node);
205
206#define cmp(X) ((long)conn->params.X - (long)cp->X)
207 diff = (cmp(peer) ?:
208 cmp(key) ?:
209 cmp(security_level));
210 if (diff < 0)
211 p = p->rb_left;
212 else if (diff > 0)
213 p = p->rb_right;
214 else
215 goto found_extant_conn;
216 }
217 spin_unlock(&local->client_conns_lock);
218 }
219
220 /* We didn't find a connection or we want an exclusive one. */
221 _debug("get new conn");
222 candidate = rxrpc_alloc_client_connection(cp, gfp);
223 if (!candidate) {
224 _leave(" = -ENOMEM");
225 return -ENOMEM;
226 }
227
228 if (cp->exclusive) {
229 /* Assign the call on an exclusive connection to channel 0 and
230 * don't add the connection to the endpoint's shareable conn
231 * lookup tree.
232 */
233 _debug("exclusive chan 0");
234 conn = candidate;
235 atomic_set(&conn->avail_chans, RXRPC_MAXCALLS - 1);
236 spin_lock(&conn->channel_lock);
237 chan = 0;
238 goto found_channel;
239 }
240
241 /* We need to redo the search before attempting to add a new connection
242 * lest we race with someone else adding a conflicting instance.
243 */
244 _debug("search 2");
245 spin_lock(&local->client_conns_lock);
246
247 pp = &local->client_conns.rb_node;
248 parent = NULL;
249 while (*pp) {
250 parent = *pp;
251 conn = rb_entry(parent, struct rxrpc_connection, client_node);
252
253 diff = (cmp(peer) ?:
254 cmp(key) ?:
255 cmp(security_level));
256 if (diff < 0)
257 pp = &(*pp)->rb_left;
258 else if (diff > 0)
259 pp = &(*pp)->rb_right;
260 else
261 goto found_extant_conn;
262 }
263
264 /* The second search also failed; simply add the new connection with
265 * the new call in channel 0. Note that we need to take the channel
266 * lock before dropping the client conn lock.
267 */
268 _debug("new conn");
David Howells001c1122016-06-30 10:45:22 +0100269 set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
270 rb_link_node(&candidate->client_node, parent, pp);
271 rb_insert_color(&candidate->client_node, &local->client_conns);
272attached:
David Howellsc6d2b8d2016-04-04 14:00:40 +0100273 conn = candidate;
274 candidate = NULL;
275
David Howellsc6d2b8d2016-04-04 14:00:40 +0100276 atomic_set(&conn->avail_chans, RXRPC_MAXCALLS - 1);
277 spin_lock(&conn->channel_lock);
278 spin_unlock(&local->client_conns_lock);
279 chan = 0;
280
281found_channel:
282 _debug("found chan");
283 call->conn = conn;
David Howellsdf5d8bf2016-08-24 14:31:43 +0100284 call->peer = rxrpc_get_peer(conn->params.peer);
David Howellsc6d2b8d2016-04-04 14:00:40 +0100285 call->cid = conn->proto.cid | chan;
286 call->call_id = ++conn->channels[chan].call_counter;
287 conn->channels[chan].call_id = call->call_id;
288 rcu_assign_pointer(conn->channels[chan].call, call);
289
290 _net("CONNECT call %d on conn %d", call->debug_id, conn->debug_id);
291
292 spin_unlock(&conn->channel_lock);
293 rxrpc_put_peer(cp->peer);
294 cp->peer = NULL;
295 _leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
296 return 0;
297
David Howells001c1122016-06-30 10:45:22 +0100298 /* We found a potentially suitable connection already in existence. If
299 * we can reuse it (ie. its usage count hasn't been reduced to 0 by the
300 * reaper), discard any candidate we may have allocated, and try to get
301 * a channel on this one, otherwise we have to replace it.
David Howellsc6d2b8d2016-04-04 14:00:40 +0100302 */
303found_extant_conn:
304 _debug("found conn");
David Howells001c1122016-06-30 10:45:22 +0100305 if (!rxrpc_get_connection_maybe(conn)) {
306 set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
307 rb_replace_node(&conn->client_node,
308 &candidate->client_node,
309 &local->client_conns);
310 clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags);
311 goto attached;
312 }
313
David Howellsc6d2b8d2016-04-04 14:00:40 +0100314 spin_unlock(&local->client_conns_lock);
315
316 rxrpc_put_connection(candidate);
317
318 if (!atomic_add_unless(&conn->avail_chans, -1, 0)) {
319 if (!gfpflags_allow_blocking(gfp)) {
320 rxrpc_put_connection(conn);
321 _leave(" = -EAGAIN");
322 return -EAGAIN;
323 }
324
325 add_wait_queue(&conn->channel_wq, &myself);
326 for (;;) {
327 set_current_state(TASK_INTERRUPTIBLE);
328 if (atomic_add_unless(&conn->avail_chans, -1, 0))
329 break;
330 if (signal_pending(current))
331 goto interrupted;
332 schedule();
333 }
334 remove_wait_queue(&conn->channel_wq, &myself);
335 __set_current_state(TASK_RUNNING);
336 }
337
338 /* The connection allegedly now has a free channel and we can now
339 * attach the call to it.
340 */
341 spin_lock(&conn->channel_lock);
342
343 for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
344 if (!conn->channels[chan].call)
345 goto found_channel;
346 BUG();
347
348interrupted:
349 remove_wait_queue(&conn->channel_wq, &myself);
350 __set_current_state(TASK_RUNNING);
351 rxrpc_put_connection(conn);
352 rxrpc_put_peer(cp->peer);
353 cp->peer = NULL;
354 _leave(" = -ERESTARTSYS");
355 return -ERESTARTSYS;
356}
David Howells001c1122016-06-30 10:45:22 +0100357
358/*
359 * Remove a client connection from the local endpoint's tree, thereby removing
360 * it as a target for reuse for new client calls.
361 */
362void rxrpc_unpublish_client_conn(struct rxrpc_connection *conn)
363{
364 struct rxrpc_local *local = conn->params.local;
365
366 spin_lock(&local->client_conns_lock);
367 if (test_and_clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags))
368 rb_erase(&conn->client_node, &local->client_conns);
369 spin_unlock(&local->client_conns_lock);
370
371 rxrpc_put_client_connection_id(conn);
372}