blob: ff4864d550b8153bcb8a222c8537a85c79c7c7ca [file] [log] [blame]
David Howells87563612016-04-04 14:00:34 +01001/* Local endpoint object management
David Howells17926a72007-04-26 15:48:28 -07002 *
David Howells4f95dd72016-04-04 14:00:35 +01003 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
David Howells17926a72007-04-26 15:48:28 -07004 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
David Howells87563612016-04-04 14:00:34 +01007 * modify it under the terms of the GNU General Public Licence
David Howells17926a72007-04-26 15:48:28 -07008 * as published by the Free Software Foundation; either version
David Howells87563612016-04-04 14:00:34 +01009 * 2 of the Licence, or (at your option) any later version.
David Howells17926a72007-04-26 15:48:28 -070010 */
11
Joe Perches9b6d5392016-06-02 12:08:52 -070012#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/hashtable.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
24
David Howells4f95dd72016-04-04 14:00:35 +010025static void rxrpc_local_processor(struct work_struct *);
26static void rxrpc_local_rcu(struct rcu_head *);
David Howells17926a72007-04-26 15:48:28 -070027
David Howells4f95dd72016-04-04 14:00:35 +010028static DEFINE_MUTEX(rxrpc_local_mutex);
29static LIST_HEAD(rxrpc_local_endpoints);
David Howells17926a72007-04-26 15:48:28 -070030
31/*
David Howells4f95dd72016-04-04 14:00:35 +010032 * Compare a local to an address. Return -ve, 0 or +ve to indicate less than,
33 * same or greater than.
34 *
35 * We explicitly don't compare the RxRPC service ID as we want to reject
36 * conflicting uses by differing services. Further, we don't want to share
37 * addresses with different options (IPv6), so we don't compare those bits
38 * either.
David Howells17926a72007-04-26 15:48:28 -070039 */
David Howells4f95dd72016-04-04 14:00:35 +010040static long rxrpc_local_cmp_key(const struct rxrpc_local *local,
41 const struct sockaddr_rxrpc *srx)
42{
43 long diff;
44
45 diff = ((local->srx.transport_type - srx->transport_type) ?:
46 (local->srx.transport_len - srx->transport_len) ?:
47 (local->srx.transport.family - srx->transport.family));
48 if (diff != 0)
49 return diff;
50
51 switch (srx->transport.family) {
52 case AF_INET:
53 /* If the choice of UDP port is left up to the transport, then
54 * the endpoint record doesn't match.
55 */
56 return ((u16 __force)local->srx.transport.sin.sin_port -
57 (u16 __force)srx->transport.sin.sin_port) ?:
58 memcmp(&local->srx.transport.sin.sin_addr,
59 &srx->transport.sin.sin_addr,
60 sizeof(struct in_addr));
David Howellsd1912742016-09-17 07:26:01 +010061#ifdef CONFIG_AF_RXRPC_IPV6
David Howells75b54cb2016-09-13 08:49:05 +010062 case AF_INET6:
63 /* If the choice of UDP6 port is left up to the transport, then
64 * the endpoint record doesn't match.
65 */
66 return ((u16 __force)local->srx.transport.sin6.sin6_port -
67 (u16 __force)srx->transport.sin6.sin6_port) ?:
68 memcmp(&local->srx.transport.sin6.sin6_addr,
69 &srx->transport.sin6.sin6_addr,
70 sizeof(struct in6_addr));
David Howellsd1912742016-09-17 07:26:01 +010071#endif
David Howells4f95dd72016-04-04 14:00:35 +010072 default:
73 BUG();
74 }
75}
76
77/*
78 * Allocate a new local endpoint.
79 */
80static struct rxrpc_local *rxrpc_alloc_local(const struct sockaddr_rxrpc *srx)
David Howells17926a72007-04-26 15:48:28 -070081{
82 struct rxrpc_local *local;
83
84 local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
85 if (local) {
David Howells4f95dd72016-04-04 14:00:35 +010086 atomic_set(&local->usage, 1);
David Howells17926a72007-04-26 15:48:28 -070087 INIT_LIST_HEAD(&local->link);
David Howells4f95dd72016-04-04 14:00:35 +010088 INIT_WORK(&local->processor, rxrpc_local_processor);
David Howells17926a72007-04-26 15:48:28 -070089 init_rwsem(&local->defrag_sem);
David Howells17926a72007-04-26 15:48:28 -070090 skb_queue_head_init(&local->reject_queue);
David Howells44ba0692015-04-01 16:31:26 +010091 skb_queue_head_init(&local->event_queue);
David Howells999b69f2016-06-17 15:42:35 +010092 local->client_conns = RB_ROOT;
93 spin_lock_init(&local->client_conns_lock);
David Howells17926a72007-04-26 15:48:28 -070094 spin_lock_init(&local->lock);
95 rwlock_init(&local->services_lock);
David Howells17926a72007-04-26 15:48:28 -070096 local->debug_id = atomic_inc_return(&rxrpc_debug_id);
97 memcpy(&local->srx, srx, sizeof(*srx));
98 }
99
100 _leave(" = %p", local);
101 return local;
102}
103
104/*
105 * create the local socket
David Howells4f95dd72016-04-04 14:00:35 +0100106 * - must be called with rxrpc_local_mutex locked
David Howells17926a72007-04-26 15:48:28 -0700107 */
David Howells4f95dd72016-04-04 14:00:35 +0100108static int rxrpc_open_socket(struct rxrpc_local *local)
David Howells17926a72007-04-26 15:48:28 -0700109{
110 struct sock *sock;
111 int ret, opt;
112
David Howells75b54cb2016-09-13 08:49:05 +0100113 _enter("%p{%d,%d}",
114 local, local->srx.transport_type, local->srx.transport.family);
David Howells17926a72007-04-26 15:48:28 -0700115
116 /* create a socket to represent the local endpoint */
David Howellsaaa31cb2016-09-13 08:49:05 +0100117 ret = sock_create_kern(&init_net, local->srx.transport.family,
118 local->srx.transport_type, 0, &local->socket);
David Howells17926a72007-04-26 15:48:28 -0700119 if (ret < 0) {
120 _leave(" = %d [socket]", ret);
121 return ret;
122 }
123
124 /* if a local address was supplied then bind it */
125 if (local->srx.transport_len > sizeof(sa_family_t)) {
126 _debug("bind");
127 ret = kernel_bind(local->socket,
David Howells4f95dd72016-04-04 14:00:35 +0100128 (struct sockaddr *)&local->srx.transport,
David Howells17926a72007-04-26 15:48:28 -0700129 local->srx.transport_len);
130 if (ret < 0) {
David Howells4f95dd72016-04-04 14:00:35 +0100131 _debug("bind failed %d", ret);
David Howells17926a72007-04-26 15:48:28 -0700132 goto error;
133 }
134 }
135
136 /* we want to receive ICMP errors */
137 opt = 1;
138 ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
139 (char *) &opt, sizeof(opt));
140 if (ret < 0) {
141 _debug("setsockopt failed");
142 goto error;
143 }
144
145 /* we want to set the don't fragment bit */
146 opt = IP_PMTUDISC_DO;
147 ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
148 (char *) &opt, sizeof(opt));
149 if (ret < 0) {
150 _debug("setsockopt failed");
151 goto error;
152 }
153
David Howells17926a72007-04-26 15:48:28 -0700154 /* set the socket up */
155 sock = local->socket->sk;
156 sock->sk_user_data = local;
157 sock->sk_data_ready = rxrpc_data_ready;
David Howellsabe89ef2016-04-04 14:00:32 +0100158 sock->sk_error_report = rxrpc_error_report;
David Howells17926a72007-04-26 15:48:28 -0700159 _leave(" = 0");
160 return 0;
161
162error:
Trond Myklebust91cf45f2007-11-12 18:10:39 -0800163 kernel_sock_shutdown(local->socket, SHUT_RDWR);
David Howells17926a72007-04-26 15:48:28 -0700164 local->socket->sk->sk_user_data = NULL;
165 sock_release(local->socket);
166 local->socket = NULL;
167
168 _leave(" = %d", ret);
169 return ret;
170}
171
/*
 * Look up or create a new local endpoint using the specified local address.
 *
 * Returns a referenced endpoint record on success, ERR_PTR(-EADDRINUSE) if a
 * service tries to share an address already in use, ERR_PTR(-ENOMEM) on
 * allocation failure or an ERR_PTR from rxrpc_open_socket() failure.
 */
struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;
	struct list_head *cursor;
	const char *age;
	long diff;
	int ret;

	_enter("{%d,%d,%pISp}",
	       srx->transport_type, srx->transport.family, &srx->transport);

	mutex_lock(&rxrpc_local_mutex);

	/* The endpoint list is kept ordered by rxrpc_local_cmp_key(), so the
	 * walk can stop as soon as it passes the slot this address would
	 * occupy; cursor then marks the insertion point for a new record.
	 */
	for (cursor = rxrpc_local_endpoints.next;
	     cursor != &rxrpc_local_endpoints;
	     cursor = cursor->next) {
		local = list_entry(cursor, struct rxrpc_local, link);

		diff = rxrpc_local_cmp_key(local, srx);
		if (diff < 0)
			continue;
		if (diff > 0)
			break;

		/* Services aren't allowed to share transport sockets, so
		 * reject that here.  It is possible that the object is dying -
		 * but it may also still have the local transport address that
		 * we want bound.
		 */
		if (srx->srx_service) {
			local = NULL;
			goto addr_in_use;
		}

		/* Found a match.  We replace a dying object.  Attempting to
		 * bind the transport socket may still fail if we're attempting
		 * to use a local address that the dying object is still using.
		 */
		if (!rxrpc_get_local_maybe(local)) {
			/* Couldn't get a ref, so the object is presumably
			 * dying: unlink it and advance cursor so that the
			 * replacement is inserted in its place below.
			 */
			cursor = cursor->next;
			list_del_init(&local->link);
			break;
		}

		age = "old";
		goto found;
	}

	/* No live match - allocate a new endpoint and open its socket. */
	local = rxrpc_alloc_local(srx);
	if (!local)
		goto nomem;

	ret = rxrpc_open_socket(local);
	if (ret < 0)
		goto sock_error;

	/* Insert at the point the walk stopped, preserving the ordering. */
	list_add_tail(&local->link, cursor);
	age = "new";

found:
	mutex_unlock(&rxrpc_local_mutex);

	_net("LOCAL %s %d {%pISp}",
	     age, local->debug_id, &local->srx.transport);

	_leave(" = %p", local);
	return local;

nomem:
	ret = -ENOMEM;
sock_error:
	mutex_unlock(&rxrpc_local_mutex);
	/* The record was never published on the list, so a plain free is
	 * sufficient here.
	 */
	kfree(local);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

addr_in_use:
	mutex_unlock(&rxrpc_local_mutex);
	_leave(" = -EADDRINUSE");
	return ERR_PTR(-EADDRINUSE);
}
256
/*
 * A local endpoint reached its end of life.  Queue the processor work item to
 * perform the actual destruction - tearing down the socket may sleep (see
 * rxrpc_local_destroyer()), so it can't be done directly here.
 */
void __rxrpc_put_local(struct rxrpc_local *local)
{
	_enter("%d", local->debug_id);
	rxrpc_queue_work(&local->processor);
}
265
/*
 * Destroy a local endpoint's socket and then hand the record to RCU to dispose
 * of.
 *
 * Closing the socket cannot be done from bottom half context or RCU callback
 * context because it might sleep.
 */
static void rxrpc_local_destroyer(struct rxrpc_local *local)
{
	struct socket *socket = local->socket;

	_enter("%d", local->debug_id);

	/* We can get a race between an incoming call packet queueing the
	 * processor again and the work processor starting the destruction
	 * process which will shut down the UDP socket.
	 */
	if (local->dead) {
		_leave(" [already dead]");
		return;
	}
	local->dead = true;

	/* Unpublish the endpoint so rxrpc_lookup_local() can no longer find
	 * and reuse it.
	 */
	mutex_lock(&rxrpc_local_mutex);
	list_del_init(&local->link);
	mutex_unlock(&rxrpc_local_mutex);

	/* All connections and services should be gone before destruction. */
	ASSERT(RB_EMPTY_ROOT(&local->client_conns));
	ASSERT(!local->service);

	/* Shut down and release the transport socket, detaching it from this
	 * record so socket callbacks stop referring to it.
	 */
	if (socket) {
		local->socket = NULL;
		kernel_sock_shutdown(socket, SHUT_RDWR);
		socket->sk->sk_user_data = NULL;
		sock_release(socket);
	}

	/* At this point, there should be no more packets coming in to the
	 * local endpoint.
	 */
	rxrpc_purge_queue(&local->reject_queue);
	rxrpc_purge_queue(&local->event_queue);

	/* Defer the actual freeing to after an RCU grace period. */
	_debug("rcu local %d", local->debug_id);
	call_rcu(&local->rcu, rxrpc_local_rcu);
}
312
/*
 * Process events on an endpoint: destroy the endpoint if its usage count has
 * reached zero, otherwise drain the queued reject and event packets.  Keeps
 * looping until a pass finds nothing to do.
 */
static void rxrpc_local_processor(struct work_struct *work)
{
	struct rxrpc_local *local =
		container_of(work, struct rxrpc_local, processor);
	bool again;

	_enter("%d", local->debug_id);

	do {
		again = false;

		/* __rxrpc_put_local() queues this work item; once the usage
		 * count is seen at zero, destruction is carried out here
		 * where sleeping is allowed.
		 */
		if (atomic_read(&local->usage) == 0)
			return rxrpc_local_destroyer(local);

		/* Transmit any queued rejection packets. */
		if (!skb_queue_empty(&local->reject_queue)) {
			rxrpc_reject_packets(local);
			again = true;
		}

		/* Handle any queued local events. */
		if (!skb_queue_empty(&local->event_queue)) {
			rxrpc_process_local_events(local);
			again = true;
		}
	} while (again);
}
340
/*
 * Destroy a local endpoint after the RCU grace period expires.  By now no
 * RCU reader can still hold a pointer to the record, so it can be freed.
 */
static void rxrpc_local_rcu(struct rcu_head *rcu)
{
	struct rxrpc_local *local = container_of(rcu, struct rxrpc_local, rcu);

	_enter("%d", local->debug_id);

	/* Destruction only began once the usage count hit zero, so nothing
	 * should have queued the processor again since then.
	 */
	ASSERT(!work_pending(&local->processor));

	_net("DESTROY LOCAL %d", local->debug_id);
	kfree(local);
	_leave("");
}
356
/*
 * Verify the local endpoint list is empty by this point (module unload):
 * finish any pending destruction work, BUG() on leaked endpoints, then wait
 * for outstanding RCU callbacks before the module text disappears.
 */
void __exit rxrpc_destroy_all_locals(void)
{
	struct rxrpc_local *local;

	_enter("");

	/* Let any queued rxrpc_local_processor() work run to completion. */
	flush_workqueue(rxrpc_workqueue);

	if (!list_empty(&rxrpc_local_endpoints)) {
		/* Report every leaked endpoint with its remaining usage
		 * count before crashing.
		 */
		mutex_lock(&rxrpc_local_mutex);
		list_for_each_entry(local, &rxrpc_local_endpoints, link) {
			pr_err("AF_RXRPC: Leaked local %p {%d}\n",
			       local, atomic_read(&local->usage));
		}
		mutex_unlock(&rxrpc_local_mutex);
		BUG();
	}

	/* Wait for any pending rxrpc_local_rcu() callbacks to complete. */
	rcu_barrier();
}