/* Local endpoint object management
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/hashtable.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

static void rxrpc_local_processor(struct work_struct *);
static void rxrpc_local_rcu(struct rcu_head *);

static DEFINE_MUTEX(rxrpc_local_mutex);
static LIST_HEAD(rxrpc_local_endpoints);

/*
 * Compare a local to an address.  Return -ve, 0 or +ve to indicate less than,
 * same or greater than.
 *
 * We explicitly don't compare the RxRPC service ID as we want to reject
 * conflicting uses by differing services.  Further, we don't want to share
 * addresses with different options (IPv6), so we don't compare those bits
 * either.
 */
static long rxrpc_local_cmp_key(const struct rxrpc_local *local,
				const struct sockaddr_rxrpc *srx)
{
	long diff;

	diff = ((local->srx.transport_type - srx->transport_type) ?:
		(local->srx.transport_len - srx->transport_len) ?:
		(local->srx.transport.family - srx->transport.family));
	if (diff != 0)
		return diff;

	switch (srx->transport.family) {
	case AF_INET:
		/* If the choice of UDP port is left up to the transport, then
		 * the endpoint record doesn't match.
		 */
		return ((u16 __force)local->srx.transport.sin.sin_port -
			(u16 __force)srx->transport.sin.sin_port) ?:
			memcmp(&local->srx.transport.sin.sin_addr,
			       &srx->transport.sin.sin_addr,
			       sizeof(struct in_addr));
	case AF_INET6:
		/* If the choice of UDP6 port is left up to the transport, then
		 * the endpoint record doesn't match.
		 */
		return ((u16 __force)local->srx.transport.sin6.sin6_port -
			(u16 __force)srx->transport.sin6.sin6_port) ?:
			memcmp(&local->srx.transport.sin6.sin6_addr,
			       &srx->transport.sin6.sin6_addr,
			       sizeof(struct in6_addr));
	default:
		BUG();
	}
}

/*
 * Allocate a new local endpoint.
 */
static struct rxrpc_local *rxrpc_alloc_local(const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;

	local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
	if (local) {
		atomic_set(&local->usage, 1);
		INIT_LIST_HEAD(&local->link);
		INIT_WORK(&local->processor, rxrpc_local_processor);
		INIT_HLIST_HEAD(&local->services);
		init_rwsem(&local->defrag_sem);
		skb_queue_head_init(&local->reject_queue);
		skb_queue_head_init(&local->event_queue);
		local->client_conns = RB_ROOT;
		spin_lock_init(&local->client_conns_lock);
		spin_lock_init(&local->lock);
		rwlock_init(&local->services_lock);
		local->debug_id = atomic_inc_return(&rxrpc_debug_id);
		memcpy(&local->srx, srx, sizeof(*srx));
	}

	_leave(" = %p", local);
	return local;
}

/*
 * create the local socket
 * - must be called with rxrpc_local_mutex locked
 */
static int rxrpc_open_socket(struct rxrpc_local *local)
{
	struct sock *sock;
	int ret, opt;

	_enter("%p{%d,%d}",
	       local, local->srx.transport_type, local->srx.transport.family);

	/* create a socket to represent the local endpoint */
	ret = sock_create_kern(&init_net, local->srx.transport.family,
			       local->srx.transport_type, 0, &local->socket);
	if (ret < 0) {
		_leave(" = %d [socket]", ret);
		return ret;
	}

	/* if a local address was supplied then bind it */
	if (local->srx.transport_len > sizeof(sa_family_t)) {
		_debug("bind");
		ret = kernel_bind(local->socket,
				  (struct sockaddr *)&local->srx.transport,
				  local->srx.transport_len);
		if (ret < 0) {
			_debug("bind failed %d", ret);
			goto error;
		}
	}

	/* we want to receive ICMP errors */
	opt = 1;
	ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
				(char *) &opt, sizeof(opt));
	if (ret < 0) {
		_debug("setsockopt failed");
		goto error;
	}

	/* we want to set the don't fragment bit */
	opt = IP_PMTUDISC_DO;
	ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
				(char *) &opt, sizeof(opt));
	if (ret < 0) {
		_debug("setsockopt failed");
		goto error;
	}

	/* set the socket up */
	sock = local->socket->sk;
	sock->sk_user_data = local;
	sock->sk_data_ready = rxrpc_data_ready;
	sock->sk_error_report = rxrpc_error_report;
	_leave(" = 0");
	return 0;

error:
	kernel_sock_shutdown(local->socket, SHUT_RDWR);
	local->socket->sk->sk_user_data = NULL;
	sock_release(local->socket);
	local->socket = NULL;

	_leave(" = %d", ret);
	return ret;
}

/*
 * Look up or create a new local endpoint using the specified local address.
 */
struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;
	struct list_head *cursor;
	const char *age;
	long diff;
	int ret;

	_enter("{%d,%d,%pISp}",
	       srx->transport_type, srx->transport.family, &srx->transport);

	mutex_lock(&rxrpc_local_mutex);

	for (cursor = rxrpc_local_endpoints.next;
	     cursor != &rxrpc_local_endpoints;
	     cursor = cursor->next) {
		local = list_entry(cursor, struct rxrpc_local, link);

		diff = rxrpc_local_cmp_key(local, srx);
		if (diff < 0)
			continue;
		if (diff > 0)
			break;

		/* Services aren't allowed to share transport sockets, so
		 * reject that here.  It is possible that the object is dying -
		 * but it may also still have the local transport address that
		 * we want bound.
		 */
		if (srx->srx_service) {
			local = NULL;
			goto addr_in_use;
		}

		/* Found a match.  We replace a dying object.  Attempting to
		 * bind the transport socket may still fail if we're attempting
		 * to use a local address that the dying object is still using.
		 */
		if (!rxrpc_get_local_maybe(local)) {
			cursor = cursor->next;
			list_del_init(&local->link);
			break;
		}

		age = "old";
		goto found;
	}

	local = rxrpc_alloc_local(srx);
	if (!local)
		goto nomem;

	ret = rxrpc_open_socket(local);
	if (ret < 0)
		goto sock_error;

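	/* The search loop left cursor at the insertion point (or at the slot
	 * of the dying object that was just unlinked), so adding the new
	 * endpoint before it keeps the list sorted by rxrpc_local_cmp_key().
	 */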
	list_add_tail(&local->link, cursor);
	age = "new";

found:
	mutex_unlock(&rxrpc_local_mutex);

	_net("LOCAL %s %d {%pISp}",
	     age, local->debug_id, &local->srx.transport);

	_leave(" = %p", local);
	return local;

nomem:
	ret = -ENOMEM;
sock_error:
	mutex_unlock(&rxrpc_local_mutex);
	kfree(local);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

addr_in_use:
	mutex_unlock(&rxrpc_local_mutex);
	_leave(" = -EADDRINUSE");
	return ERR_PTR(-EADDRINUSE);
}

/*
 * A local endpoint reached its end of life.
 */
void __rxrpc_put_local(struct rxrpc_local *local)
{
	_enter("%d", local->debug_id);
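	/* Tearing down the socket may sleep, which isn't possible in all of
	 * the contexts this can be called from, so defer destruction to the
	 * processor work item.
	 */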
	rxrpc_queue_work(&local->processor);
}

/*
 * Destroy a local endpoint's socket and then hand the record to RCU to dispose
 * of.
 *
 * Closing the socket cannot be done from bottom half context or RCU callback
 * context because it might sleep.
 */
static void rxrpc_local_destroyer(struct rxrpc_local *local)
{
	struct socket *socket = local->socket;

	_enter("%d", local->debug_id);

	/* We can get a race between an incoming call packet queueing the
	 * processor again and the work processor starting the destruction
	 * process which will shut down the UDP socket.
	 */
	if (local->dead) {
		_leave(" [already dead]");
		return;
	}
	local->dead = true;

	mutex_lock(&rxrpc_local_mutex);
	list_del_init(&local->link);
	mutex_unlock(&rxrpc_local_mutex);

	ASSERT(RB_EMPTY_ROOT(&local->client_conns));
	ASSERT(hlist_empty(&local->services));

	if (socket) {
		local->socket = NULL;
		kernel_sock_shutdown(socket, SHUT_RDWR);
		socket->sk->sk_user_data = NULL;
		sock_release(socket);
	}

	/* At this point, there should be no more packets coming in to the
	 * local endpoint.
	 */
	rxrpc_purge_queue(&local->reject_queue);
	rxrpc_purge_queue(&local->event_queue);

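	/* Don't free the record itself until an RCU grace period has passed,
	 * in case anything is still looking at it under the RCU read lock.
	 */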
	_debug("rcu local %d", local->debug_id);
	call_rcu(&local->rcu, rxrpc_local_rcu);
}

/*
 * Process events on an endpoint
 */
static void rxrpc_local_processor(struct work_struct *work)
{
	struct rxrpc_local *local =
		container_of(work, struct rxrpc_local, processor);
	bool again;

	_enter("%d", local->debug_id);

	do {
		again = false;
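		/* A usage count of zero means the last reference has been
		 * put; tear the endpoint down and stop processing.
		 */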
		if (atomic_read(&local->usage) == 0)
			return rxrpc_local_destroyer(local);

		if (!skb_queue_empty(&local->reject_queue)) {
			rxrpc_reject_packets(local);
			again = true;
		}

		if (!skb_queue_empty(&local->event_queue)) {
			rxrpc_process_local_events(local);
			again = true;
		}
	} while (again);
}

/*
 * Destroy a local endpoint after the RCU grace period expires.
 */
static void rxrpc_local_rcu(struct rcu_head *rcu)
{
	struct rxrpc_local *local = container_of(rcu, struct rxrpc_local, rcu);

	_enter("%d", local->debug_id);

	ASSERT(!work_pending(&local->processor));

	_net("DESTROY LOCAL %d", local->debug_id);
	kfree(local);
	_leave("");
}

/*
 * Verify the local endpoint list is empty by this point.
 */
void __exit rxrpc_destroy_all_locals(void)
{
	struct rxrpc_local *local;

	_enter("");

	flush_workqueue(rxrpc_workqueue);

	if (!list_empty(&rxrpc_local_endpoints)) {
		mutex_lock(&rxrpc_local_mutex);
		list_for_each_entry(local, &rxrpc_local_endpoints, link) {
			pr_err("AF_RXRPC: Leaked local %p {%d}\n",
			       local, atomic_read(&local->usage));
		}
		mutex_unlock(&rxrpc_local_mutex);
		BUG();
	}

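	/* Wait for any rxrpc_local_rcu() callbacks still in flight to run
	 * before the module text disappears.
	 */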
	rcu_barrier();
}