/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/hashtable.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Maximum lifetime of a call (in jiffies).
 */
unsigned int rxrpc_max_call_lifetime = 60 * HZ;

/*
 * Time till dead call expires after last use (in jiffies).
 */
unsigned int rxrpc_dead_call_expiry = 2 * HZ;

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED]		= "Uninit",
	[RXRPC_CALL_CLIENT_AWAIT_CONN]		= "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_CLIENT_FINAL_ACK]		= "ClFnlACK",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
	[RXRPC_CALL_SERVER_BUSY]		= "SvBusy ",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
	[RXRPC_CALL_DEAD]			= "Dead ",
};

struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_dead_call_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);

static DEFINE_SPINLOCK(rxrpc_call_hash_lock);
static DEFINE_HASHTABLE(rxrpc_call_hash, 10);

/*
 * Hash function for rxrpc_call_hash
 */
static unsigned long rxrpc_call_hashfunc(
	u8		in_clientflag,
	u32		cid,
	u32		call_id,
	u32		epoch,
	u16		service_id,
	sa_family_t	family,
	void		*localptr,
	unsigned int	addr_size,
	const u8	*peer_addr)
{
	const u16 *p;
	unsigned int i;
	unsigned long key;

	_enter("");

	key = (unsigned long)localptr;
	/* We just want to add up the __be32 values, so forcing the
	 * cast should be okay.
	 */
	key += epoch;
	key += service_id;
	key += call_id;
	key += (cid & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT;
	key += cid & RXRPC_CHANNELMASK;
	key += in_clientflag;
	key += family;
	/* Step through the peer address in 16-bit portions for speed */
	for (i = 0, p = (const u16 *)peer_addr; i < addr_size >> 1; i++, p++)
		key += *p;
	_leave(" key = 0x%lx", key);
	return key;
}

/*
 * Add a call to the hashtable
 */
static void rxrpc_call_hash_add(struct rxrpc_call *call)
{
	unsigned long key;
	unsigned int addr_size = 0;

	_enter("");
	switch (call->family) {
	case AF_INET:
		addr_size = sizeof(call->peer_ip.ipv4_addr);
		break;
	case AF_INET6:
		addr_size = sizeof(call->peer_ip.ipv6_addr);
		break;
	default:
		break;
	}
	key = rxrpc_call_hashfunc(call->in_clientflag, call->cid,
				  call->call_id, call->epoch,
				  call->service_id, call->family,
				  call->conn->params.local, addr_size,
				  call->peer_ip.ipv6_addr);
	/* Store the full key in the call */
	call->hash_key = key;
	spin_lock(&rxrpc_call_hash_lock);
	hash_add_rcu(rxrpc_call_hash, &call->hash_node, key);
	spin_unlock(&rxrpc_call_hash_lock);
	_leave("");
}

/*
 * Remove a call from the hashtable
 */
static void rxrpc_call_hash_del(struct rxrpc_call *call)
{
	_enter("");
	spin_lock(&rxrpc_call_hash_lock);
	hash_del_rcu(&call->hash_node);
	spin_unlock(&rxrpc_call_hash_lock);
	_leave("");
}

/*
 * Find a call in the hashtable and return it, or NULL if it
 * isn't there.
 */
struct rxrpc_call *rxrpc_find_call_hash(
	struct rxrpc_host_header *hdr,
	void *localptr,
	sa_family_t family,
	const void *peer_addr)
{
	unsigned long key;
	unsigned int addr_size = 0;
	struct rxrpc_call *call = NULL;
	struct rxrpc_call *ret = NULL;
	u8 in_clientflag = hdr->flags & RXRPC_CLIENT_INITIATED;

	_enter("");
	switch (family) {
	case AF_INET:
		addr_size = sizeof(call->peer_ip.ipv4_addr);
		break;
	case AF_INET6:
		addr_size = sizeof(call->peer_ip.ipv6_addr);
		break;
	default:
		break;
	}

	key = rxrpc_call_hashfunc(in_clientflag, hdr->cid, hdr->callNumber,
				  hdr->epoch, hdr->serviceId,
				  family, localptr, addr_size,
				  peer_addr);
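	/* Walk the hash bucket and compare every field, not just the key,
	 * since different calls may hash to the same value.
	 */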
	hash_for_each_possible_rcu(rxrpc_call_hash, call, hash_node, key) {
		if (call->hash_key == key &&
		    call->call_id == hdr->callNumber &&
		    call->cid == hdr->cid &&
		    call->in_clientflag == in_clientflag &&
		    call->service_id == hdr->serviceId &&
		    call->family == family &&
		    call->local == localptr &&
		    memcmp(call->peer_ip.ipv6_addr, peer_addr,
			   addr_size) == 0 &&
		    call->epoch == hdr->epoch) {
			ret = call;
			break;
		}
	}
	_leave(" = %p", ret);
	return ret;
}

/*
 * find an extant call by user call ID
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}

/*
 * allocate a new call
 */
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->acks_winsz = 16;
	call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
				    gfp);
	if (!call->acks_window) {
		kmem_cache_free(rxrpc_call_jar, call);
		return NULL;
	}

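	/* Set up the call's timers and work items; each takes the call
	 * pointer as its context.
	 */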
256 setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
257 (unsigned long) call);
258 setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
259 (unsigned long) call);
260 setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
261 (unsigned long) call);
262 setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
263 (unsigned long) call);
264 INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
265 INIT_WORK(&call->processor, &rxrpc_process_call);
David Howells999b69f2016-06-17 15:42:35 +0100266 INIT_LIST_HEAD(&call->link);
David Howells17926a72007-04-26 15:48:28 -0700267 INIT_LIST_HEAD(&call->accept_link);
268 skb_queue_head_init(&call->rx_queue);
269 skb_queue_head_init(&call->rx_oos_queue);
270 init_waitqueue_head(&call->tx_waitq);
271 spin_lock_init(&call->lock);
272 rwlock_init(&call->state_lock);
273 atomic_set(&call->usage, 1);
274 call->debug_id = atomic_inc_return(&rxrpc_debug_id);
David Howells17926a72007-04-26 15:48:28 -0700275
276 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
277
278 call->rx_data_expect = 1;
279 call->rx_data_eaten = 0;
280 call->rx_first_oos = 0;
David Howells817913d2014-02-07 18:10:30 +0000281 call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
David Howells17926a72007-04-26 15:48:28 -0700282 call->creation_jif = jiffies;
283 return call;
284}
285
286/*
David Howells999b69f2016-06-17 15:42:35 +0100287 * Allocate a new client call.
David Howells17926a72007-04-26 15:48:28 -0700288 */
David Howellsaa390bb2016-06-17 10:06:56 +0100289static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
290 struct sockaddr_rxrpc *srx,
291 gfp_t gfp)
David Howells17926a72007-04-26 15:48:28 -0700292{
293 struct rxrpc_call *call;
David Howells17926a72007-04-26 15:48:28 -0700294
295 _enter("");
296
David Howells999b69f2016-06-17 15:42:35 +0100297 ASSERT(rx->local != NULL);
David Howells17926a72007-04-26 15:48:28 -0700298
299 call = rxrpc_alloc_call(gfp);
300 if (!call)
301 return ERR_PTR(-ENOMEM);
David Howells999b69f2016-06-17 15:42:35 +0100302 call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
David Howells17926a72007-04-26 15:48:28 -0700303
304 sock_hold(&rx->sk);
305 call->socket = rx;
306 call->rx_data_post = 1;
307
Tim Smith77276402014-03-03 23:04:45 +0000308 /* Record copies of information for hashtable lookup */
David Howells19ffa012016-04-04 14:00:36 +0100309 call->family = rx->family;
David Howells999b69f2016-06-17 15:42:35 +0100310 call->local = rx->local;
David Howells19ffa012016-04-04 14:00:36 +0100311 switch (call->family) {
Tim Smith77276402014-03-03 23:04:45 +0000312 case AF_INET:
David Howells999b69f2016-06-17 15:42:35 +0100313 call->peer_ip.ipv4_addr = srx->transport.sin.sin_addr.s_addr;
Tim Smith77276402014-03-03 23:04:45 +0000314 break;
315 case AF_INET6:
316 memcpy(call->peer_ip.ipv6_addr,
David Howells999b69f2016-06-17 15:42:35 +0100317 srx->transport.sin6.sin6_addr.in6_u.u6_addr8,
Tim Smith77276402014-03-03 23:04:45 +0000318 sizeof(call->peer_ip.ipv6_addr));
319 break;
320 }
David Howells999b69f2016-06-17 15:42:35 +0100321
322 call->service_id = srx->srx_service;
323 call->in_clientflag = 0;
324
325 _leave(" = %p", call);
326 return call;
327}
328
329/*
330 * Begin client call.
331 */
332static int rxrpc_begin_client_call(struct rxrpc_call *call,
333 struct rxrpc_conn_parameters *cp,
David Howells999b69f2016-06-17 15:42:35 +0100334 struct sockaddr_rxrpc *srx,
335 gfp_t gfp)
336{
337 int ret;
338
339 /* Set up or get a connection record and set the protocol parameters,
340 * including channel number and call ID.
341 */
David Howellsaa390bb2016-06-17 10:06:56 +0100342 ret = rxrpc_connect_call(call, cp, srx, gfp);
David Howells999b69f2016-06-17 15:42:35 +0100343 if (ret < 0)
344 return ret;
345
346 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
347
Tim Smith77276402014-03-03 23:04:45 +0000348 /* Add the new call to the hashtable */
349 rxrpc_call_hash_add(call);
350
David Howells85f32272016-04-04 14:00:36 +0100351 spin_lock(&call->conn->params.peer->lock);
352 hlist_add_head(&call->error_link, &call->conn->params.peer->error_targets);
353 spin_unlock(&call->conn->params.peer->lock);
David Howells17926a72007-04-26 15:48:28 -0700354
David Howells5873c082014-02-07 18:58:44 +0000355 call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
David Howells17926a72007-04-26 15:48:28 -0700356 add_timer(&call->lifetimer);
David Howells999b69f2016-06-17 15:42:35 +0100357 return 0;
David Howells17926a72007-04-26 15:48:28 -0700358}
359
360/*
361 * set up a call for the given data
362 * - called in process context with IRQs enabled
363 */
David Howells2341e072016-06-09 23:02:51 +0100364struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
David Howells19ffa012016-04-04 14:00:36 +0100365 struct rxrpc_conn_parameters *cp,
David Howells999b69f2016-06-17 15:42:35 +0100366 struct sockaddr_rxrpc *srx,
David Howells17926a72007-04-26 15:48:28 -0700367 unsigned long user_call_ID,
David Howells17926a72007-04-26 15:48:28 -0700368 gfp_t gfp)
369{
David Howells2341e072016-06-09 23:02:51 +0100370 struct rxrpc_call *call, *xcall;
371 struct rb_node *parent, **pp;
David Howells999b69f2016-06-17 15:42:35 +0100372 int ret;
David Howells17926a72007-04-26 15:48:28 -0700373
David Howells999b69f2016-06-17 15:42:35 +0100374 _enter("%p,%lx", rx, user_call_ID);
David Howells17926a72007-04-26 15:48:28 -0700375
David Howellsaa390bb2016-06-17 10:06:56 +0100376 call = rxrpc_alloc_client_call(rx, srx, gfp);
David Howells2341e072016-06-09 23:02:51 +0100377 if (IS_ERR(call)) {
378 _leave(" = %ld", PTR_ERR(call));
379 return call;
David Howells17926a72007-04-26 15:48:28 -0700380 }
381
David Howells999b69f2016-06-17 15:42:35 +0100382 /* Publish the call, even though it is incompletely set up as yet */
David Howells2341e072016-06-09 23:02:51 +0100383 call->user_call_ID = user_call_ID;
384 __set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
David Howells17926a72007-04-26 15:48:28 -0700385
386 write_lock(&rx->call_lock);
387
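	/* Find the insertion point in the socket's tree of calls, which is
	 * indexed by user call ID; bail out if the ID is already in use.
	 */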
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_user_ID_now_present;
	}

	rxrpc_get_call(call);

	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	ret = rxrpc_begin_client_call(call, cp, srx, gfp);
	if (ret < 0)
		goto error;

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

error:
	write_lock(&rx->call_lock);
	rb_erase(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);
	rxrpc_put_call(call);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	call->state = RXRPC_CALL_DEAD;
	rxrpc_put_call(call);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

	/* We unexpectedly found the user ID in the list after taking
	 * the call_lock. This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
found_user_ID_now_present:
	write_unlock(&rx->call_lock);
	call->state = RXRPC_CALL_DEAD;
	rxrpc_put_call(call);
	_leave(" = -EEXIST [%p]", call);
	return ERR_PTR(-EEXIST);
}

/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
				       struct rxrpc_connection *conn,
				       struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_call *call, *candidate;
	u32 call_id, chan;

	_enter(",%d", conn->debug_id);

	ASSERT(rx != NULL);

	candidate = rxrpc_alloc_call(GFP_NOIO);
	if (!candidate)
		return ERR_PTR(-EBUSY);

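	/* Fill in the candidate from the packet header; the channel number is
	 * carried in the bottom bits of the connection ID.
	 */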
	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
	candidate->socket = rx;
	candidate->conn = conn;
	candidate->cid = sp->hdr.cid;
	candidate->call_id = sp->hdr.callNumber;
	candidate->channel = chan;
	candidate->rx_data_post = 0;
	candidate->state = RXRPC_CALL_SERVER_ACCEPTING;
	if (conn->security_ix > 0)
		candidate->state = RXRPC_CALL_SERVER_SECURING;

	spin_lock(&conn->channel_lock);

	/* set the channel for this call */
	call = rcu_dereference_protected(conn->channels[chan].call,
					 lockdep_is_held(&conn->channel_lock));

	_debug("channel[%u] is %p", candidate->channel, call);
	if (call && call->call_id == sp->hdr.callNumber) {
		/* already set; must've been a duplicate packet */
		_debug("extant call [%d]", call->state);
		ASSERTCMP(call->conn, ==, conn);

		read_lock(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_LOCALLY_ABORTED:
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				rxrpc_queue_call(call);
		case RXRPC_CALL_REMOTELY_ABORTED:
			read_unlock(&call->state_lock);
			goto aborted_call;
		default:
			rxrpc_get_call(call);
			read_unlock(&call->state_lock);
			goto extant_call;
		}
	}

	if (call) {
		/* it seems the channel is still in use from the previous call
		 * - ditch the old binding if its call is now complete */
		_debug("CALL: %u { %s }",
		       call->debug_id, rxrpc_call_states[call->state]);

		if (call->state >= RXRPC_CALL_COMPLETE) {
			__rxrpc_disconnect_call(call);
		} else {
			spin_unlock(&conn->channel_lock);
			kmem_cache_free(rxrpc_call_jar, candidate);
			_leave(" = -EBUSY");
			return ERR_PTR(-EBUSY);
		}
	}

	/* check the call number isn't duplicate */
	_debug("check dup");
	call_id = sp->hdr.callNumber;

	/* We just ignore calls prior to the current call ID. Terminated calls
	 * are handled via the connection.
	 */
	if (call_id <= conn->channels[chan].call_counter)
		goto old_call; /* TODO: Just drop packet */

	/* make the call available */
	_debug("new call");
	call = candidate;
	candidate = NULL;
	conn->channels[chan].call_counter = call_id;
	rcu_assign_pointer(conn->channels[chan].call, call);
	sock_hold(&rx->sk);
	rxrpc_get_connection(conn);
	spin_unlock(&conn->channel_lock);

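	/* Hook the call onto the peer's error-target list so that errors
	 * reported against the peer get propagated to it.
	 */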
	spin_lock(&conn->params.peer->lock);
	hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	/* Record copies of information for hashtable lookup */
	call->family = rx->family;
	call->local = conn->params.local;
	switch (call->family) {
	case AF_INET:
		call->peer_ip.ipv4_addr =
			conn->params.peer->srx.transport.sin.sin_addr.s_addr;
		break;
	case AF_INET6:
		memcpy(call->peer_ip.ipv6_addr,
		       conn->params.peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8,
		       sizeof(call->peer_ip.ipv6_addr));
		break;
	default:
		break;
	}
	call->epoch = conn->proto.epoch;
	call->service_id = conn->params.service_id;
	call->in_clientflag = conn->proto.in_clientflag;
	/* Add the new call to the hashtable */
	rxrpc_call_hash_add(call);

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	_leave(" = %p {%d} [new]", call, call->debug_id);
	return call;

extant_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
	return call;

aborted_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNABORTED");
	return ERR_PTR(-ECONNABORTED);

old_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNRESET [old]");
	return ERR_PTR(-ECONNRESET);
}

/*
 * detach a call from a socket and set up for release
 */
void rxrpc_release_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_sock *rx = call->socket;

	_enter("{%d,%d,%d,%d}",
	       call->debug_id, atomic_read(&call->usage),
	       atomic_read(&call->ackr_not_idle),
	       call->rx_first_oos);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	/* dissociate from the socket
	 * - the socket's ref on the call is passed to the death timer
	 */
	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	spin_lock(&conn->params.peer->lock);
	hlist_del_init(&call->error_link);
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&rx->call_lock);
	if (!list_empty(&call->accept_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		list_del_init(&call->accept_link);
		sk_acceptq_removed(&rx->sk);
	} else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}
	write_unlock_bh(&rx->call_lock);

	/* free up the channel for reuse */
	write_lock_bh(&call->state_lock);

	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
		_debug("+++ ABORTING STATE %d +++\n", call->state);
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->local_abort = RX_CALL_DEAD;
	}
	write_unlock_bh(&call->state_lock);

	rxrpc_disconnect_call(call);

	/* clean up the Rx queue */
	if (!skb_queue_empty(&call->rx_queue) ||
	    !skb_queue_empty(&call->rx_oos_queue)) {
		struct rxrpc_skb_priv *sp;
		struct sk_buff *skb;

		_debug("purge Rx queues");

		spin_lock_bh(&call->lock);
		while ((skb = skb_dequeue(&call->rx_queue)) ||
		       (skb = skb_dequeue(&call->rx_oos_queue))) {
			sp = rxrpc_skb(skb);
			if (sp->call) {
				ASSERTCMP(sp->call, ==, call);
				rxrpc_put_call(call);
				sp->call = NULL;
			}
			skb->destructor = NULL;
			spin_unlock_bh(&call->lock);

			_debug("- zap %s %%%u #%u",
			       rxrpc_pkts[sp->hdr.type],
			       sp->hdr.serial, sp->hdr.seq);
			rxrpc_free_skb(skb);
			spin_lock_bh(&call->lock);
		}
		spin_unlock_bh(&call->lock);

		ASSERTCMP(call->state, !=, RXRPC_CALL_COMPLETE);
	}

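	/* Cancel the remaining timers and hand the socket's ref on the call
	 * over to the death timer for delayed reaping.
	 */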
	del_timer_sync(&call->resend_timer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->lifetimer);
	call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
	add_timer(&call->deadspan);

	_leave("");
}

/*
 * handle a dead call being ready for reaping
 */
static void rxrpc_dead_call_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_DEAD;
	write_unlock_bh(&call->state_lock);
	rxrpc_put_call(call);
}

/*
 * mark a call as to be released, aborting it if it's still in progress
 * - called with softirqs disabled
 */
static void rxrpc_mark_call_released(struct rxrpc_call *call)
{
	bool sched;

	write_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_DEAD) {
		sched = false;
		if (call->state < RXRPC_CALL_COMPLETE) {
			_debug("abort call %p", call);
			call->state = RXRPC_CALL_LOCALLY_ABORTED;
			call->local_abort = RX_CALL_DEAD;
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				sched = true;
		}
		if (!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
			sched = true;
		if (sched)
			rxrpc_queue_call(call);
	}
	write_unlock(&call->state_lock);
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p", rx);

	read_lock_bh(&rx->call_lock);

	/* mark all the calls as no longer wanting incoming packets */
	for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
		call = rb_entry(p, struct rxrpc_call, sock_node);
		rxrpc_mark_call_released(call);
	}

	/* kill the not-yet-accepted incoming calls */
	list_for_each_entry(call, &rx->secureq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	list_for_each_entry(call, &rx->acceptq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	read_unlock_bh(&rx->call_lock);
	_leave("");
}

/*
 * release a call
 */
void __rxrpc_put_call(struct rxrpc_call *call)
{
	ASSERT(call != NULL);

	_enter("%p{u=%d}", call, atomic_read(&call->usage));

	ASSERTCMP(atomic_read(&call->usage), >, 0);

	if (atomic_dec_and_test(&call->usage)) {
		_debug("call %d dead", call->debug_id);
		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
		rxrpc_queue_work(&call->destroyer);
	}
	_leave("");
}

/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

	rxrpc_purge_queue(&call->rx_queue);
	kmem_cache_free(rxrpc_call_jar, call);
}

/*
 * clean up a call
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

	ASSERT(call->socket);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->lifetimer);
	del_timer_sync(&call->deadspan);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->resend_timer);

	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->events, ==, 0);
	if (work_pending(&call->processor)) {
		_debug("defer destroy");
		rxrpc_queue_work(&call->destroyer);
		return;
	}

	ASSERTCMP(call->conn, ==, NULL);

	/* Remove the call from the hash */
	rxrpc_call_hash_del(call);

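	/* Free any packets still held in the circular Tx window buffer. */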
	if (call->acks_window) {
		_debug("kill Tx window %d",
		       CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz));
		smp_mb();
		while (CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz) > 0) {
			struct rxrpc_skb_priv *sp;
			unsigned long _skb;

			_skb = call->acks_window[call->acks_tail] & ~1;
			sp = rxrpc_skb((struct sk_buff *)_skb);
			_debug("+++ clear Tx %u", sp->hdr.seq);
			rxrpc_free_skb((struct sk_buff *)_skb);
			call->acks_tail =
				(call->acks_tail + 1) & (call->acks_winsz - 1);
		}

		kfree(call->acks_window);
	}

	rxrpc_free_skb(call->tx_pending);

	rxrpc_purge_queue(&call->rx_queue);
	ASSERT(skb_queue_empty(&call->rx_oos_queue));
	sock_put(&call->socket->sk);
	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}

/*
 * destroy a call
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, destroyer);

	_enter("%p{%d,%d,%p}",
	       call, atomic_read(&call->usage), call->channel, call->conn);

	ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	rxrpc_cleanup_call(call);
	_leave("");
}

874/*
875 * preemptively destroy all the call records from a transport endpoint rather
876 * than waiting for them to time out
877 */
void __exit rxrpc_destroy_all_calls(void)
{
	struct rxrpc_call *call;

	_enter("");
	write_lock_bh(&rxrpc_call_lock);

	while (!list_empty(&rxrpc_calls)) {
		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		list_del_init(&call->link);

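		/* Dispose of the call according to how many references are
		 * still outstanding.
		 */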
		switch (atomic_read(&call->usage)) {
		case 0:
			ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
			break;
		case 1:
			if (del_timer_sync(&call->deadspan) != 0 &&
			    call->state != RXRPC_CALL_DEAD)
				rxrpc_dead_call_expired((unsigned long) call);
			if (call->state != RXRPC_CALL_DEAD)
				break;
		default:
			pr_err("Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
			       call, atomic_read(&call->usage),
			       atomic_read(&call->ackr_not_idle),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);
			if (!skb_queue_empty(&call->rx_queue))
				pr_err("Rx queue occupied\n");
			if (!skb_queue_empty(&call->rx_oos_queue))
				pr_err("OOS queue occupied\n");
			break;
		}

		write_unlock_bh(&rxrpc_call_lock);
		cond_resched();
		write_lock_bh(&rxrpc_call_lock);
	}

	write_unlock_bh(&rxrpc_call_lock);
	_leave("");
}

/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	_enter("{%d}", call->debug_id);
	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE) {
		set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
		rxrpc_queue_call(call);
	}
	read_unlock_bh(&call->state_lock);
}

/*
 * handle resend timer expiry
 * - may not take call->state_lock as this can deadlock against del_timer_sync()
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
		rxrpc_queue_call(call);
}

/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE &&
	    !test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
		rxrpc_queue_call(call);
	read_unlock_bh(&call->state_lock);
}