/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/hashtable.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Maximum lifetime of a call (in jiffies).
 */
unsigned int rxrpc_max_call_lifetime = 60 * HZ;

/*
 * Time till dead call expires after last use (in jiffies).
 */
unsigned int rxrpc_dead_call_expiry = 2 * HZ;

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
        [RXRPC_CALL_UNINITIALISED]              = "Uninit",
        [RXRPC_CALL_CLIENT_AWAIT_CONN]          = "ClWtConn",
        [RXRPC_CALL_CLIENT_SEND_REQUEST]        = "ClSndReq",
        [RXRPC_CALL_CLIENT_AWAIT_REPLY]         = "ClAwtRpl",
        [RXRPC_CALL_CLIENT_RECV_REPLY]          = "ClRcvRpl",
        [RXRPC_CALL_CLIENT_FINAL_ACK]           = "ClFnlACK",
        [RXRPC_CALL_SERVER_SECURING]            = "SvSecure",
        [RXRPC_CALL_SERVER_ACCEPTING]           = "SvAccept",
        [RXRPC_CALL_SERVER_RECV_REQUEST]        = "SvRcvReq",
        [RXRPC_CALL_SERVER_ACK_REQUEST]         = "SvAckReq",
        [RXRPC_CALL_SERVER_SEND_REPLY]          = "SvSndRpl",
        [RXRPC_CALL_SERVER_AWAIT_ACK]           = "SvAwtACK",
        [RXRPC_CALL_COMPLETE]                   = "Complete",
        [RXRPC_CALL_SERVER_BUSY]                = "SvBusy  ",
        [RXRPC_CALL_REMOTELY_ABORTED]           = "RmtAbort",
        [RXRPC_CALL_LOCALLY_ABORTED]            = "LocAbort",
        [RXRPC_CALL_NETWORK_ERROR]              = "NetError",
        [RXRPC_CALL_DEAD]                       = "Dead    ",
};

struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_dead_call_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);

static DEFINE_SPINLOCK(rxrpc_call_hash_lock);
static DEFINE_HASHTABLE(rxrpc_call_hash, 10);
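/* The call hash table above has 1 << 10 buckets.  Insertion and removal are
 * serialised by rxrpc_call_hash_lock; lookups walk a bucket with the RCU
 * list primitives, so the caller of rxrpc_find_call_hash() is presumably
 * expected to hold rcu_read_lock() across the search.
 */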

/*
 * Hash function for rxrpc_call_hash
 */
static unsigned long rxrpc_call_hashfunc(
        u8              in_clientflag,
        u32             cid,
        u32             call_id,
        u32             epoch,
        u16             service_id,
        sa_family_t     family,
        void            *localptr,
        unsigned int    addr_size,
        const u8        *peer_addr)
{
        const u16 *p;
        unsigned int i;
        unsigned long key;

        _enter("");

        key = (unsigned long)localptr;
        /* We just want to add up the __be32 values, so forcing the
         * cast should be okay.
         */
        key += epoch;
        key += service_id;
        key += call_id;
        key += (cid & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT;
        key += cid & RXRPC_CHANNELMASK;
        key += in_clientflag;
        key += family;
        /* Step through the peer address in 16-bit portions for speed */
        for (i = 0, p = (const u16 *)peer_addr; i < addr_size >> 1; i++, p++)
                key += *p;
        _leave(" key = 0x%lx", key);
        return key;
}
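/* Note that rxrpc_call_hash_add() and rxrpc_find_call_hash() below must feed
 * exactly the same tuple of values into this function for a given call,
 * otherwise an incoming packet will hash to a different bucket and the call
 * will never be matched.
 */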

/*
 * Add a call to the hashtable
 */
static void rxrpc_call_hash_add(struct rxrpc_call *call)
{
        unsigned long key;
        unsigned int addr_size = 0;

        _enter("");
        switch (call->family) {
        case AF_INET:
                addr_size = sizeof(call->peer_ip.ipv4_addr);
                break;
        case AF_INET6:
                addr_size = sizeof(call->peer_ip.ipv6_addr);
                break;
        default:
                break;
        }
        key = rxrpc_call_hashfunc(call->in_clientflag, call->cid,
                                  call->call_id, call->epoch,
                                  call->service_id, call->family,
                                  call->conn->params.local, addr_size,
                                  call->peer_ip.ipv6_addr);
        /* Store the full key in the call */
        call->hash_key = key;
        spin_lock(&rxrpc_call_hash_lock);
        hash_add_rcu(rxrpc_call_hash, &call->hash_node, key);
        spin_unlock(&rxrpc_call_hash_lock);
        _leave("");
}

/*
 * Remove a call from the hashtable
 */
static void rxrpc_call_hash_del(struct rxrpc_call *call)
{
        _enter("");
        spin_lock(&rxrpc_call_hash_lock);
        hash_del_rcu(&call->hash_node);
        spin_unlock(&rxrpc_call_hash_lock);
        _leave("");
}

/*
 * Find a call in the hashtable and return it, or NULL if it
 * isn't there.
 */
struct rxrpc_call *rxrpc_find_call_hash(
        struct rxrpc_host_header *hdr,
        void *localptr,
        sa_family_t family,
        const void *peer_addr)
{
        unsigned long key;
        unsigned int addr_size = 0;
        struct rxrpc_call *call = NULL;
        struct rxrpc_call *ret = NULL;
        u8 in_clientflag = hdr->flags & RXRPC_CLIENT_INITIATED;

        _enter("");
        switch (family) {
        case AF_INET:
                addr_size = sizeof(call->peer_ip.ipv4_addr);
                break;
        case AF_INET6:
                addr_size = sizeof(call->peer_ip.ipv6_addr);
                break;
        default:
                break;
        }

        key = rxrpc_call_hashfunc(in_clientflag, hdr->cid, hdr->callNumber,
                                  hdr->epoch, hdr->serviceId,
                                  family, localptr, addr_size,
                                  peer_addr);
        hash_for_each_possible_rcu(rxrpc_call_hash, call, hash_node, key) {
                if (call->hash_key == key &&
                    call->call_id == hdr->callNumber &&
                    call->cid == hdr->cid &&
                    call->in_clientflag == in_clientflag &&
                    call->service_id == hdr->serviceId &&
                    call->family == family &&
                    call->local == localptr &&
                    memcmp(call->peer_ip.ipv6_addr, peer_addr,
                           addr_size) == 0 &&
                    call->epoch == hdr->epoch) {
                        ret = call;
                        break;
                }
        }
        _leave(" = %p", ret);
        return ret;
}
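/* The hash key only narrows the search to one bucket; the field-by-field
 * comparison above is what actually distinguishes calls whose parameters
 * happen to sum to the same key, since rxrpc_call_hashfunc() is a simple
 * additive hash rather than a collision-free identifier.
 */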

/*
 * find an extant call by its user call ID
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
                                              unsigned long user_call_ID)
{
        struct rxrpc_call *call;
        struct rb_node *p;

        _enter("%p,%lx", rx, user_call_ID);

        read_lock(&rx->call_lock);

        p = rx->calls.rb_node;
        while (p) {
                call = rb_entry(p, struct rxrpc_call, sock_node);

                if (user_call_ID < call->user_call_ID)
                        p = p->rb_left;
                else if (user_call_ID > call->user_call_ID)
                        p = p->rb_right;
                else
                        goto found_extant_call;
        }

        read_unlock(&rx->call_lock);
        _leave(" = NULL");
        return NULL;

found_extant_call:
        rxrpc_get_call(call);
        read_unlock(&rx->call_lock);
        _leave(" = %p [%d]", call, atomic_read(&call->usage));
        return call;
}

/*
 * allocate a new call
 */
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
        struct rxrpc_call *call;

        call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
        if (!call)
                return NULL;

        call->acks_winsz = 16;
        call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
                                    gfp);
        if (!call->acks_window) {
                kmem_cache_free(rxrpc_call_jar, call);
                return NULL;
        }

        setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
                    (unsigned long) call);
        setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
                    (unsigned long) call);
        setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
                    (unsigned long) call);
        setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
                    (unsigned long) call);
        INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
        INIT_WORK(&call->processor, &rxrpc_process_call);
        INIT_LIST_HEAD(&call->link);
        INIT_LIST_HEAD(&call->accept_link);
        skb_queue_head_init(&call->rx_queue);
        skb_queue_head_init(&call->rx_oos_queue);
        init_waitqueue_head(&call->tx_waitq);
        spin_lock_init(&call->lock);
        rwlock_init(&call->state_lock);
        atomic_set(&call->usage, 1);
        call->debug_id = atomic_inc_return(&rxrpc_debug_id);

        memset(&call->sock_node, 0xed, sizeof(call->sock_node));

        call->rx_data_expect = 1;
        call->rx_data_eaten = 0;
        call->rx_first_oos = 0;
        call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
        call->creation_jif = jiffies;
        return call;
}
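/* A call returned by rxrpc_alloc_call() starts with a usage count of one
 * (the caller's reference), a 16-slot Tx ACK window and all four timers
 * initialised but not yet armed; the lifetimer is only started once the call
 * is bound to a connection in rxrpc_begin_client_call() or
 * rxrpc_incoming_call() below.
 */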

/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
                                                  struct sockaddr_rxrpc *srx,
                                                  gfp_t gfp)
{
        struct rxrpc_call *call;

        _enter("");

        ASSERT(rx->local != NULL);

        call = rxrpc_alloc_call(gfp);
        if (!call)
                return ERR_PTR(-ENOMEM);
        call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;

        sock_hold(&rx->sk);
        call->socket = rx;
        call->rx_data_post = 1;

        /* Record copies of information for hashtable lookup */
        call->family = rx->family;
        call->local = rx->local;
        switch (call->family) {
        case AF_INET:
                call->peer_ip.ipv4_addr = srx->transport.sin.sin_addr.s_addr;
                break;
        case AF_INET6:
                memcpy(call->peer_ip.ipv6_addr,
                       srx->transport.sin6.sin6_addr.in6_u.u6_addr8,
                       sizeof(call->peer_ip.ipv6_addr));
                break;
        }

        call->service_id = srx->srx_service;
        call->in_clientflag = 0;

        _leave(" = %p", call);
        return call;
}

/*
 * Begin client call.
 */
static int rxrpc_begin_client_call(struct rxrpc_call *call,
                                   struct rxrpc_conn_parameters *cp,
                                   struct sockaddr_rxrpc *srx,
                                   gfp_t gfp)
{
        int ret;

        /* Set up or get a connection record and set the protocol parameters,
         * including channel number and call ID.
         */
        ret = rxrpc_connect_call(call, cp, srx, gfp);
        if (ret < 0)
                return ret;

        call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;

        /* Add the new call to the hashtable */
        rxrpc_call_hash_add(call);

        spin_lock(&call->conn->params.peer->lock);
        hlist_add_head(&call->error_link, &call->conn->params.peer->error_targets);
        spin_unlock(&call->conn->params.peer->lock);

        call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
        add_timer(&call->lifetimer);
        return 0;
}
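/* By the time rxrpc_begin_client_call() returns zero, the call is connected,
 * hashed for lookup by incoming packets, linked onto the peer's
 * error_targets list and has its lifetimer armed for rxrpc_max_call_lifetime
 * (60 seconds by default, see above).
 */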

/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
                                         struct rxrpc_conn_parameters *cp,
                                         struct sockaddr_rxrpc *srx,
                                         unsigned long user_call_ID,
                                         gfp_t gfp)
{
        struct rxrpc_call *call, *xcall;
        struct rb_node *parent, **pp;
        int ret;

        _enter("%p,%lx", rx, user_call_ID);

        call = rxrpc_alloc_client_call(rx, srx, gfp);
        if (IS_ERR(call)) {
                _leave(" = %ld", PTR_ERR(call));
                return call;
        }

        /* Publish the call, even though it is incompletely set up as yet */
        call->user_call_ID = user_call_ID;
        __set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

        write_lock(&rx->call_lock);

        pp = &rx->calls.rb_node;
        parent = NULL;
        while (*pp) {
                parent = *pp;
                xcall = rb_entry(parent, struct rxrpc_call, sock_node);

                if (user_call_ID < xcall->user_call_ID)
                        pp = &(*pp)->rb_left;
                else if (user_call_ID > xcall->user_call_ID)
                        pp = &(*pp)->rb_right;
                else
                        goto found_user_ID_now_present;
        }

        rxrpc_get_call(call);

        rb_link_node(&call->sock_node, parent, pp);
        rb_insert_color(&call->sock_node, &rx->calls);
        write_unlock(&rx->call_lock);

        write_lock_bh(&rxrpc_call_lock);
        list_add_tail(&call->link, &rxrpc_calls);
        write_unlock_bh(&rxrpc_call_lock);

        ret = rxrpc_begin_client_call(call, cp, srx, gfp);
        if (ret < 0)
                goto error;

        _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

        _leave(" = %p [new]", call);
        return call;

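        /* Unwind a partially set-up call: the two rxrpc_put_call() calls
         * below appear to balance the reference taken above for the socket's
         * call tree and the caller's initial reference from the allocation.
         */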
error:
        write_lock(&rx->call_lock);
        rb_erase(&call->sock_node, &rx->calls);
        write_unlock(&rx->call_lock);
        rxrpc_put_call(call);

        write_lock_bh(&rxrpc_call_lock);
        list_del(&call->link);
        write_unlock_bh(&rxrpc_call_lock);

        rxrpc_put_call(call);
        _leave(" = %d", ret);
        return ERR_PTR(ret);

        /* We unexpectedly found the user ID in the list after taking
         * the call_lock. This shouldn't happen unless the user races
         * with itself and tries to add the same user ID twice at the
         * same time in different threads.
         */
found_user_ID_now_present:
        write_unlock(&rx->call_lock);
        rxrpc_put_call(call);
        _leave(" = -EEXIST [%p]", call);
        return ERR_PTR(-EEXIST);
}

/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
                                       struct rxrpc_connection *conn,
                                       struct sk_buff *skb)
{
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        struct rxrpc_call *call, *candidate;
        struct rb_node **p, *parent;
        u32 call_id;

        _enter(",%d", conn->debug_id);

        ASSERT(rx != NULL);

        candidate = rxrpc_alloc_call(GFP_NOIO);
        if (!candidate)
                return ERR_PTR(-EBUSY);

        candidate->socket = rx;
        candidate->conn = conn;
        candidate->cid = sp->hdr.cid;
        candidate->call_id = sp->hdr.callNumber;
        candidate->channel = sp->hdr.cid & RXRPC_CHANNELMASK;
        candidate->rx_data_post = 0;
        candidate->state = RXRPC_CALL_SERVER_ACCEPTING;
        if (conn->security_ix > 0)
                candidate->state = RXRPC_CALL_SERVER_SECURING;

        write_lock_bh(&conn->lock);

        /* set the channel for this call */
        call = conn->channels[candidate->channel];
        _debug("channel[%u] is %p", candidate->channel, call);
        if (call && call->call_id == sp->hdr.callNumber) {
                /* already set; must've been a duplicate packet */
                _debug("extant call [%d]", call->state);
                ASSERTCMP(call->conn, ==, conn);

                read_lock(&call->state_lock);
                switch (call->state) {
                case RXRPC_CALL_LOCALLY_ABORTED:
                        if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
                                rxrpc_queue_call(call);
                case RXRPC_CALL_REMOTELY_ABORTED:
                        read_unlock(&call->state_lock);
                        goto aborted_call;
                default:
                        rxrpc_get_call(call);
                        read_unlock(&call->state_lock);
                        goto extant_call;
                }
        }

        if (call) {
                /* it seems the channel is still in use from the previous call
                 * - ditch the old binding if its call is now complete */
                _debug("CALL: %u { %s }",
                       call->debug_id, rxrpc_call_states[call->state]);

                if (call->state >= RXRPC_CALL_COMPLETE) {
                        conn->channels[call->channel] = NULL;
                } else {
                        write_unlock_bh(&conn->lock);
                        kmem_cache_free(rxrpc_call_jar, candidate);
                        _leave(" = -EBUSY");
                        return ERR_PTR(-EBUSY);
                }
        }

        /* check the call number isn't duplicate */
        _debug("check dup");
        call_id = sp->hdr.callNumber;
        p = &conn->calls.rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;
                call = rb_entry(parent, struct rxrpc_call, conn_node);

                /* The tree is sorted in order of the __be32 value without
                 * turning it into host order.
                 */
                if (call_id < call->call_id)
                        p = &(*p)->rb_left;
                else if (call_id > call->call_id)
                        p = &(*p)->rb_right;
                else
                        goto old_call;
        }

        /* make the call available */
        _debug("new call");
        call = candidate;
        candidate = NULL;
        rb_link_node(&call->conn_node, parent, p);
        rb_insert_color(&call->conn_node, &conn->calls);
        conn->channels[call->channel] = call;
        sock_hold(&rx->sk);
        rxrpc_get_connection(conn);
        write_unlock_bh(&conn->lock);

        spin_lock(&conn->params.peer->lock);
        hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
        spin_unlock(&conn->params.peer->lock);

        write_lock_bh(&rxrpc_call_lock);
        list_add_tail(&call->link, &rxrpc_calls);
        write_unlock_bh(&rxrpc_call_lock);

        /* Record copies of information for hashtable lookup */
        call->family = rx->family;
        call->local = conn->params.local;
        switch (call->family) {
        case AF_INET:
                call->peer_ip.ipv4_addr =
                        conn->params.peer->srx.transport.sin.sin_addr.s_addr;
                break;
        case AF_INET6:
                memcpy(call->peer_ip.ipv6_addr,
                       conn->params.peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8,
                       sizeof(call->peer_ip.ipv6_addr));
                break;
        default:
                break;
        }
        call->epoch = conn->proto.epoch;
        call->service_id = conn->params.service_id;
        call->in_clientflag = conn->proto.in_clientflag;
        /* Add the new call to the hashtable */
        rxrpc_call_hash_add(call);

        _net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

        call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
        add_timer(&call->lifetimer);
        _leave(" = %p {%d} [new]", call, call->debug_id);
        return call;

extant_call:
        write_unlock_bh(&conn->lock);
        kmem_cache_free(rxrpc_call_jar, candidate);
        _leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
        return call;

aborted_call:
        write_unlock_bh(&conn->lock);
        kmem_cache_free(rxrpc_call_jar, candidate);
        _leave(" = -ECONNABORTED");
        return ERR_PTR(-ECONNABORTED);

old_call:
        write_unlock_bh(&conn->lock);
        kmem_cache_free(rxrpc_call_jar, candidate);
        _leave(" = -ECONNRESET [old]");
        return ERR_PTR(-ECONNRESET);
}
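/* rxrpc_incoming_call() returns the new call on success, the extant call if
 * the packet was a duplicate for a live call, -EBUSY if no memory was
 * available or the channel is still occupied by an incomplete call,
 * -ECONNABORTED if the extant call had been aborted and -ECONNRESET if the
 * call number has already been used on this connection.
 */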

/*
 * detach a call from a socket and set up for release
 */
void rxrpc_release_call(struct rxrpc_call *call)
{
        struct rxrpc_connection *conn = call->conn;
        struct rxrpc_sock *rx = call->socket;

        _enter("{%d,%d,%d,%d}",
               call->debug_id, atomic_read(&call->usage),
               atomic_read(&call->ackr_not_idle),
               call->rx_first_oos);

        spin_lock_bh(&call->lock);
        if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
                BUG();
        spin_unlock_bh(&call->lock);

        /* dissociate from the socket
         * - the socket's ref on the call is passed to the death timer
         */
        _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

        write_lock_bh(&rx->call_lock);
        if (!list_empty(&call->accept_link)) {
                _debug("unlinking once-pending call %p { e=%lx f=%lx }",
                       call, call->events, call->flags);
                ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
                list_del_init(&call->accept_link);
                sk_acceptq_removed(&rx->sk);
        } else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
                rb_erase(&call->sock_node, &rx->calls);
                memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
                clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
        }
        write_unlock_bh(&rx->call_lock);

        /* free up the channel for reuse */
        spin_lock(&conn->channel_lock);
        write_lock_bh(&conn->lock);
        write_lock(&call->state_lock);

        rxrpc_disconnect_call(call);

        spin_unlock(&conn->channel_lock);

        if (call->state < RXRPC_CALL_COMPLETE &&
            call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
                _debug("+++ ABORTING STATE %d +++\n", call->state);
                call->state = RXRPC_CALL_LOCALLY_ABORTED;
                call->local_abort = RX_CALL_DEAD;
                set_bit(RXRPC_CALL_EV_ABORT, &call->events);
                rxrpc_queue_call(call);
        }
        write_unlock(&call->state_lock);
        write_unlock_bh(&conn->lock);

        /* clean up the Rx queue */
        if (!skb_queue_empty(&call->rx_queue) ||
            !skb_queue_empty(&call->rx_oos_queue)) {
                struct rxrpc_skb_priv *sp;
                struct sk_buff *skb;

                _debug("purge Rx queues");

                spin_lock_bh(&call->lock);
                while ((skb = skb_dequeue(&call->rx_queue)) ||
                       (skb = skb_dequeue(&call->rx_oos_queue))) {
                        sp = rxrpc_skb(skb);
                        if (sp->call) {
                                ASSERTCMP(sp->call, ==, call);
                                rxrpc_put_call(call);
                                sp->call = NULL;
                        }
                        skb->destructor = NULL;
                        spin_unlock_bh(&call->lock);

                        _debug("- zap %s %%%u #%u",
                               rxrpc_pkts[sp->hdr.type],
                               sp->hdr.serial, sp->hdr.seq);
                        rxrpc_free_skb(skb);
                        spin_lock_bh(&call->lock);
                }
                spin_unlock_bh(&call->lock);

                ASSERTCMP(call->state, !=, RXRPC_CALL_COMPLETE);
        }

        del_timer_sync(&call->resend_timer);
        del_timer_sync(&call->ack_timer);
        del_timer_sync(&call->lifetimer);
        call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
        add_timer(&call->deadspan);

        _leave("");
}

/*
 * handle a dead call being ready for reaping
 */
static void rxrpc_dead_call_expired(unsigned long _call)
{
        struct rxrpc_call *call = (struct rxrpc_call *) _call;

        _enter("{%d}", call->debug_id);

        write_lock_bh(&call->state_lock);
        call->state = RXRPC_CALL_DEAD;
        write_unlock_bh(&call->state_lock);
        rxrpc_put_call(call);
}

/*
 * mark a call as to be released, aborting it if it's still in progress
 * - called with softirqs disabled
 */
static void rxrpc_mark_call_released(struct rxrpc_call *call)
{
        bool sched;

        write_lock(&call->state_lock);
        if (call->state < RXRPC_CALL_DEAD) {
                sched = false;
                if (call->state < RXRPC_CALL_COMPLETE) {
                        _debug("abort call %p", call);
                        call->state = RXRPC_CALL_LOCALLY_ABORTED;
                        call->local_abort = RX_CALL_DEAD;
                        if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
                                sched = true;
                }
                if (!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
                        sched = true;
                if (sched)
                        rxrpc_queue_call(call);
        }
        write_unlock(&call->state_lock);
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
        struct rxrpc_call *call;
        struct rb_node *p;

        _enter("%p", rx);

        read_lock_bh(&rx->call_lock);

        /* mark all the calls as no longer wanting incoming packets */
        for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
                call = rb_entry(p, struct rxrpc_call, sock_node);
                rxrpc_mark_call_released(call);
        }

        /* kill the not-yet-accepted incoming calls */
        list_for_each_entry(call, &rx->secureq, accept_link) {
                rxrpc_mark_call_released(call);
        }

        list_for_each_entry(call, &rx->acceptq, accept_link) {
                rxrpc_mark_call_released(call);
        }

        read_unlock_bh(&rx->call_lock);
        _leave("");
}

/*
 * release a call
 */
void __rxrpc_put_call(struct rxrpc_call *call)
{
        ASSERT(call != NULL);

        _enter("%p{u=%d}", call, atomic_read(&call->usage));

        ASSERTCMP(atomic_read(&call->usage), >, 0);

        if (atomic_dec_and_test(&call->usage)) {
                _debug("call %d dead", call->debug_id);
                ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
                rxrpc_queue_work(&call->destroyer);
        }
        _leave("");
}

/*
 * clean up a call
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
        _net("DESTROY CALL %d", call->debug_id);

        ASSERT(call->socket);

        memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

        del_timer_sync(&call->lifetimer);
        del_timer_sync(&call->deadspan);
        del_timer_sync(&call->ack_timer);
        del_timer_sync(&call->resend_timer);

        ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
        ASSERTCMP(call->events, ==, 0);
        if (work_pending(&call->processor)) {
                _debug("defer destroy");
                rxrpc_queue_work(&call->destroyer);
                return;
        }

        if (call->conn) {
                spin_lock(&call->conn->params.peer->lock);
                hlist_del_init(&call->error_link);
                spin_unlock(&call->conn->params.peer->lock);

                write_lock_bh(&call->conn->lock);
                rb_erase(&call->conn_node, &call->conn->calls);
                write_unlock_bh(&call->conn->lock);
                rxrpc_put_connection(call->conn);
        }

        /* Remove the call from the hash */
        rxrpc_call_hash_del(call);

        if (call->acks_window) {
                _debug("kill Tx window %d",
                       CIRC_CNT(call->acks_head, call->acks_tail,
                                call->acks_winsz));
                smp_mb();
                while (CIRC_CNT(call->acks_head, call->acks_tail,
                                call->acks_winsz) > 0) {
                        struct rxrpc_skb_priv *sp;
                        unsigned long _skb;

                        _skb = call->acks_window[call->acks_tail] & ~1;
                        sp = rxrpc_skb((struct sk_buff *)_skb);
                        _debug("+++ clear Tx %u", sp->hdr.seq);
                        rxrpc_free_skb((struct sk_buff *)_skb);
                        call->acks_tail =
                                (call->acks_tail + 1) & (call->acks_winsz - 1);
                }

                kfree(call->acks_window);
        }

        rxrpc_free_skb(call->tx_pending);

        rxrpc_purge_queue(&call->rx_queue);
        ASSERT(skb_queue_empty(&call->rx_oos_queue));
        sock_put(&call->socket->sk);
        kmem_cache_free(rxrpc_call_jar, call);
}

/*
 * destroy a call
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
        struct rxrpc_call *call =
                container_of(work, struct rxrpc_call, destroyer);

        _enter("%p{%d,%d,%p}",
               call, atomic_read(&call->usage), call->channel, call->conn);

        ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);

        write_lock_bh(&rxrpc_call_lock);
        list_del_init(&call->link);
        write_unlock_bh(&rxrpc_call_lock);

        rxrpc_cleanup_call(call);
        _leave("");
}

/*
 * preemptively destroy all the call records from a transport endpoint rather
 * than waiting for them to time out
 */
void __exit rxrpc_destroy_all_calls(void)
{
        struct rxrpc_call *call;

        _enter("");
        write_lock_bh(&rxrpc_call_lock);

        while (!list_empty(&rxrpc_calls)) {
                call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
                _debug("Zapping call %p", call);

                list_del_init(&call->link);

                switch (atomic_read(&call->usage)) {
                case 0:
                        ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
                        break;
                case 1:
                        if (del_timer_sync(&call->deadspan) != 0 &&
                            call->state != RXRPC_CALL_DEAD)
                                rxrpc_dead_call_expired((unsigned long) call);
                        if (call->state != RXRPC_CALL_DEAD)
                                break;
                default:
                        pr_err("Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
                               call, atomic_read(&call->usage),
                               atomic_read(&call->ackr_not_idle),
                               rxrpc_call_states[call->state],
                               call->flags, call->events);
                        if (!skb_queue_empty(&call->rx_queue))
                                pr_err("Rx queue occupied\n");
                        if (!skb_queue_empty(&call->rx_oos_queue))
                                pr_err("OOS queue occupied\n");
                        break;
                }

                write_unlock_bh(&rxrpc_call_lock);
                cond_resched();
                write_lock_bh(&rxrpc_call_lock);
        }

        write_unlock_bh(&rxrpc_call_lock);
        _leave("");
}

/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
        struct rxrpc_call *call = (struct rxrpc_call *) _call;

        if (call->state >= RXRPC_CALL_COMPLETE)
                return;

        _enter("{%d}", call->debug_id);
        read_lock_bh(&call->state_lock);
        if (call->state < RXRPC_CALL_COMPLETE) {
                set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
                rxrpc_queue_call(call);
        }
        read_unlock_bh(&call->state_lock);
}

/*
 * handle resend timer expiry
 * - may not take call->state_lock as this can deadlock against del_timer_sync()
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
        struct rxrpc_call *call = (struct rxrpc_call *) _call;

        _enter("{%d}", call->debug_id);

        if (call->state >= RXRPC_CALL_COMPLETE)
                return;

        clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
        if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
                rxrpc_queue_call(call);
}

/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
        struct rxrpc_call *call = (struct rxrpc_call *) _call;

        _enter("{%d}", call->debug_id);

        if (call->state >= RXRPC_CALL_COMPLETE)
                return;

        read_lock_bh(&call->state_lock);
        if (call->state < RXRPC_CALL_COMPLETE &&
            !test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
                rxrpc_queue_call(call);
        read_unlock_bh(&call->state_lock);
}