/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/hashtable.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Maximum lifetime of a call (in jiffies).
 */
unsigned int rxrpc_max_call_lifetime = 60 * HZ;

/*
 * Time till dead call expires after last use (in jiffies).
 */
unsigned int rxrpc_dead_call_expiry = 2 * HZ;

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_CLIENT_SEND_REQUEST] = "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]  = "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]   = "ClRcvRpl",
	[RXRPC_CALL_CLIENT_FINAL_ACK]    = "ClFnlACK",
	[RXRPC_CALL_SERVER_SECURING]     = "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]    = "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST] = "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]  = "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]   = "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]    = "SvAwtACK",
	[RXRPC_CALL_COMPLETE]            = "Complete",
	[RXRPC_CALL_SERVER_BUSY]         = "SvBusy  ",
	[RXRPC_CALL_REMOTELY_ABORTED]    = "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]     = "LocAbort",
	[RXRPC_CALL_NETWORK_ERROR]       = "NetError",
	[RXRPC_CALL_DEAD]                = "Dead    ",
};
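
/*
 * Note: the state names above are fixed-width, eight-character strings
 * (hence the padding in "SvBusy  " and "Dead    "), which keeps columnar
 * debugging/status output such as the rxrpc proc listing aligned.
 */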

struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_dead_call_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);

static DEFINE_SPINLOCK(rxrpc_call_hash_lock);
static DEFINE_HASHTABLE(rxrpc_call_hash, 10);

/*
 * Hash function for rxrpc_call_hash
 */
static unsigned long rxrpc_call_hashfunc(
	u8 in_clientflag,
	u32 cid,
	u32 call_id,
	u32 epoch,
	u16 service_id,
	sa_family_t proto,
	void *localptr,
	unsigned int addr_size,
	const u8 *peer_addr)
{
	const u16 *p;
	unsigned int i;
	unsigned long key;

	_enter("");

	key = (unsigned long)localptr;
	/* We just want to add up the __be32 values, so forcing the
	 * cast should be okay.
	 */
	key += epoch;
	key += service_id;
	key += call_id;
	key += (cid & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT;
	key += cid & RXRPC_CHANNELMASK;
	key += in_clientflag;
	key += proto;
	/* Step through the peer address in 16-bit portions for speed */
	for (i = 0, p = (const u16 *)peer_addr; i < addr_size >> 1; i++, p++)
		key += *p;
	_leave(" key = 0x%lx", key);
	return key;
}
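
/*
 * Note: the key computed above is only an additive mix of the connection
 * and call identifiers plus the peer address, so unrelated calls can hash
 * to the same bucket or even the same key; rxrpc_find_call_hash() therefore
 * rechecks every field before declaring a match.
 */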

/*
 * Add a call to the hashtable
 */
static void rxrpc_call_hash_add(struct rxrpc_call *call)
{
	unsigned long key;
	unsigned int addr_size = 0;

	_enter("");
	switch (call->proto) {
	case AF_INET:
		addr_size = sizeof(call->peer_ip.ipv4_addr);
		break;
	case AF_INET6:
		addr_size = sizeof(call->peer_ip.ipv6_addr);
		break;
	default:
		break;
	}
	key = rxrpc_call_hashfunc(call->in_clientflag, call->cid,
				  call->call_id, call->epoch,
				  call->service_id, call->proto,
				  call->conn->trans->local, addr_size,
				  call->peer_ip.ipv6_addr);
	/* Store the full key in the call */
	call->hash_key = key;
	spin_lock(&rxrpc_call_hash_lock);
	hash_add_rcu(rxrpc_call_hash, &call->hash_node, key);
	spin_unlock(&rxrpc_call_hash_lock);
	_leave("");
}

/*
 * Remove a call from the hashtable
 */
static void rxrpc_call_hash_del(struct rxrpc_call *call)
{
	_enter("");
	spin_lock(&rxrpc_call_hash_lock);
	hash_del_rcu(&call->hash_node);
	spin_unlock(&rxrpc_call_hash_lock);
	_leave("");
}
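
/*
 * Additions to and removals from the hashtable are serialised by
 * rxrpc_call_hash_lock; the _rcu helpers are used so that
 * rxrpc_find_call_hash() can walk a bucket without taking that spinlock.
 */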

/*
 * Find a call in the hashtable and return it, or NULL if it
 * isn't there.
 */
struct rxrpc_call *rxrpc_find_call_hash(
	struct rxrpc_host_header *hdr,
	void *localptr,
	sa_family_t proto,
	const void *peer_addr)
{
	unsigned long key;
	unsigned int addr_size = 0;
	struct rxrpc_call *call = NULL;
	struct rxrpc_call *ret = NULL;
	u8 in_clientflag = hdr->flags & RXRPC_CLIENT_INITIATED;

	_enter("");
	switch (proto) {
	case AF_INET:
		addr_size = sizeof(call->peer_ip.ipv4_addr);
		break;
	case AF_INET6:
		addr_size = sizeof(call->peer_ip.ipv6_addr);
		break;
	default:
		break;
	}

	key = rxrpc_call_hashfunc(in_clientflag, hdr->cid, hdr->callNumber,
				  hdr->epoch, hdr->serviceId,
				  proto, localptr, addr_size,
				  peer_addr);
	hash_for_each_possible_rcu(rxrpc_call_hash, call, hash_node, key) {
		if (call->hash_key == key &&
		    call->call_id == hdr->callNumber &&
		    call->cid == hdr->cid &&
		    call->in_clientflag == in_clientflag &&
		    call->service_id == hdr->serviceId &&
		    call->proto == proto &&
		    call->local == localptr &&
		    memcmp(call->peer_ip.ipv6_addr, peer_addr,
			   addr_size) == 0 &&
		    call->epoch == hdr->epoch) {
			ret = call;
			break;
		}
	}
	_leave(" = %p", ret);
	return ret;
}

/*
 * allocate a new call
 */
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->acks_winsz = 16;
	call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
				    gfp);
	if (!call->acks_window) {
		kmem_cache_free(rxrpc_call_jar, call);
		return NULL;
	}

	setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
		    (unsigned long) call);
	setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
		    (unsigned long) call);
	setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
		    (unsigned long) call);
	setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
		    (unsigned long) call);
	INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->accept_link);
	skb_queue_head_init(&call->rx_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	init_waitqueue_head(&call->tx_waitq);
	spin_lock_init(&call->lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);
	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;

	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_data_expect = 1;
	call->rx_data_eaten = 0;
	call->rx_first_oos = 0;
	call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
	call->creation_jif = jiffies;
	return call;
}
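
/*
 * Note: rxrpc_alloc_call() hands back the call with a usage count of 1 and
 * in the RXRPC_CALL_CLIENT_SEND_REQUEST state; rxrpc_incoming_call()
 * overrides that state for server-side calls (SERVER_ACCEPTING, or
 * SERVER_SECURING if the connection carries a security index).
 */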

/*
 * allocate a new client call and attempt to get a connection slot for it
 */
static struct rxrpc_call *rxrpc_alloc_client_call(
	struct rxrpc_sock *rx,
	struct rxrpc_transport *trans,
	struct rxrpc_conn_bundle *bundle,
	gfp_t gfp)
{
	struct rxrpc_call *call;
	int ret;

	_enter("");

	ASSERT(rx != NULL);
	ASSERT(trans != NULL);
	ASSERT(bundle != NULL);

	call = rxrpc_alloc_call(gfp);
	if (!call)
		return ERR_PTR(-ENOMEM);

	sock_hold(&rx->sk);
	call->socket = rx;
	call->rx_data_post = 1;

	ret = rxrpc_connect_call(rx, trans, bundle, call, gfp);
	if (ret < 0) {
		kmem_cache_free(rxrpc_call_jar, call);
		return ERR_PTR(ret);
	}

	/* Record copies of information for hashtable lookup */
	call->proto = rx->proto;
	call->local = trans->local;
	switch (call->proto) {
	case AF_INET:
		call->peer_ip.ipv4_addr =
			trans->peer->srx.transport.sin.sin_addr.s_addr;
		break;
	case AF_INET6:
		memcpy(call->peer_ip.ipv6_addr,
		       trans->peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8,
		       sizeof(call->peer_ip.ipv6_addr));
		break;
	}
	call->epoch = call->conn->epoch;
	call->service_id = call->conn->service_id;
	call->in_clientflag = call->conn->in_clientflag;
	/* Add the new call to the hashtable */
	rxrpc_call_hash_add(call);

	spin_lock(&call->conn->trans->peer->lock);
	list_add(&call->error_link, &call->conn->trans->peer->error_targets);
	spin_unlock(&call->conn->trans->peer->lock);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);

	_leave(" = %p", call);
	return call;
}

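/*
 * Note on the lookup below: the socket's calls tree is first searched under
 * the read lock; if the user ID is absent, a candidate call is allocated
 * (which may sleep) and the tree is searched again under the write lock in
 * case a competing thread inserted the same user ID in the meantime, in
 * which case the candidate is discarded.
 */
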
/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_transport *trans,
					 struct rxrpc_conn_bundle *bundle,
					 unsigned long user_call_ID,
					 int create,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *candidate;
	struct rb_node *p, *parent, **pp;

	_enter("%p,%d,%d,%lx,%d",
	       rx, trans ? trans->debug_id : -1, bundle ? bundle->debug_id : -1,
	       user_call_ID, create);

	/* search the extant calls first for one that matches the specified
	 * user ID */
	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);

	if (!create || !trans)
		return ERR_PTR(-EBADSLT);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_client_call(rx, trans, bundle, gfp);
	if (IS_ERR(candidate)) {
		_leave(" = %ld", PTR_ERR(candidate));
		return candidate;
	}

	candidate->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &candidate->flags);

	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_second;
	}

	/* second search also failed; add the new call */
	call = candidate;
	candidate = NULL;
	rxrpc_get_call(call);

	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

	/* we found the call in the list immediately */
found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [extant %d]", call, atomic_read(&call->usage));
	return call;

	/* we found the call on the second time through the list */
found_extant_second:
	rxrpc_get_call(call);
	write_unlock(&rx->call_lock);
	rxrpc_put_call(candidate);
	_leave(" = %p [second %d]", call, atomic_read(&call->usage));
	return call;
}

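/*
 * Note on the function below: an incoming call is bound to the connection
 * channel encoded in the low bits of its cid.  A packet matching the call
 * that currently occupies the channel reuses that call; a channel whose
 * previous call has completed is reclaimed; otherwise -EBUSY is returned.
 * The call number is also checked against the connection's call tree so
 * that an old (replayed) call number is rejected with -ECONNRESET.
 */
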
/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
				       struct rxrpc_connection *conn,
				       struct rxrpc_host_header *hdr)
{
	struct rxrpc_call *call, *candidate;
	struct rb_node **p, *parent;
	u32 call_id;

	_enter(",%d", conn->debug_id);

	ASSERT(rx != NULL);

	candidate = rxrpc_alloc_call(GFP_NOIO);
	if (!candidate)
		return ERR_PTR(-EBUSY);

	candidate->socket = rx;
	candidate->conn = conn;
	candidate->cid = hdr->cid;
	candidate->call_id = hdr->callNumber;
	candidate->channel = hdr->cid & RXRPC_CHANNELMASK;
	candidate->rx_data_post = 0;
	candidate->state = RXRPC_CALL_SERVER_ACCEPTING;
	if (conn->security_ix > 0)
		candidate->state = RXRPC_CALL_SERVER_SECURING;

	write_lock_bh(&conn->lock);

	/* set the channel for this call */
	call = conn->channels[candidate->channel];
	_debug("channel[%u] is %p", candidate->channel, call);
	if (call && call->call_id == hdr->callNumber) {
		/* already set; must've been a duplicate packet */
		_debug("extant call [%d]", call->state);
		ASSERTCMP(call->conn, ==, conn);

		read_lock(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_LOCALLY_ABORTED:
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				rxrpc_queue_call(call);
			/* fall through */
		case RXRPC_CALL_REMOTELY_ABORTED:
			read_unlock(&call->state_lock);
			goto aborted_call;
		default:
			rxrpc_get_call(call);
			read_unlock(&call->state_lock);
			goto extant_call;
		}
	}

	if (call) {
		/* it seems the channel is still in use from the previous call
		 * - ditch the old binding if its call is now complete */
		_debug("CALL: %u { %s }",
		       call->debug_id, rxrpc_call_states[call->state]);

		if (call->state >= RXRPC_CALL_COMPLETE) {
			conn->channels[call->channel] = NULL;
		} else {
			write_unlock_bh(&conn->lock);
			kmem_cache_free(rxrpc_call_jar, candidate);
			_leave(" = -EBUSY");
			return ERR_PTR(-EBUSY);
		}
	}

	/* check the call number isn't duplicate */
	_debug("check dup");
	call_id = hdr->callNumber;
	p = &conn->calls.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		call = rb_entry(parent, struct rxrpc_call, conn_node);

		/* The tree is sorted in order of the __be32 value without
		 * turning it into host order.
		 */
		if (call_id < call->call_id)
			p = &(*p)->rb_left;
		else if (call_id > call->call_id)
			p = &(*p)->rb_right;
		else
			goto old_call;
	}

	/* make the call available */
	_debug("new call");
	call = candidate;
	candidate = NULL;
	rb_link_node(&call->conn_node, parent, p);
	rb_insert_color(&call->conn_node, &conn->calls);
	conn->channels[call->channel] = call;
	sock_hold(&rx->sk);
	atomic_inc(&conn->usage);
	write_unlock_bh(&conn->lock);

	spin_lock(&conn->trans->peer->lock);
	list_add(&call->error_link, &conn->trans->peer->error_targets);
	spin_unlock(&conn->trans->peer->lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	/* Record copies of information for hashtable lookup */
	call->proto = rx->proto;
	call->local = conn->trans->local;
	switch (call->proto) {
	case AF_INET:
		call->peer_ip.ipv4_addr =
			conn->trans->peer->srx.transport.sin.sin_addr.s_addr;
		break;
	case AF_INET6:
		memcpy(call->peer_ip.ipv6_addr,
		       conn->trans->peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8,
		       sizeof(call->peer_ip.ipv6_addr));
		break;
	default:
		break;
	}
	call->epoch = conn->epoch;
	call->service_id = conn->service_id;
	call->in_clientflag = conn->in_clientflag;
	/* Add the new call to the hashtable */
	rxrpc_call_hash_add(call);

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	_leave(" = %p {%d} [new]", call, call->debug_id);
	return call;

extant_call:
	write_unlock_bh(&conn->lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
	return call;

aborted_call:
	write_unlock_bh(&conn->lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNABORTED");
	return ERR_PTR(-ECONNABORTED);

old_call:
	write_unlock_bh(&conn->lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNRESET [old]");
	return ERR_PTR(-ECONNRESET);
}

/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *rx,
					  unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	/* search the extant calls for one that matches the specified user
	 * ID */
	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

	/* we found the call in the list immediately */
found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}

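/*
 * Note on the function below: releasing a call detaches it from the socket
 * and hands the socket's reference over to the deadspan timer; when that
 * timer fires, rxrpc_dead_call_expired() moves the call to RXRPC_CALL_DEAD
 * and drops the reference, which normally triggers destruction.
 */
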
/*
 * detach a call from a socket and set up for release
 */
void rxrpc_release_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_sock *rx = call->socket;

	_enter("{%d,%d,%d,%d}",
	       call->debug_id, atomic_read(&call->usage),
	       atomic_read(&call->ackr_not_idle),
	       call->rx_first_oos);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	/* dissociate from the socket
	 * - the socket's ref on the call is passed to the death timer
	 */
	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	write_lock_bh(&rx->call_lock);
	if (!list_empty(&call->accept_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		list_del_init(&call->accept_link);
		sk_acceptq_removed(&rx->sk);
	} else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}
	write_unlock_bh(&rx->call_lock);

	/* free up the channel for reuse */
	spin_lock(&conn->trans->client_lock);
	write_lock_bh(&conn->lock);
	write_lock(&call->state_lock);

	if (conn->channels[call->channel] == call)
		conn->channels[call->channel] = NULL;

	if (conn->out_clientflag && conn->bundle) {
		conn->avail_calls++;
		switch (conn->avail_calls) {
		case 1:
			list_move_tail(&conn->bundle_link,
				       &conn->bundle->avail_conns);
			/* fall through */
		case 2 ... RXRPC_MAXCALLS - 1:
			ASSERT(conn->channels[0] == NULL ||
			       conn->channels[1] == NULL ||
			       conn->channels[2] == NULL ||
			       conn->channels[3] == NULL);
			break;
		case RXRPC_MAXCALLS:
			list_move_tail(&conn->bundle_link,
				       &conn->bundle->unused_conns);
			ASSERT(conn->channels[0] == NULL &&
			       conn->channels[1] == NULL &&
			       conn->channels[2] == NULL &&
			       conn->channels[3] == NULL);
			break;
		default:
			pr_err("conn->avail_calls=%d\n", conn->avail_calls);
			BUG();
		}
	}

	spin_unlock(&conn->trans->client_lock);

	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
		_debug("+++ ABORTING STATE %d +++\n", call->state);
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->local_abort = RX_CALL_DEAD;
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		rxrpc_queue_call(call);
	}
	write_unlock(&call->state_lock);
	write_unlock_bh(&conn->lock);

	/* clean up the Rx queue */
	if (!skb_queue_empty(&call->rx_queue) ||
	    !skb_queue_empty(&call->rx_oos_queue)) {
		struct rxrpc_skb_priv *sp;
		struct sk_buff *skb;

		_debug("purge Rx queues");

		spin_lock_bh(&call->lock);
		while ((skb = skb_dequeue(&call->rx_queue)) ||
		       (skb = skb_dequeue(&call->rx_oos_queue))) {
			sp = rxrpc_skb(skb);
			if (sp->call) {
				ASSERTCMP(sp->call, ==, call);
				rxrpc_put_call(call);
				sp->call = NULL;
			}
			skb->destructor = NULL;
			spin_unlock_bh(&call->lock);

			_debug("- zap %s %%%u #%u",
			       rxrpc_pkts[sp->hdr.type],
			       sp->hdr.serial, sp->hdr.seq);
			rxrpc_free_skb(skb);
			spin_lock_bh(&call->lock);
		}
		spin_unlock_bh(&call->lock);

		ASSERTCMP(call->state, !=, RXRPC_CALL_COMPLETE);
	}

	del_timer_sync(&call->resend_timer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->lifetimer);
	call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
	add_timer(&call->deadspan);

	_leave("");
}

/*
 * handle a dead call being ready for reaping
 */
static void rxrpc_dead_call_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_DEAD;
	write_unlock_bh(&call->state_lock);
	rxrpc_put_call(call);
}

/*
 * mark a call as to be released, aborting it if it's still in progress
 * - called with softirqs disabled
 */
static void rxrpc_mark_call_released(struct rxrpc_call *call)
{
	bool sched;

	write_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_DEAD) {
		sched = false;
		if (call->state < RXRPC_CALL_COMPLETE) {
			_debug("abort call %p", call);
			call->state = RXRPC_CALL_LOCALLY_ABORTED;
			call->local_abort = RX_CALL_DEAD;
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				sched = true;
		}
		if (!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
			sched = true;
		if (sched)
			rxrpc_queue_call(call);
	}
	write_unlock(&call->state_lock);
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p", rx);

	read_lock_bh(&rx->call_lock);

	/* mark all the calls as no longer wanting incoming packets */
	for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
		call = rb_entry(p, struct rxrpc_call, sock_node);
		rxrpc_mark_call_released(call);
	}

	/* kill the not-yet-accepted incoming calls */
	list_for_each_entry(call, &rx->secureq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	list_for_each_entry(call, &rx->acceptq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	read_unlock_bh(&rx->call_lock);
	_leave("");
}

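/*
 * Note: __rxrpc_put_call() below expects the final reference to be dropped
 * only once the call has reached RXRPC_CALL_DEAD; the actual teardown is
 * then deferred to the call's destroyer work item rather than being done in
 * the caller's context.
 */
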
/*
 * release a call
 */
void __rxrpc_put_call(struct rxrpc_call *call)
{
	ASSERT(call != NULL);

	_enter("%p{u=%d}", call, atomic_read(&call->usage));

	ASSERTCMP(atomic_read(&call->usage), >, 0);

	if (atomic_dec_and_test(&call->usage)) {
		_debug("call %d dead", call->debug_id);
		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
		rxrpc_queue_work(&call->destroyer);
	}
	_leave("");
}

/*
 * clean up a call
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

	ASSERT(call->socket);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->lifetimer);
	del_timer_sync(&call->deadspan);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->resend_timer);

	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->events, ==, 0);
	if (work_pending(&call->processor)) {
		_debug("defer destroy");
		rxrpc_queue_work(&call->destroyer);
		return;
	}

	if (call->conn) {
		spin_lock(&call->conn->trans->peer->lock);
		list_del(&call->error_link);
		spin_unlock(&call->conn->trans->peer->lock);

		write_lock_bh(&call->conn->lock);
		rb_erase(&call->conn_node, &call->conn->calls);
		write_unlock_bh(&call->conn->lock);
		rxrpc_put_connection(call->conn);
	}

	/* Remove the call from the hash */
	rxrpc_call_hash_del(call);

	if (call->acks_window) {
		_debug("kill Tx window %d",
		       CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz));
		smp_mb();
		while (CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz) > 0) {
			struct rxrpc_skb_priv *sp;
			unsigned long _skb;

			_skb = call->acks_window[call->acks_tail] & ~1;
			sp = rxrpc_skb((struct sk_buff *)_skb);
			_debug("+++ clear Tx %u", sp->hdr.seq);
			rxrpc_free_skb((struct sk_buff *)_skb);
			call->acks_tail =
				(call->acks_tail + 1) & (call->acks_winsz - 1);
		}

		kfree(call->acks_window);
	}

	rxrpc_free_skb(call->tx_pending);

	rxrpc_purge_queue(&call->rx_queue);
	ASSERT(skb_queue_empty(&call->rx_oos_queue));
	sock_put(&call->socket->sk);
	kmem_cache_free(rxrpc_call_jar, call);
}

/*
 * destroy a call
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, destroyer);

	_enter("%p{%d,%d,%p}",
	       call, atomic_read(&call->usage), call->channel, call->conn);

	ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	rxrpc_cleanup_call(call);
	_leave("");
}

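/*
 * Note on the function below: at module unload, a usage count of 0 means
 * the call is already dead and only needs unlinking; a count of 1 is
 * treated as the deadspan timer's reference, so the timer is cancelled and
 * its handler run synchronously; any other count indicates a leaked
 * reference and is reported via pr_err() instead of being destroyed.
 */
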
/*
 * preemptively destroy all the call records from a transport endpoint rather
 * than waiting for them to time out
 */
void __exit rxrpc_destroy_all_calls(void)
{
	struct rxrpc_call *call;

	_enter("");
	write_lock_bh(&rxrpc_call_lock);

	while (!list_empty(&rxrpc_calls)) {
		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		list_del_init(&call->link);

		switch (atomic_read(&call->usage)) {
		case 0:
			ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
			break;
		case 1:
			if (del_timer_sync(&call->deadspan) != 0 &&
			    call->state != RXRPC_CALL_DEAD)
				rxrpc_dead_call_expired((unsigned long) call);
			if (call->state != RXRPC_CALL_DEAD)
				break;
		default:
			pr_err("Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
			       call, atomic_read(&call->usage),
			       atomic_read(&call->ackr_not_idle),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);
			if (!skb_queue_empty(&call->rx_queue))
				pr_err("Rx queue occupied\n");
			if (!skb_queue_empty(&call->rx_oos_queue))
				pr_err("OOS queue occupied\n");
			break;
		}

		write_unlock_bh(&rxrpc_call_lock);
		cond_resched();
		write_lock_bh(&rxrpc_call_lock);
	}

	write_unlock_bh(&rxrpc_call_lock);
	_leave("");
}

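/*
 * The timer expiry handlers below run in softirq context, so they do no
 * real work themselves: each one just sets the relevant event bit and
 * queues the call so that the processing is done by the call's processor
 * work item.
 */
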
/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	_enter("{%d}", call->debug_id);
	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE) {
		set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
		rxrpc_queue_call(call);
	}
	read_unlock_bh(&call->state_lock);
}

/*
 * handle resend timer expiry
 * - may not take call->state_lock as this can deadlock against del_timer_sync()
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
		rxrpc_queue_call(call);
}

/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE &&
	    !test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
		rxrpc_queue_call(call);
	read_unlock_bh(&call->state_lock);
}