blob: f843397e03b64e5d6d834dd56af412d0968e270c [file] [log] [blame]
/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
11
Joe Perches9b6d5392016-06-02 12:08:52 -070012#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090014#include <linux/slab.h>
David Howells17926a72007-04-26 15:48:28 -070015#include <linux/module.h>
16#include <linux/circ_buf.h>
Tim Smith77276402014-03-03 23:04:45 +000017#include <linux/spinlock_types.h>
David Howells17926a72007-04-26 15:48:28 -070018#include <net/sock.h>
19#include <net/af_rxrpc.h>
20#include "ar-internal.h"
21
/*
 * Maximum lifetime of a call (in jiffies).  Used to arm call->lifetimer when
 * a call begins (see rxrpc_begin_client_call() and rxrpc_incoming_call()).
 */
unsigned int rxrpc_max_call_lifetime = 60 * HZ;
David Howells5873c082014-02-07 18:58:44 +000026
/*
 * Human-readable names for the call states, indexed by call state enum.
 * All strings are padded to 8 characters for aligned debug output (used
 * e.g. by the _debug() call in rxrpc_incoming_call()).
 */
const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED]		= "Uninit  ",
	[RXRPC_CALL_CLIENT_AWAIT_CONN]		= "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_CLIENT_FINAL_ACK]		= "ClFnlACK",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
};
42
/*
 * Human-readable names for the call completion reasons, indexed by the
 * completion enum; 8-character padded strings, matching rxrpc_call_states[].
 */
const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
	[RXRPC_CALL_SUCCEEDED]			= "Complete",
	[RXRPC_CALL_SERVER_BUSY]		= "SvBusy  ",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_LOCAL_ERROR]		= "LocError",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
};
51
/*
 * Three-letter tags for the call tracepoint operations, indexed by
 * enum rxrpc_call_trace; passed to trace_rxrpc_call() throughout this file.
 */
const char rxrpc_call_traces[rxrpc_call__nr_trace][4] = {
	[rxrpc_call_new_client]		= "NWc",	/* new client call */
	[rxrpc_call_new_service]	= "NWs",	/* new service call */
	[rxrpc_call_queued]		= "QUE",	/* queued, new ref taken */
	[rxrpc_call_queued_ref]		= "QUR",	/* queued, caller's ref passed */
	[rxrpc_call_seen]		= "SEE",	/* noted, no ref change */
	[rxrpc_call_got]		= "GOT",	/* ref taken */
	[rxrpc_call_got_skb]		= "Gsk",	/* ref taken for an skb */
	[rxrpc_call_got_userid]		= "Gus",	/* ref taken for user-ID tree */
	[rxrpc_call_put]		= "PUT",	/* ref dropped */
	[rxrpc_call_put_skb]		= "Psk",	/* skb's ref dropped */
	[rxrpc_call_put_userid]		= "Pus",	/* user-ID tree's ref dropped */
	[rxrpc_call_put_noqueue]	= "PNQ",	/* ref dropped, work not queued */
};
66
struct kmem_cache *rxrpc_call_jar;	/* slab cache for struct rxrpc_call */
LIST_HEAD(rxrpc_calls);			/* list of all extant calls */
DEFINE_RWLOCK(rxrpc_call_lock);		/* guards rxrpc_calls */

/* Timer expiry handlers; each receives the call pointer cast to
 * unsigned long, as wired up by setup_timer() in rxrpc_alloc_call().
 */
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);
static void rxrpc_cleanup_call(struct rxrpc_call *call);
David Howells17926a72007-04-26 15:48:28 -070075
76/*
David Howells2341e072016-06-09 23:02:51 +010077 * find an extant server call
78 * - called in process context with IRQs enabled
79 */
80struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
81 unsigned long user_call_ID)
82{
83 struct rxrpc_call *call;
84 struct rb_node *p;
85
86 _enter("%p,%lx", rx, user_call_ID);
87
88 read_lock(&rx->call_lock);
89
90 p = rx->calls.rb_node;
91 while (p) {
92 call = rb_entry(p, struct rxrpc_call, sock_node);
93
94 if (user_call_ID < call->user_call_ID)
95 p = p->rb_left;
96 else if (user_call_ID > call->user_call_ID)
97 p = p->rb_right;
98 else
99 goto found_extant_call;
100 }
101
102 read_unlock(&rx->call_lock);
103 _leave(" = NULL");
104 return NULL;
105
106found_extant_call:
David Howellsfff72422016-09-07 14:34:21 +0100107 rxrpc_get_call(call, rxrpc_call_got);
David Howells2341e072016-06-09 23:02:51 +0100108 read_unlock(&rx->call_lock);
109 _leave(" = %p [%d]", call, atomic_read(&call->usage));
110 return call;
111}
112
113/*
David Howells17926a72007-04-26 15:48:28 -0700114 * allocate a new call
115 */
116static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
117{
118 struct rxrpc_call *call;
119
120 call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
121 if (!call)
122 return NULL;
123
124 call->acks_winsz = 16;
125 call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
126 gfp);
127 if (!call->acks_window) {
128 kmem_cache_free(rxrpc_call_jar, call);
129 return NULL;
130 }
131
132 setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
133 (unsigned long) call);
David Howells17926a72007-04-26 15:48:28 -0700134 setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
135 (unsigned long) call);
136 setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
137 (unsigned long) call);
David Howells17926a72007-04-26 15:48:28 -0700138 INIT_WORK(&call->processor, &rxrpc_process_call);
David Howells999b69f2016-06-17 15:42:35 +0100139 INIT_LIST_HEAD(&call->link);
David Howells45025bc2016-08-24 07:30:52 +0100140 INIT_LIST_HEAD(&call->chan_wait_link);
David Howells17926a72007-04-26 15:48:28 -0700141 INIT_LIST_HEAD(&call->accept_link);
142 skb_queue_head_init(&call->rx_queue);
143 skb_queue_head_init(&call->rx_oos_queue);
David Howellsd0016482016-08-30 20:42:14 +0100144 skb_queue_head_init(&call->knlrecv_queue);
David Howells45025bc2016-08-24 07:30:52 +0100145 init_waitqueue_head(&call->waitq);
David Howells17926a72007-04-26 15:48:28 -0700146 spin_lock_init(&call->lock);
147 rwlock_init(&call->state_lock);
148 atomic_set(&call->usage, 1);
149 call->debug_id = atomic_inc_return(&rxrpc_debug_id);
David Howells17926a72007-04-26 15:48:28 -0700150
151 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
152
153 call->rx_data_expect = 1;
154 call->rx_data_eaten = 0;
155 call->rx_first_oos = 0;
David Howells817913d2014-02-07 18:10:30 +0000156 call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
David Howells17926a72007-04-26 15:48:28 -0700157 call->creation_jif = jiffies;
158 return call;
159}
160
/*
 * Allocate a new client call.
 *
 * Allocates a bare call via rxrpc_alloc_call() and stamps it with the
 * client-side initial state, the service ID from @srx and a pointer to the
 * owning socket.  Returns the call or ERR_PTR(-ENOMEM).
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
						  struct sockaddr_rxrpc *srx,
						  gfp_t gfp)
{
	struct rxrpc_call *call;

	_enter("");

	ASSERT(rx->local != NULL);

	call = rxrpc_alloc_call(gfp);
	if (!call)
		return ERR_PTR(-ENOMEM);
	/* Client calls start out waiting for a connection to be assigned. */
	call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
	call->rx_data_post = 1;
	call->service_id = srx->srx_service;
	/* Publish the socket pointer for RCU readers. */
	rcu_assign_pointer(call->socket, rx);

	_leave(" = %p", call);
	return call;
}
185
/*
 * Begin a client call.
 *
 * Attaches the call to a connection (assigning channel number and call ID),
 * registers the call as an error target on the peer, and arms the call
 * lifetime timer.  Returns 0 or a negative error from rxrpc_connect_call().
 */
static int rxrpc_begin_client_call(struct rxrpc_call *call,
				   struct rxrpc_conn_parameters *cp,
				   struct sockaddr_rxrpc *srx,
				   gfp_t gfp)
{
	int ret;

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(call, cp, srx, gfp);
	if (ret < 0)
		return ret;

	/* Hook the call up to the peer's error-distribution list so that
	 * network errors get reported to it.
	 */
	spin_lock(&call->conn->params.peer->lock);
	hlist_add_head(&call->error_link, &call->conn->params.peer->error_targets);
	spin_unlock(&call->conn->params.peer->lock);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	return 0;
}
211
/*
 * Set up a new client call for the given data.
 * - called in process context with IRQs enabled
 *
 * Allocates a client call, publishes it in the socket's user-ID rb-tree and
 * on the global call list, then connects it.  Returns the call with the
 * caller owning a ref, or an ERR_PTR (-EEXIST if @user_call_ID is already
 * in use, -ENOMEM, or a connect error).
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct sockaddr_rxrpc *srx,
					 unsigned long user_call_ID,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *xcall;
	struct rb_node *parent, **pp;
	const void *here = __builtin_return_address(0);
	int ret;

	_enter("%p,%lx", rx, user_call_ID);

	call = rxrpc_alloc_client_call(rx, srx, gfp);
	if (IS_ERR(call)) {
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	trace_rxrpc_call(call, 0, atomic_read(&call->usage), here,
			 (const void *)user_call_ID);

	/* Publish the call, even though it is incompletely set up as yet */
	call->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

	write_lock(&rx->call_lock);

	/* Find the insertion point in the socket's user-ID rb-tree, checking
	 * for a duplicate ID as we go.
	 */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_user_ID_now_present;
	}

	/* The user-ID tree holds its own ref on the call. */
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	ret = rxrpc_begin_client_call(call, cp, srx, gfp);
	if (ret < 0)
		goto error;

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

	/* Connect failed: unwind the tree insertion (dropping the tree's ref)
	 * and the global-list linkage before completing the call with an
	 * error.
	 */
error:
	write_lock(&rx->call_lock);
	rb_erase(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);
	rxrpc_put_call(call, rxrpc_call_put_userid);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

error_out:
	__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
				    RX_CALL_DEAD, ret);
	set_bit(RXRPC_CALL_RELEASED, &call->flags);
	rxrpc_put_call(call, rxrpc_call_put);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

	/* We unexpectedly found the user ID in the list after taking
	 * the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
found_user_ID_now_present:
	write_unlock(&rx->call_lock);
	ret = -EEXIST;
	goto error_out;
}
304
/*
 * Set up an incoming (service) call from the first packet of a new call on
 * a connection channel.
 * - called in process context with IRQs enabled
 *
 * Returns the new or extant call, or ERR_PTR:
 *   -EBUSY	   allocation failed, or the channel's previous call is not
 *		   yet complete
 *   -ECONNABORTED the extant call on the channel was aborted
 *   -ECONNRESET   the packet's call ID is not newer than the channel's
 *		   call counter
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
				       struct rxrpc_connection *conn,
				       struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_call *call, *candidate;
	const void *here = __builtin_return_address(0);
	u32 call_id, chan;

	_enter(",%d", conn->debug_id);

	ASSERT(rx != NULL);

	/* Optimistically allocate a candidate call before looking at the
	 * channel; it is freed again on every non-new-call path below.
	 */
	candidate = rxrpc_alloc_call(GFP_NOIO);
	if (!candidate)
		return ERR_PTR(-EBUSY);

	trace_rxrpc_call(candidate, rxrpc_call_new_service,
			 atomic_read(&candidate->usage), here, NULL);

	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
	candidate->conn = conn;
	candidate->peer = conn->params.peer;
	candidate->cid = sp->hdr.cid;
	candidate->call_id = sp->hdr.callNumber;
	candidate->security_ix = sp->hdr.securityIndex;
	candidate->rx_data_post = 0;
	candidate->state = RXRPC_CALL_SERVER_ACCEPTING;
	candidate->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	if (conn->security_ix > 0)
		candidate->state = RXRPC_CALL_SERVER_SECURING;
	rcu_assign_pointer(candidate->socket, rx);

	spin_lock(&conn->channel_lock);

	/* set the channel for this call */
	call = rcu_dereference_protected(conn->channels[chan].call,
					 lockdep_is_held(&conn->channel_lock));

	_debug("channel[%u] is %p", candidate->cid & RXRPC_CHANNELMASK, call);
	if (call && call->call_id == sp->hdr.callNumber) {
		/* already set; must've been a duplicate packet */
		_debug("extant call [%d]", call->state);
		ASSERTCMP(call->conn, ==, conn);

		read_lock(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_LOCALLY_ABORTED:
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				rxrpc_queue_call(call);
			/* fall through - locally and remotely aborted calls
			 * both report -ECONNABORTED */
		case RXRPC_CALL_REMOTELY_ABORTED:
			read_unlock(&call->state_lock);
			goto aborted_call;
		default:
			rxrpc_get_call(call, rxrpc_call_got);
			read_unlock(&call->state_lock);
			goto extant_call;
		}
	}

	if (call) {
		/* it seems the channel is still in use from the previous call
		 * - ditch the old binding if its call is now complete */
		_debug("CALL: %u { %s }",
		       call->debug_id, rxrpc_call_states[call->state]);

		if (call->state == RXRPC_CALL_COMPLETE) {
			__rxrpc_disconnect_call(conn, call);
		} else {
			spin_unlock(&conn->channel_lock);
			kmem_cache_free(rxrpc_call_jar, candidate);
			_leave(" = -EBUSY");
			return ERR_PTR(-EBUSY);
		}
	}

	/* check the call number isn't duplicate */
	_debug("check dup");
	call_id = sp->hdr.callNumber;

	/* We just ignore calls prior to the current call ID.  Terminated calls
	 * are handled via the connection.
	 */
	if (call_id <= conn->channels[chan].call_counter)
		goto old_call; /* TODO: Just drop packet */

	/* make the call available */
	_debug("new call");
	call = candidate;
	candidate = NULL;
	conn->channels[chan].call_counter = call_id;
	rcu_assign_pointer(conn->channels[chan].call, call);
	/* The channel binding holds refs on the connection and the peer. */
	rxrpc_get_connection(conn);
	rxrpc_get_peer(call->peer);
	spin_unlock(&conn->channel_lock);

	/* Hook the call up to the peer's error-distribution list. */
	spin_lock(&conn->params.peer->lock);
	hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	call->service_id = conn->params.service_id;

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	_leave(" = %p {%d} [new]", call, call->debug_id);
	return call;

extant_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
	return call;

aborted_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNABORTED");
	return ERR_PTR(-ECONNABORTED);

old_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNRESET [old]");
	return ERR_PTR(-ECONNRESET);
}
440
441/*
David Howells8d94aa32016-09-07 09:19:31 +0100442 * Queue a call's work processor, getting a ref to pass to the work queue.
443 */
444bool rxrpc_queue_call(struct rxrpc_call *call)
445{
446 const void *here = __builtin_return_address(0);
447 int n = __atomic_add_unless(&call->usage, 1, 0);
David Howells8d94aa32016-09-07 09:19:31 +0100448 if (n == 0)
449 return false;
450 if (rxrpc_queue_work(&call->processor))
David Howells2ab27212016-09-08 11:10:12 +0100451 trace_rxrpc_call(call, rxrpc_call_queued, n + 1, here, NULL);
David Howells8d94aa32016-09-07 09:19:31 +0100452 else
453 rxrpc_put_call(call, rxrpc_call_put_noqueue);
454 return true;
455}
456
457/*
458 * Queue a call's work processor, passing the callers ref to the work queue.
459 */
460bool __rxrpc_queue_call(struct rxrpc_call *call)
461{
462 const void *here = __builtin_return_address(0);
463 int n = atomic_read(&call->usage);
David Howells8d94aa32016-09-07 09:19:31 +0100464 ASSERTCMP(n, >=, 1);
465 if (rxrpc_queue_work(&call->processor))
David Howells2ab27212016-09-08 11:10:12 +0100466 trace_rxrpc_call(call, rxrpc_call_queued_ref, n, here, NULL);
David Howells8d94aa32016-09-07 09:19:31 +0100467 else
468 rxrpc_put_call(call, rxrpc_call_put_noqueue);
469 return true;
470}
471
472/*
David Howellse34d4232016-08-30 09:49:29 +0100473 * Note the re-emergence of a call.
474 */
475void rxrpc_see_call(struct rxrpc_call *call)
476{
477 const void *here = __builtin_return_address(0);
478 if (call) {
479 int n = atomic_read(&call->usage);
David Howellse34d4232016-08-30 09:49:29 +0100480
David Howells2ab27212016-09-08 11:10:12 +0100481 trace_rxrpc_call(call, rxrpc_call_seen, n, here, NULL);
David Howellse34d4232016-08-30 09:49:29 +0100482 }
483}
484
485/*
486 * Note the addition of a ref on a call.
487 */
David Howellsfff72422016-09-07 14:34:21 +0100488void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
David Howellse34d4232016-08-30 09:49:29 +0100489{
490 const void *here = __builtin_return_address(0);
491 int n = atomic_inc_return(&call->usage);
David Howellse34d4232016-08-30 09:49:29 +0100492
David Howells2ab27212016-09-08 11:10:12 +0100493 trace_rxrpc_call(call, op, n, here, NULL);
David Howellse34d4232016-08-30 09:49:29 +0100494}
495
496/*
497 * Note the addition of a ref on a call for a socket buffer.
498 */
499void rxrpc_get_call_for_skb(struct rxrpc_call *call, struct sk_buff *skb)
500{
501 const void *here = __builtin_return_address(0);
502 int n = atomic_inc_return(&call->usage);
David Howellse34d4232016-08-30 09:49:29 +0100503
David Howells2ab27212016-09-08 11:10:12 +0100504 trace_rxrpc_call(call, rxrpc_call_got_skb, n, here, skb);
David Howellse34d4232016-08-30 09:49:29 +0100505}
506
/*
 * Detach a call from its owning socket and set it up for release.
 *
 * Marks the call RELEASED (BUGs if already released), removes it from the
 * peer's error list and the socket's accept queue or user-ID tree, aborts
 * or final-ACKs it as appropriate, disconnects it from its connection,
 * purges its receive queues and stops its timers.
 */
void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
	_enter("{%d,%d,%d,%d}",
	       call->debug_id, atomic_read(&call->usage),
	       atomic_read(&call->ackr_not_idle),
	       call->rx_first_oos);

	rxrpc_see_call(call);

	/* A call may only be released once. */
	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	/* dissociate from the socket
	 * - the socket's ref on the call is passed to the death timer
	 */
	_debug("RELEASE CALL %p (%d)", call, call->debug_id);

	/* Stop network errors from being distributed to this call. */
	if (call->peer) {
		spin_lock(&call->peer->lock);
		hlist_del_init(&call->error_link);
		spin_unlock(&call->peer->lock);
	}

	write_lock_bh(&rx->call_lock);
	if (!list_empty(&call->accept_link)) {
		/* Still awaiting accept(): unlink from the accept queue. */
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		list_del_init(&call->accept_link);
		sk_acceptq_removed(&rx->sk);
	} else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		/* Remove from the user-ID tree and drop the tree's ref. */
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
		rxrpc_put_call(call, rxrpc_call_put_userid);
	}
	write_unlock_bh(&rx->call_lock);

	/* free up the channel for reuse */
	if (call->state == RXRPC_CALL_CLIENT_FINAL_ACK) {
		clear_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events);
		rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
		rxrpc_call_completed(call);
	} else {
		write_lock_bh(&call->state_lock);

		if (call->state < RXRPC_CALL_COMPLETE) {
			/* Still in progress: abort it towards the peer. */
			_debug("+++ ABORTING STATE %d +++\n", call->state);
			__rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, ECONNRESET);
			clear_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events);
			rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
		}

		write_unlock_bh(&call->state_lock);
	}

	if (call->conn)
		rxrpc_disconnect_call(call);

	/* clean up the Rx queue */
	if (!skb_queue_empty(&call->rx_queue) ||
	    !skb_queue_empty(&call->rx_oos_queue)) {
		struct rxrpc_skb_priv *sp;
		struct sk_buff *skb;

		_debug("purge Rx queues");

		/* The lock is dropped around each free as rxrpc_free_skb()
		 * may not be called with call->lock held.
		 */
		spin_lock_bh(&call->lock);
		while ((skb = skb_dequeue(&call->rx_queue)) ||
		       (skb = skb_dequeue(&call->rx_oos_queue))) {
			spin_unlock_bh(&call->lock);

			sp = rxrpc_skb(skb);
			_debug("- zap %s %%%u #%u",
			       rxrpc_pkts[sp->hdr.type],
			       sp->hdr.serial, sp->hdr.seq);
			rxrpc_free_skb(skb);
			spin_lock_bh(&call->lock);
		}
		spin_unlock_bh(&call->lock);
	}
	rxrpc_purge_queue(&call->knlrecv_queue);

	del_timer_sync(&call->resend_timer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->lifetimer);

	_leave("");
}
601
/*
 * Release all the calls associated with a socket: the not-yet-secured and
 * not-yet-accepted incoming calls, then every call in the user-ID tree.
 *
 * NOTE(review): this iterates rx->secureq, rx->acceptq and rx->calls while
 * holding read_lock_bh(&rx->call_lock), but rxrpc_release_call() itself
 * takes write_lock_bh(&rx->call_lock) and unlinks entries from these very
 * structures (list_del_init/rb_erase) - confirm the intended locking here;
 * as written this looks like a read-then-write lock recursion and an
 * iterate-while-modifying hazard.
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p", rx);

	read_lock_bh(&rx->call_lock);

	/* kill the not-yet-accepted incoming calls */
	list_for_each_entry(call, &rx->secureq, accept_link) {
		rxrpc_release_call(rx, call);
	}

	list_for_each_entry(call, &rx->acceptq, accept_link) {
		rxrpc_release_call(rx, call);
	}

	/* mark all the calls as no longer wanting incoming packets */
	for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
		call = rb_entry(p, struct rxrpc_call, sock_node);
		rxrpc_release_call(rx, call);
	}

	read_unlock_bh(&rx->call_lock);
	_leave("");
}
632
/*
 * Drop a ref on a call, tracing the operation; when the refcount reaches
 * zero the call is cleaned up (and ultimately freed via RCU).
 */
void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	const void *here = __builtin_return_address(0);
	int n;

	ASSERT(call != NULL);

	n = atomic_dec_return(&call->usage);
	trace_rxrpc_call(call, op, n, here, NULL);
	ASSERTCMP(n, >=, 0);
	if (n == 0) {
		/* Last ref gone: tear the call down. */
		_debug("call %d dead", call->debug_id);
		rxrpc_cleanup_call(call);
	}
}
651
/*
 * Drop a call ref held by a socket buffer, tracing the skb; cleans up the
 * call when the refcount reaches zero.
 */
void rxrpc_put_call_for_skb(struct rxrpc_call *call, struct sk_buff *skb)
{
	const void *here = __builtin_return_address(0);
	int n;

	n = atomic_dec_return(&call->usage);
	trace_rxrpc_call(call, rxrpc_call_put_skb, n, here, skb);
	ASSERTCMP(n, >=, 0);
	if (n == 0) {
		/* Last ref gone: tear the call down. */
		_debug("call %d dead", call->debug_id);
		rxrpc_cleanup_call(call);
	}
}
668
/*
 * Final call destruction under RCU, scheduled by rxrpc_cleanup_call() via
 * call_rcu(): purge any remaining queued skbs, drop the peer ref and free
 * the call back to the slab cache.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

	rxrpc_purge_queue(&call->rx_queue);
	rxrpc_purge_queue(&call->knlrecv_queue);
	rxrpc_put_peer(call->peer);
	kmem_cache_free(rxrpc_call_jar, call);
}
681
/*
 * Clean up a call whose last ref has been dropped.
 *
 * The call must already be COMPLETE, RELEASED, disconnected and have no
 * pending work (asserted below).  Unlinks it from the global list, stops
 * its timers, frees the Tx ACK window's skbs, purges the Rx queues and
 * defers the final free to RCU via rxrpc_rcu_destroy_call().
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	/* Poison the (already-removed) rb-tree node. */
	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->lifetimer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->resend_timer);

	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERT(!work_pending(&call->processor));
	ASSERTCMP(call->conn, ==, NULL);

	if (call->acks_window) {
		_debug("kill Tx window %d",
		       CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz));
		smp_mb();
		/* Drain the circular Tx window, freeing each queued skb.
		 * Window slots store the skb pointer with bit 0 used as a
		 * flag, hence the "& ~1" mask.
		 */
		while (CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz) > 0) {
			struct rxrpc_skb_priv *sp;
			unsigned long _skb;

			_skb = call->acks_window[call->acks_tail] & ~1;
			sp = rxrpc_skb((struct sk_buff *)_skb);
			_debug("+++ clear Tx %u", sp->hdr.seq);
			rxrpc_free_skb((struct sk_buff *)_skb);
			call->acks_tail =
				(call->acks_tail + 1) & (call->acks_winsz - 1);
		}

		kfree(call->acks_window);
	}

	rxrpc_free_skb(call->tx_pending);

	rxrpc_purge_queue(&call->rx_queue);
	ASSERT(skb_queue_empty(&call->rx_oos_queue));
	rxrpc_purge_queue(&call->knlrecv_queue);
	/* Final free happens after an RCU grace period. */
	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}
732
/*
 * Make sure that all calls are gone at module unload time.  Any call still
 * on the global list here is leaked, so report it rather than freeing it.
 */
void __exit rxrpc_destroy_all_calls(void)
{
	struct rxrpc_call *call;

	_enter("");

	if (list_empty(&rxrpc_calls))
		return;

	write_lock_bh(&rxrpc_call_lock);

	while (!list_empty(&rxrpc_calls)) {
		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		rxrpc_see_call(call);
		list_del_init(&call->link);

		/* Reaching here means a refcount was leaked somewhere; dump
		 * the call state to aid debugging.
		 */
		pr_err("Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
		       call, atomic_read(&call->usage),
		       atomic_read(&call->ackr_not_idle),
		       rxrpc_call_states[call->state],
		       call->flags, call->events);
		if (!skb_queue_empty(&call->rx_queue))
			pr_err("Rx queue occupied\n");
		if (!skb_queue_empty(&call->rx_oos_queue))
			pr_err("OOS queue occupied\n");

		/* Drop the lock between iterations so we don't hog the CPU
		 * (or the lock) while walking a potentially long list.
		 */
		write_unlock_bh(&rxrpc_call_lock);
		cond_resched();
		write_lock_bh(&rxrpc_call_lock);
	}

	write_unlock_bh(&rxrpc_call_lock);
	_leave("");
}
772
773/*
774 * handle call lifetime being exceeded
775 */
776static void rxrpc_call_life_expired(unsigned long _call)
777{
778 struct rxrpc_call *call = (struct rxrpc_call *) _call;
779
David Howellsf5c17aa2016-08-30 09:49:28 +0100780 _enter("{%d}", call->debug_id);
781
David Howellse34d4232016-08-30 09:49:29 +0100782 rxrpc_see_call(call);
David Howells17926a72007-04-26 15:48:28 -0700783 if (call->state >= RXRPC_CALL_COMPLETE)
784 return;
785
David Howellsf5c17aa2016-08-30 09:49:28 +0100786 set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
787 rxrpc_queue_call(call);
David Howells17926a72007-04-26 15:48:28 -0700788}
789
790/*
791 * handle resend timer expiry
David Howells3b5bac22010-08-04 02:34:17 +0000792 * - may not take call->state_lock as this can deadlock against del_timer_sync()
David Howells17926a72007-04-26 15:48:28 -0700793 */
794static void rxrpc_resend_time_expired(unsigned long _call)
795{
796 struct rxrpc_call *call = (struct rxrpc_call *) _call;
797
798 _enter("{%d}", call->debug_id);
799
David Howellse34d4232016-08-30 09:49:29 +0100800 rxrpc_see_call(call);
David Howells17926a72007-04-26 15:48:28 -0700801 if (call->state >= RXRPC_CALL_COMPLETE)
802 return;
803
David Howells17926a72007-04-26 15:48:28 -0700804 clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
David Howells4c198ad2016-03-04 15:53:46 +0000805 if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
David Howells651350d2007-04-26 15:50:17 -0700806 rxrpc_queue_call(call);
David Howells17926a72007-04-26 15:48:28 -0700807}
808
809/*
810 * handle ACK timer expiry
811 */
812static void rxrpc_ack_time_expired(unsigned long _call)
813{
814 struct rxrpc_call *call = (struct rxrpc_call *) _call;
815
816 _enter("{%d}", call->debug_id);
817
David Howellse34d4232016-08-30 09:49:29 +0100818 rxrpc_see_call(call);
David Howells17926a72007-04-26 15:48:28 -0700819 if (call->state >= RXRPC_CALL_COMPLETE)
820 return;
821
David Howellsf5c17aa2016-08-30 09:49:28 +0100822 if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
David Howells651350d2007-04-26 15:50:17 -0700823 rxrpc_queue_call(call);
David Howells17926a72007-04-26 15:48:28 -0700824}