/* AF_RXRPC internal definitions
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <net/sock.h>
#include <rxrpc/packet.h>

#if 0
#define CHECK_SLAB_OKAY(X) \
	BUG_ON(atomic_read((X)) >> (sizeof(atomic_t) - 2) == \
	       (POISON_FREE << 8 | POISON_FREE))
#else
#define CHECK_SLAB_OKAY(X) do {} while (0)
#endif

#define FCRYPT_BSIZE 8
struct rxrpc_crypt {
	union {
		u8	x[FCRYPT_BSIZE];
		__be32	n[2];
	};
} __attribute__((aligned(8)));

#define rxrpc_queue_work(WS)	queue_work(rxrpc_workqueue, (WS))
#define rxrpc_queue_delayed_work(WS, D)	\
	queue_delayed_work(rxrpc_workqueue, (WS), (D))

#define rxrpc_queue_call(CALL)	rxrpc_queue_work(&(CALL)->processor)
#define rxrpc_queue_conn(CONN)	rxrpc_queue_work(&(CONN)->processor)

/*
 * sk_state for RxRPC sockets
 */
enum {
	RXRPC_UNBOUND = 0,
	RXRPC_CLIENT_UNBOUND,		/* Unbound socket used as client */
	RXRPC_CLIENT_BOUND,		/* client local address bound */
	RXRPC_SERVER_BOUND,		/* server local address bound */
	RXRPC_SERVER_LISTENING,		/* server listening for connections */
	RXRPC_CLOSE,			/* socket is being closed */
};

/*
 * RxRPC socket definition
 */
struct rxrpc_sock {
	/* WARNING: sk has to be the first member */
	struct sock		sk;
	rxrpc_interceptor_t	interceptor;	/* kernel service Rx interceptor function */
	struct rxrpc_local	*local;		/* local endpoint */
	struct rxrpc_connection	*conn;		/* exclusive virtual connection */
	struct list_head	listen_link;	/* link in the local endpoint's listen list */
	struct list_head	secureq;	/* calls awaiting connection security clearance */
	struct list_head	acceptq;	/* calls awaiting acceptance */
	struct key		*key;		/* security for this socket */
	struct key		*securities;	/* list of server security descriptors */
	struct rb_root		calls;		/* outstanding calls on this socket */
	unsigned long		flags;
#define RXRPC_SOCK_CONNECTED		0	/* connect_srx is set */
#define RXRPC_SOCK_EXCLUSIVE_CONN	1	/* exclusive connection for a client socket */
	rwlock_t		call_lock;	/* lock for calls */
	u32			min_sec_level;	/* minimum security level */
#define RXRPC_SECURITY_MAX	RXRPC_SECURITY_ENCRYPT
	struct sockaddr_rxrpc	srx;		/* local address */
	struct sockaddr_rxrpc	connect_srx;	/* Default client address from connect() */
	sa_family_t		proto;		/* protocol created with */
};

#define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk)
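/*
 * Illustrative sketch (not part of this header): rxrpc_sk() recovers the
 * AF_RXRPC socket wrapper from a generic struct sock pointer, which works
 * because sk is the first member of struct rxrpc_sock.  A hypothetical
 * helper might use it like this:
 *
 *	static bool rxrpc_sock_is_exclusive(struct sock *sk)
 *	{
 *		struct rxrpc_sock *rx = rxrpc_sk(sk);
 *
 *		return test_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags);
 *	}
 */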

/*
 * CPU-byteorder normalised Rx packet header.
 */
struct rxrpc_host_header {
	u32		epoch;		/* client boot timestamp */
	u32		cid;		/* connection and channel ID */
	u32		callNumber;	/* call ID (0 for connection-level packets) */
	u32		seq;		/* sequence number of pkt in call stream */
	u32		serial;		/* serial number of pkt sent to network */
	u8		type;		/* packet type */
	u8		flags;		/* packet flags */
	u8		userStatus;	/* app-layer defined status */
	u8		securityIndex;	/* security protocol ID */
	union {
		u16	_rsvd;		/* reserved */
		u16	cksum;		/* kerberos security checksum */
	};
	u16		serviceId;	/* service ID */
} __packed;

/*
 * RxRPC socket buffer private variables
 * - max 48 bytes (struct sk_buff::cb)
 */
struct rxrpc_skb_priv {
	struct rxrpc_call	*call;		/* call with which associated */
	unsigned long		resend_at;	/* time in jiffies at which to resend */
	union {
		unsigned int	offset;		/* offset into buffer of next read */
		int		remain;		/* amount of space remaining for next write */
		u32		error;		/* network error code */
		bool		need_resend;	/* T if needs resending */
	};

	struct rxrpc_host_header hdr;		/* RxRPC packet header from this packet */
};

#define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb)
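/*
 * Illustrative sketch (not part of this header): rxrpc_skb() overlays
 * struct rxrpc_skb_priv on the 48-byte sk_buff control buffer, so the
 * per-packet state above travels with the skb.  A hypothetical accessor:
 *
 *	static struct rxrpc_call *rxrpc_skb_call(struct sk_buff *skb)
 *	{
 *		struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 *
 *		return sp->call;
 *	}
 */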

enum rxrpc_command {
	RXRPC_CMD_SEND_DATA,		/* send data message */
	RXRPC_CMD_SEND_ABORT,		/* request abort generation */
	RXRPC_CMD_ACCEPT,		/* [server] accept incoming call */
	RXRPC_CMD_REJECT_BUSY,		/* [server] reject a call as busy */
};

/*
 * RxRPC security module interface
 */
struct rxrpc_security {
	const char		*name;		/* name of this service */
	u8			security_index;	/* security type provided */

	/* Initialise a security service */
	int (*init)(void);

	/* Clean up a security service */
	void (*exit)(void);

	/* initialise a connection's security */
	int (*init_connection_security)(struct rxrpc_connection *);

	/* prime a connection's packet security */
	void (*prime_packet_security)(struct rxrpc_connection *);

	/* impose security on a packet */
	int (*secure_packet)(const struct rxrpc_call *,
			     struct sk_buff *,
			     size_t,
			     void *);

	/* verify the security on a received packet */
	int (*verify_packet)(const struct rxrpc_call *, struct sk_buff *,
			     u32 *);

	/* issue a challenge */
	int (*issue_challenge)(struct rxrpc_connection *);

	/* respond to a challenge */
	int (*respond_to_challenge)(struct rxrpc_connection *,
				    struct sk_buff *,
				    u32 *);

	/* verify a response */
	int (*verify_response)(struct rxrpc_connection *,
			       struct sk_buff *,
			       u32 *);

	/* clear connection security */
	void (*clear)(struct rxrpc_connection *);
};
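/*
 * Illustrative sketch (not part of this header): a security module fills in
 * an ops table like the one above; the names below are hypothetical
 * placeholders, not the actual rxkad or null-security tables.
 *
 *	static const struct rxrpc_security example_security = {
 *		.name				= "example",
 *		.security_index			= 0,
 *		.init				= example_init,
 *		.exit				= example_exit,
 *		.init_connection_security	= example_init_conn_security,
 *		.prime_packet_security		= example_prime_packet_security,
 *		.secure_packet			= example_secure_packet,
 *		.verify_packet			= example_verify_packet,
 *		.issue_challenge		= example_issue_challenge,
 *		.respond_to_challenge		= example_respond_to_challenge,
 *		.verify_response		= example_verify_response,
 *		.clear				= example_clear_conn_security,
 *	};
 */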

/*
 * RxRPC local transport endpoint definition
 * - matched by local port, address and protocol type
 */
struct rxrpc_local {
	struct socket		*socket;	/* my UDP socket */
	struct work_struct	destroyer;	/* endpoint destroyer */
	struct work_struct	acceptor;	/* incoming call processor */
	struct work_struct	rejecter;	/* packet reject writer */
	struct work_struct	event_processor; /* endpoint event processor */
	struct list_head	services;	/* services listening on this endpoint */
	struct list_head	link;		/* link in endpoint list */
	struct rw_semaphore	defrag_sem;	/* control re-enablement of IP DF bit */
	struct sk_buff_head	accept_queue;	/* incoming calls awaiting acceptance */
	struct sk_buff_head	reject_queue;	/* packets awaiting rejection */
	struct sk_buff_head	event_queue;	/* endpoint event packets awaiting processing */
	spinlock_t		lock;		/* access lock */
	rwlock_t		services_lock;	/* lock for services list */
	atomic_t		usage;
	int			debug_id;	/* debug ID for printks */
	volatile char		error_rcvd;	/* T if received ICMP error outstanding */
	struct sockaddr_rxrpc	srx;		/* local address */
};

/*
 * RxRPC remote transport endpoint definition
 * - matched by remote port, address and protocol type
 * - holds the connection ID counter for connections between the two endpoints
 */
struct rxrpc_peer {
	struct work_struct	destroyer;	/* peer destroyer */
	struct list_head	link;		/* link in master peer list */
	struct list_head	error_targets;	/* targets for net error distribution */
	spinlock_t		lock;		/* access lock */
	atomic_t		usage;
	unsigned int		if_mtu;		/* interface MTU for this peer */
	unsigned int		mtu;		/* network MTU for this peer */
	unsigned int		maxdata;	/* data size (MTU - hdrsize) */
	unsigned short		hdrsize;	/* header size (IP + UDP + RxRPC) */
	int			debug_id;	/* debug ID for printks */
	int			net_error;	/* network error distributed */
	struct sockaddr_rxrpc	srx;		/* remote address */

	/* calculated RTT cache */
#define RXRPC_RTT_CACHE_SIZE 32
	suseconds_t		rtt;		/* current RTT estimate (in uS) */
	unsigned int		rtt_point;	/* next entry at which to insert */
	unsigned int		rtt_usage;	/* amount of cache actually used */
	suseconds_t		rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* calculated RTT cache */
};

/*
 * RxRPC point-to-point transport / connection manager definition
 * - handles a bundle of connections between two endpoints
 * - matched by { local, peer }
 */
struct rxrpc_transport {
	struct rxrpc_local	*local;		/* local transport endpoint */
	struct rxrpc_peer	*peer;		/* remote transport endpoint */
	struct work_struct	error_handler;	/* network error distributor */
	struct rb_root		bundles;	/* client connection bundles on this transport */
	struct rb_root		client_conns;	/* client connections on this transport */
	struct rb_root		server_conns;	/* server connections on this transport */
	struct list_head	link;		/* link in master session list */
	struct sk_buff_head	error_queue;	/* error packets awaiting processing */
	unsigned long		put_time;	/* time at which to reap */
	spinlock_t		client_lock;	/* client connection allocation lock */
	rwlock_t		conn_lock;	/* lock for active/dead connections */
	atomic_t		usage;
	int			debug_id;	/* debug ID for printks */
	unsigned int		conn_idcounter;	/* connection ID counter (client) */
};

/*
 * RxRPC client connection bundle
 * - matched by { transport, service_id, key }
 */
struct rxrpc_conn_bundle {
	struct rb_node		node;		/* node in transport's lookup tree */
	struct list_head	unused_conns;	/* unused connections in this bundle */
	struct list_head	avail_conns;	/* available connections in this bundle */
	struct list_head	busy_conns;	/* busy connections in this bundle */
	struct key		*key;		/* security for this bundle */
	wait_queue_head_t	chanwait;	/* wait for channel to become available */
	atomic_t		usage;
	int			debug_id;	/* debug ID for printks */
	unsigned short		num_conns;	/* number of connections in this bundle */
	u16			service_id;	/* Service ID for this bundle */
	u8			security_ix;	/* security type */
};

/*
 * RxRPC connection definition
 * - matched by { transport, service_id, conn_id, direction, key }
 * - each connection can only handle four simultaneous calls
 */
struct rxrpc_connection {
	struct rxrpc_transport	*trans;		/* transport session */
	struct rxrpc_conn_bundle *bundle;	/* connection bundle (client) */
	struct work_struct	processor;	/* connection event processor */
	struct rb_node		node;		/* node in transport's lookup tree */
	struct list_head	link;		/* link in master connection list */
	struct list_head	bundle_link;	/* link in bundle */
	struct rb_root		calls;		/* calls on this connection */
	struct sk_buff_head	rx_queue;	/* received conn-level packets */
	struct rxrpc_call	*channels[RXRPC_MAXCALLS]; /* channels (active calls) */
	const struct rxrpc_security *security;	/* applied security module */
	struct key		*key;		/* security for this connection (client) */
	struct key		*server_key;	/* security for this service */
	struct crypto_skcipher	*cipher;	/* encryption handle */
	struct rxrpc_crypt	csum_iv;	/* packet checksum base */
	unsigned long		events;
#define RXRPC_CONN_CHALLENGE	0		/* send challenge packet */
	unsigned long		put_time;	/* time at which to reap */
	rwlock_t		lock;		/* access lock */
	spinlock_t		state_lock;	/* state-change lock */
	atomic_t		usage;
	enum {					/* current state of connection */
		RXRPC_CONN_UNUSED,		/* - connection not yet attempted */
		RXRPC_CONN_CLIENT,		/* - client connection */
		RXRPC_CONN_SERVER_UNSECURED,	/* - server unsecured connection */
		RXRPC_CONN_SERVER_CHALLENGING,	/* - server challenging for security */
		RXRPC_CONN_SERVER,		/* - server secured connection */
		RXRPC_CONN_REMOTELY_ABORTED,	/* - conn aborted by peer */
		RXRPC_CONN_LOCALLY_ABORTED,	/* - conn aborted locally */
		RXRPC_CONN_NETWORK_ERROR,	/* - conn terminated by network error */
	} state;
	u32			local_abort;	/* local abort code */
	u32			remote_abort;	/* remote abort code */
	int			error;		/* local error incurred */
	int			debug_id;	/* debug ID for printks */
	unsigned int		call_counter;	/* call ID counter */
	atomic_t		serial;		/* packet serial number counter */
	atomic_t		hi_serial;	/* highest serial number received */
	u8			avail_calls;	/* number of calls available */
	u8			size_align;	/* data size alignment (for security) */
	u8			header_size;	/* rxrpc + security header size */
	u8			security_size;	/* security header size */
	u32			security_level;	/* security level negotiated */
	u32			security_nonce;	/* response re-use preventer */
	u32			epoch;		/* epoch of this connection */
	u32			cid;		/* connection ID */
	u16			service_id;	/* service ID for this connection */
	u8			security_ix;	/* security type */
	u8			in_clientflag;	/* RXRPC_CLIENT_INITIATED if we are server */
	u8			out_clientflag;	/* RXRPC_CLIENT_INITIATED if we are client */
};

/*
 * Flags in call->flags.
 */
enum rxrpc_call_flag {
	RXRPC_CALL_RELEASED,		/* call has been released - no more message to userspace */
	RXRPC_CALL_TERMINAL_MSG,	/* call has given the socket its final message */
	RXRPC_CALL_RCVD_LAST,		/* all packets received */
	RXRPC_CALL_RUN_RTIMER,		/* Tx resend timer started */
	RXRPC_CALL_TX_SOFT_ACK,		/* sent some soft ACKs */
	RXRPC_CALL_PROC_BUSY,		/* the processor is busy */
	RXRPC_CALL_INIT_ACCEPT,		/* acceptance was initiated */
	RXRPC_CALL_HAS_USERID,		/* has a user ID attached */
	RXRPC_CALL_EXPECT_OOS,		/* expect out of sequence packets */
};

/*
 * Events that can be raised on a call.
 */
enum rxrpc_call_event {
	RXRPC_CALL_EV_RCVD_ACKALL,	/* ACKALL or reply received */
	RXRPC_CALL_EV_RCVD_BUSY,	/* busy packet received */
	RXRPC_CALL_EV_RCVD_ABORT,	/* abort packet received */
	RXRPC_CALL_EV_RCVD_ERROR,	/* network error received */
	RXRPC_CALL_EV_ACK_FINAL,	/* need to generate final ACK (and release call) */
	RXRPC_CALL_EV_ACK,		/* need to generate ACK */
	RXRPC_CALL_EV_REJECT_BUSY,	/* need to generate busy message */
	RXRPC_CALL_EV_ABORT,		/* need to generate abort */
	RXRPC_CALL_EV_CONN_ABORT,	/* local connection abort generated */
	RXRPC_CALL_EV_RESEND_TIMER,	/* Tx resend timer expired */
	RXRPC_CALL_EV_RESEND,		/* Tx resend required */
	RXRPC_CALL_EV_DRAIN_RX_OOS,	/* drain the Rx out of sequence queue */
	RXRPC_CALL_EV_LIFE_TIMER,	/* call's lifetimer ran out */
	RXRPC_CALL_EV_ACCEPTED,		/* incoming call accepted by userspace app */
	RXRPC_CALL_EV_SECURED,		/* incoming call's connection is now secure */
	RXRPC_CALL_EV_POST_ACCEPT,	/* need to post an "accept?" message to the app */
	RXRPC_CALL_EV_RELEASE,		/* need to release the call's resources */
};

/*
 * The states that a call can be in.
 */
enum rxrpc_call_state {
	RXRPC_CALL_CLIENT_SEND_REQUEST,	/* - client sending request phase */
	RXRPC_CALL_CLIENT_AWAIT_REPLY,	/* - client awaiting reply */
	RXRPC_CALL_CLIENT_RECV_REPLY,	/* - client receiving reply phase */
	RXRPC_CALL_CLIENT_FINAL_ACK,	/* - client sending final ACK phase */
	RXRPC_CALL_SERVER_SECURING,	/* - server securing request connection */
	RXRPC_CALL_SERVER_ACCEPTING,	/* - server accepting request */
	RXRPC_CALL_SERVER_RECV_REQUEST,	/* - server receiving request */
	RXRPC_CALL_SERVER_ACK_REQUEST,	/* - server pending ACK of request */
	RXRPC_CALL_SERVER_SEND_REPLY,	/* - server sending reply */
	RXRPC_CALL_SERVER_AWAIT_ACK,	/* - server awaiting final ACK */
	RXRPC_CALL_COMPLETE,		/* - call completed */
	RXRPC_CALL_SERVER_BUSY,		/* - call rejected by busy server */
	RXRPC_CALL_REMOTELY_ABORTED,	/* - call aborted by peer */
	RXRPC_CALL_LOCALLY_ABORTED,	/* - call aborted locally on error or close */
	RXRPC_CALL_NETWORK_ERROR,	/* - call terminated by network error */
	RXRPC_CALL_DEAD,		/* - call is dead */
	NR__RXRPC_CALL_STATES
};

/*
 * RxRPC call definition
 * - matched by { connection, call_id }
 */
struct rxrpc_call {
	struct rxrpc_connection	*conn;		/* connection carrying call */
	struct rxrpc_sock	*socket;	/* socket responsible */
	struct timer_list	lifetimer;	/* lifetime remaining on call */
	struct timer_list	deadspan;	/* reap timer for re-ACK'ing, etc */
	struct timer_list	ack_timer;	/* ACK generation timer */
	struct timer_list	resend_timer;	/* Tx resend timer */
	struct work_struct	destroyer;	/* call destroyer */
	struct work_struct	processor;	/* packet processor and ACK generator */
	struct list_head	link;		/* link in master call list */
	struct list_head	error_link;	/* link in error distribution list */
	struct list_head	accept_link;	/* calls awaiting acceptance */
	struct rb_node		sock_node;	/* node in socket call tree */
	struct rb_node		conn_node;	/* node in connection call tree */
	struct sk_buff_head	rx_queue;	/* received packets */
	struct sk_buff_head	rx_oos_queue;	/* packets received out of sequence */
	struct sk_buff		*tx_pending;	/* Tx socket buffer being filled */
	wait_queue_head_t	tx_waitq;	/* wait for Tx window space to become available */
	unsigned long		user_call_ID;	/* user-defined call ID */
	unsigned long		creation_jif;	/* time of call creation */
	unsigned long		flags;
	unsigned long		events;
	spinlock_t		lock;
	rwlock_t		state_lock;	/* lock for state transition */
	atomic_t		usage;
	atomic_t		sequence;	/* Tx data packet sequence counter */
	u32			local_abort;	/* local abort code */
	u32			remote_abort;	/* remote abort code */
	int			error;		/* local error incurred */
	enum rxrpc_call_state	state : 8;	/* current state of call */
	int			debug_id;	/* debug ID for printks */
	u8			channel;	/* connection channel occupied by this call */

	/* transmission-phase ACK management */
	u8			acks_head;	/* offset into window of first entry */
	u8			acks_tail;	/* offset into window of last entry */
	u8			acks_winsz;	/* size of un-ACK'd window */
	u8			acks_unacked;	/* lowest unacked packet in last ACK received */
	int			acks_latest;	/* serial number of latest ACK received */
	rxrpc_seq_t		acks_hard;	/* highest definitively ACK'd msg seq */
	unsigned long		*acks_window;	/* sent packet window
						 * - elements are pointers with LSB set if ACK'd
						 */

	/* receive-phase ACK management */
	rxrpc_seq_t		rx_data_expect;	/* next data seq ID expected to be received */
	rxrpc_seq_t		rx_data_post;	/* next data seq ID expected to be posted */
	rxrpc_seq_t		rx_data_recv;	/* last data seq ID encountered by recvmsg */
	rxrpc_seq_t		rx_data_eaten;	/* last data seq ID consumed by recvmsg */
	rxrpc_seq_t		rx_first_oos;	/* first packet in rx_oos_queue (or 0) */
	rxrpc_seq_t		ackr_win_top;	/* top of ACK window (rx_data_eaten is bottom) */
	rxrpc_seq_t		ackr_prev_seq;	/* previous sequence number received */
	u8			ackr_reason;	/* reason to ACK */
	rxrpc_serial_t		ackr_serial;	/* serial of packet being ACK'd */
	atomic_t		ackr_not_idle;	/* number of packets in Rx queue */

	/* received packet records, 1 bit per record */
#define RXRPC_ACKR_WINDOW_ASZ DIV_ROUND_UP(RXRPC_MAXACKS, BITS_PER_LONG)
	unsigned long		ackr_window[RXRPC_ACKR_WINDOW_ASZ + 1];

	struct hlist_node	hash_node;
	unsigned long		hash_key;	/* Full hash key */
	u8			in_clientflag;	/* Copy of conn->in_clientflag for hashing */
	struct rxrpc_local	*local;		/* Local endpoint. Used for hashing. */
	sa_family_t		proto;		/* Frame protocol */
	u32			call_id;	/* call ID on connection */
	u32			cid;		/* connection ID plus channel index */
	u32			epoch;		/* epoch of this connection */
	u16			service_id;	/* service ID */
	union {					/* Peer IP address for hashing */
		__be32	ipv4_addr;
		__u8	ipv6_addr[16];		/* Anticipates eventual IPv6 support */
	} peer_ip;
};

/*
 * locally abort an RxRPC call
 */
static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
{
	write_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE) {
		call->local_abort = abort_code;
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
	}
	write_unlock_bh(&call->state_lock);
}
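/*
 * Illustrative sketch (not part of this header): rxrpc_abort_call() only
 * records the abort code and raises RXRPC_CALL_EV_ABORT; the caller still
 * has to queue the call so the event actually gets processed, e.g.:
 *
 *	rxrpc_abort_call(call, abort_code);
 *	rxrpc_queue_call(call);
 *
 * (abort_code here stands for whichever Rx abort value the caller wants to
 * report; the specific codes are defined elsewhere in the AF_RXRPC sources.)
 */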

/*
 * af_rxrpc.c
 */
extern atomic_t rxrpc_n_skbs;
extern u32 rxrpc_epoch;
extern atomic_t rxrpc_debug_id;
extern struct workqueue_struct *rxrpc_workqueue;

extern struct rxrpc_transport *rxrpc_name_to_transport(struct rxrpc_sock *,
						       struct sockaddr *,
						       int, int, gfp_t);

/*
 * call_accept.c
 */
void rxrpc_accept_incoming_calls(struct work_struct *);
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long);
int rxrpc_reject_call(struct rxrpc_sock *);

/*
 * call_event.c
 */
void __rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool);
void rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool);
void rxrpc_process_call(struct work_struct *);

/*
 * call_object.c
 */
extern unsigned int rxrpc_max_call_lifetime;
extern unsigned int rxrpc_dead_call_expiry;
extern struct kmem_cache *rxrpc_call_jar;
extern struct list_head rxrpc_calls;
extern rwlock_t rxrpc_call_lock;

struct rxrpc_call *rxrpc_find_call_hash(struct rxrpc_host_header *,
					void *, sa_family_t, const void *);
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
					 struct rxrpc_transport *,
					 struct rxrpc_conn_bundle *,
					 unsigned long, gfp_t);
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *,
				       struct rxrpc_connection *,
				       struct rxrpc_host_header *);
void rxrpc_release_call(struct rxrpc_call *);
void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
void __rxrpc_put_call(struct rxrpc_call *);
void __exit rxrpc_destroy_all_calls(void);

/*
 * conn_event.c
 */
void rxrpc_process_connection(struct work_struct *);
void rxrpc_reject_packet(struct rxrpc_local *, struct sk_buff *);
void rxrpc_reject_packets(struct work_struct *);

/*
 * conn_object.c
 */
extern unsigned int rxrpc_connection_expiry;
extern struct list_head rxrpc_connections;
extern rwlock_t rxrpc_connection_lock;

struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *,
					   struct rxrpc_transport *,
					   struct key *, u16, gfp_t);
void rxrpc_put_bundle(struct rxrpc_transport *, struct rxrpc_conn_bundle *);
int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_transport *,
		       struct rxrpc_conn_bundle *, struct rxrpc_call *, gfp_t);
void rxrpc_put_connection(struct rxrpc_connection *);
void __exit rxrpc_destroy_all_connections(void);
struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *,
					       struct rxrpc_host_header *);
extern struct rxrpc_connection *
rxrpc_incoming_connection(struct rxrpc_transport *, struct rxrpc_host_header *);

/*
 * input.c
 */
void rxrpc_data_ready(struct sock *);
int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool, bool);
void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *);

/*
 * insecure.c
 */
extern const struct rxrpc_security rxrpc_no_security;

/*
 * key.c
 */
extern struct key_type key_type_rxrpc;
extern struct key_type key_type_rxrpc_s;

int rxrpc_request_key(struct rxrpc_sock *, char __user *, int);
int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int);
int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time_t,
			      u32);

/*
 * local_object.c
 */
extern rwlock_t rxrpc_local_lock;

struct rxrpc_local *rxrpc_lookup_local(struct sockaddr_rxrpc *);
void rxrpc_put_local(struct rxrpc_local *);
void __exit rxrpc_destroy_all_locals(void);

/*
 * misc.c
 */
extern unsigned int rxrpc_max_backlog __read_mostly;
extern unsigned int rxrpc_requested_ack_delay;
extern unsigned int rxrpc_soft_ack_delay;
extern unsigned int rxrpc_idle_ack_delay;
extern unsigned int rxrpc_rx_window_size;
extern unsigned int rxrpc_rx_mtu;
extern unsigned int rxrpc_rx_jumbo_max;

extern const char *const rxrpc_pkts[];
extern const s8 rxrpc_ack_priority[];

extern const char *rxrpc_acks(u8 reason);

/*
 * output.c
 */
extern unsigned int rxrpc_resend_timeout;

int rxrpc_send_packet(struct rxrpc_transport *, struct sk_buff *);
int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);

/*
 * peer_error.c
 */
void rxrpc_UDP_error_report(struct sock *);
void rxrpc_UDP_error_handler(struct work_struct *);

/*
 * peer_object.c
 */
struct rxrpc_peer *rxrpc_get_peer(struct sockaddr_rxrpc *, gfp_t);
void rxrpc_put_peer(struct rxrpc_peer *);
struct rxrpc_peer *rxrpc_find_peer(struct rxrpc_local *, __be32, __be16);
void __exit rxrpc_destroy_all_peers(void);

/*
 * proc.c
 */
extern const char *const rxrpc_call_states[];
extern const struct file_operations rxrpc_call_seq_fops;
extern const struct file_operations rxrpc_connection_seq_fops;

/*
 * recvmsg.c
 */
void rxrpc_remove_user_ID(struct rxrpc_sock *, struct rxrpc_call *);
int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);

/*
 * rxkad.c
 */
#ifdef CONFIG_RXKAD
extern const struct rxrpc_security rxkad;
#endif

/*
 * security.c
 */
int __init rxrpc_init_security(void);
void rxrpc_exit_security(void);
int rxrpc_init_client_conn_security(struct rxrpc_connection *);
int rxrpc_init_server_conn_security(struct rxrpc_connection *);

/*
 * skbuff.c
 */
void rxrpc_packet_destructor(struct sk_buff *);

/*
 * sysctl.c
 */
#ifdef CONFIG_SYSCTL
extern int __init rxrpc_sysctl_init(void);
extern void rxrpc_sysctl_exit(void);
#else
static inline int __init rxrpc_sysctl_init(void) { return 0; }
static inline void rxrpc_sysctl_exit(void) {}
#endif

/*
 * transport.c
 */
extern unsigned int rxrpc_transport_expiry;

struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *,
					    struct rxrpc_peer *, gfp_t);
void rxrpc_put_transport(struct rxrpc_transport *);
void __exit rxrpc_destroy_all_transports(void);
struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *,
					     struct rxrpc_peer *);

/*
 * debug tracing
 */
extern unsigned int rxrpc_debug;

#define dbgprintk(FMT,...) \
	printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__)

#define kenter(FMT,...)	dbgprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define kleave(FMT,...)	dbgprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define kdebug(FMT,...)	dbgprintk("    "FMT ,##__VA_ARGS__)
#define kproto(FMT,...)	dbgprintk("### "FMT ,##__VA_ARGS__)
#define knet(FMT,...)	dbgprintk("@@@ "FMT ,##__VA_ARGS__)


#if defined(__KDEBUG)
#define _enter(FMT,...)	kenter(FMT,##__VA_ARGS__)
#define _leave(FMT,...)	kleave(FMT,##__VA_ARGS__)
#define _debug(FMT,...)	kdebug(FMT,##__VA_ARGS__)
#define _proto(FMT,...)	kproto(FMT,##__VA_ARGS__)
#define _net(FMT,...)	knet(FMT,##__VA_ARGS__)

#elif defined(CONFIG_AF_RXRPC_DEBUG)
#define RXRPC_DEBUG_KENTER	0x01
#define RXRPC_DEBUG_KLEAVE	0x02
#define RXRPC_DEBUG_KDEBUG	0x04
#define RXRPC_DEBUG_KPROTO	0x08
#define RXRPC_DEBUG_KNET	0x10

#define _enter(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KENTER))	\
		kenter(FMT,##__VA_ARGS__);		\
} while (0)

#define _leave(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KLEAVE))	\
		kleave(FMT,##__VA_ARGS__);		\
} while (0)

#define _debug(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KDEBUG))	\
		kdebug(FMT,##__VA_ARGS__);		\
} while (0)

#define _proto(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KPROTO))	\
		kproto(FMT,##__VA_ARGS__);		\
} while (0)

#define _net(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KNET))	\
		knet(FMT,##__VA_ARGS__);		\
} while (0)

#else
#define _enter(FMT,...)	no_printk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define _leave(FMT,...)	no_printk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define _debug(FMT,...)	no_printk("    "FMT ,##__VA_ARGS__)
#define _proto(FMT,...)	no_printk("### "FMT ,##__VA_ARGS__)
#define _net(FMT,...)	no_printk("@@@ "FMT ,##__VA_ARGS__)
#endif
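/*
 * Illustrative sketch (not part of this header): the _enter()/_leave()
 * macros bracket function bodies to trace control flow when debugging is
 * compiled in, for example:
 *
 *	static void example_func(struct rxrpc_call *call)
 *	{
 *		_enter("{%d}", call->debug_id);
 *		...
 *		_leave("");
 *	}
 */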

/*
 * debug assertion checking
 */
#if 1 // defined(__KDEBUGALL)

#define ASSERT(X)						\
do {								\
	if (unlikely(!(X))) {					\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

#define ASSERTCMP(X, OP, Y)						\
do {									\
	unsigned long _x = (unsigned long)(X);				\
	unsigned long _y = (unsigned long)(Y);				\
	if (unlikely(!(_x OP _y))) {					\
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       _x, _x, #OP, _y, _y);				\
		BUG();							\
	}								\
} while (0)

#define ASSERTIF(C, X)						\
do {								\
	if (unlikely((C) && !(X))) {				\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)					\
do {									\
	unsigned long _x = (unsigned long)(X);				\
	unsigned long _y = (unsigned long)(Y);				\
	if (unlikely((C) && !(_x OP _y))) {				\
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       _x, _x, #OP, _y, _y);				\
		BUG();							\
	}								\
} while (0)

#else

#define ASSERT(X)				\
do {						\
} while (0)

#define ASSERTCMP(X, OP, Y)			\
do {						\
} while (0)

#define ASSERTIF(C, X)				\
do {						\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)		\
do {						\
} while (0)

#endif /* __KDEBUGALL */

/*
 * socket buffer accounting / leak finding
 */
static inline void __rxrpc_new_skb(struct sk_buff *skb, const char *fn)
{
	//_net("new skb %p %s [%d]", skb, fn, atomic_read(&rxrpc_n_skbs));
	//atomic_inc(&rxrpc_n_skbs);
}

#define rxrpc_new_skb(skb) __rxrpc_new_skb((skb), __func__)

static inline void __rxrpc_kill_skb(struct sk_buff *skb, const char *fn)
{
	//_net("kill skb %p %s [%d]", skb, fn, atomic_read(&rxrpc_n_skbs));
	//atomic_dec(&rxrpc_n_skbs);
}

#define rxrpc_kill_skb(skb) __rxrpc_kill_skb((skb), __func__)

static inline void __rxrpc_free_skb(struct sk_buff *skb, const char *fn)
{
	if (skb) {
		CHECK_SLAB_OKAY(&skb->users);
		//_net("free skb %p %s [%d]",
		//     skb, fn, atomic_read(&rxrpc_n_skbs));
		//atomic_dec(&rxrpc_n_skbs);
		kfree_skb(skb);
	}
}

#define rxrpc_free_skb(skb) __rxrpc_free_skb((skb), __func__)

static inline void rxrpc_purge_queue(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = skb_dequeue((list))) != NULL)
		rxrpc_free_skb(skb);
}

static inline void __rxrpc_get_local(struct rxrpc_local *local, const char *f)
{
	CHECK_SLAB_OKAY(&local->usage);
	if (atomic_inc_return(&local->usage) == 1)
		printk("resurrected (%s)\n", f);
}

#define rxrpc_get_local(LOCAL) __rxrpc_get_local((LOCAL), __func__)

#define rxrpc_get_call(CALL)				\
do {							\
	CHECK_SLAB_OKAY(&(CALL)->usage);		\
	if (atomic_inc_return(&(CALL)->usage) == 1)	\
		BUG();					\
} while (0)

#define rxrpc_put_call(CALL)				\
do {							\
	__rxrpc_put_call(CALL);				\
} while (0)