/* AF_RXRPC internal definitions
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <rxrpc/packet.h>

#if 0
#define CHECK_SLAB_OKAY(X) \
	BUG_ON(atomic_read((X)) >> (sizeof(atomic_t) - 2) == \
	       (POISON_FREE << 8 | POISON_FREE))
#else
#define CHECK_SLAB_OKAY(X) do {} while (0)
#endif

#define FCRYPT_BSIZE 8
struct rxrpc_crypt {
	union {
		u8	x[FCRYPT_BSIZE];
		__be32	n[2];
	};
} __attribute__((aligned(8)));

#define rxrpc_queue_work(WS)	queue_work(rxrpc_workqueue, (WS))
#define rxrpc_queue_delayed_work(WS,D)	\
	queue_delayed_work(rxrpc_workqueue, (WS), (D))

#define rxrpc_queue_call(CALL)	rxrpc_queue_work(&(CALL)->processor)
#define rxrpc_queue_conn(CONN)	rxrpc_queue_work(&(CONN)->processor)

/*
 * sk_state for RxRPC sockets
 */
enum {
	RXRPC_UNCONNECTED = 0,
	RXRPC_CLIENT_BOUND,		/* client local address bound */
	RXRPC_CLIENT_CONNECTED,		/* client is connected */
	RXRPC_SERVER_BOUND,		/* server local address bound */
	RXRPC_SERVER_LISTENING,		/* server listening for connections */
	RXRPC_CLOSE,			/* socket is being closed */
};

/*
 * RxRPC socket definition
 */
struct rxrpc_sock {
	/* WARNING: sk has to be the first member */
	struct sock		sk;
	rxrpc_interceptor_t	interceptor;	/* kernel service Rx interceptor function */
	struct rxrpc_local	*local;		/* local endpoint */
	struct rxrpc_transport	*trans;		/* transport handler */
	struct rxrpc_conn_bundle *bundle;	/* virtual connection bundle */
	struct rxrpc_connection	*conn;		/* exclusive virtual connection */
	struct list_head	listen_link;	/* link in the local endpoint's listen list */
	struct list_head	secureq;	/* calls awaiting connection security clearance */
	struct list_head	acceptq;	/* calls awaiting acceptance */
	struct key		*key;		/* security for this socket */
	struct key		*securities;	/* list of server security descriptors */
	struct rb_root		calls;		/* outstanding calls on this socket */
	unsigned long		flags;
#define RXRPC_SOCK_EXCLUSIVE_CONN	1	/* exclusive connection for a client socket */
	rwlock_t		call_lock;	/* lock for calls */
	u32			min_sec_level;	/* minimum security level */
#define RXRPC_SECURITY_MAX	RXRPC_SECURITY_ENCRYPT
	struct sockaddr_rxrpc	srx;		/* local address */
	sa_family_t		proto;		/* protocol created with */
};

#define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk)

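/*
 * Example (illustrative only): because sk must be the first member of
 * struct rxrpc_sock, a handler that is given a struct sock pointer can
 * recover the rxrpc-specific state with something like:
 *
 *	struct rxrpc_sock *rx = rxrpc_sk(sk);
 */
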
/*
 * CPU-byteorder normalised Rx packet header.
 */
struct rxrpc_host_header {
	u32		epoch;		/* client boot timestamp */
	u32		cid;		/* connection and channel ID */
	u32		callNumber;	/* call ID (0 for connection-level packets) */
	u32		seq;		/* sequence number of pkt in call stream */
	u32		serial;		/* serial number of pkt sent to network */
	u8		type;		/* packet type */
	u8		flags;		/* packet flags */
	u8		userStatus;	/* app-layer defined status */
	u8		securityIndex;	/* security protocol ID */
	union {
		u16	_rsvd;		/* reserved */
		u16	cksum;		/* kerberos security checksum */
	};
	u16		serviceId;	/* service ID */
} __packed;

/*
 * RxRPC socket buffer private variables
 * - max 48 bytes (struct sk_buff::cb)
 */
struct rxrpc_skb_priv {
	struct rxrpc_call	*call;		/* call with which associated */
	unsigned long		resend_at;	/* time in jiffies at which to resend */
	union {
		unsigned int	offset;		/* offset into buffer of next read */
		int		remain;		/* amount of space remaining for next write */
		u32		error;		/* network error code */
		bool		need_resend;	/* T if needs resending */
	};

	struct rxrpc_host_header hdr;		/* RxRPC packet header from this packet */
};

#define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb)

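/*
 * Example (illustrative only): the per-packet private data is typically
 * reached along the lines of:
 *
 *	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 *
 *	if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA)
 *		...
 */
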
enum rxrpc_command {
	RXRPC_CMD_SEND_DATA,		/* send data message */
	RXRPC_CMD_SEND_ABORT,		/* request abort generation */
	RXRPC_CMD_ACCEPT,		/* [server] accept incoming call */
	RXRPC_CMD_REJECT_BUSY,		/* [server] reject a call as busy */
};

/*
 * RxRPC security module interface
 */
struct rxrpc_security {
	struct module		*owner;		/* providing module */
	struct list_head	link;		/* link in master list */
	const char		*name;		/* name of this service */
	u8			security_index;	/* security type provided */

	/* initialise a connection's security */
	int (*init_connection_security)(struct rxrpc_connection *);

	/* prime a connection's packet security */
	void (*prime_packet_security)(struct rxrpc_connection *);

	/* impose security on a packet */
	int (*secure_packet)(const struct rxrpc_call *,
			     struct sk_buff *,
			     size_t,
			     void *);

	/* verify the security on a received packet */
	int (*verify_packet)(const struct rxrpc_call *, struct sk_buff *,
			     u32 *);

	/* issue a challenge */
	int (*issue_challenge)(struct rxrpc_connection *);

	/* respond to a challenge */
	int (*respond_to_challenge)(struct rxrpc_connection *,
				    struct sk_buff *,
				    u32 *);

	/* verify a response */
	int (*verify_response)(struct rxrpc_connection *,
			       struct sk_buff *,
			       u32 *);

	/* clear connection security */
	void (*clear)(struct rxrpc_connection *);
};

/*
 * RxRPC local transport endpoint definition
 * - matched by local port, address and protocol type
 */
struct rxrpc_local {
	struct socket		*socket;	/* my UDP socket */
	struct work_struct	destroyer;	/* endpoint destroyer */
	struct work_struct	acceptor;	/* incoming call processor */
	struct work_struct	rejecter;	/* packet reject writer */
	struct work_struct	event_processor; /* endpoint event processor */
	struct list_head	services;	/* services listening on this endpoint */
	struct list_head	link;		/* link in endpoint list */
	struct rw_semaphore	defrag_sem;	/* control re-enablement of IP DF bit */
	struct sk_buff_head	accept_queue;	/* incoming calls awaiting acceptance */
	struct sk_buff_head	reject_queue;	/* packets awaiting rejection */
	struct sk_buff_head	event_queue;	/* endpoint event packets awaiting processing */
	spinlock_t		lock;		/* access lock */
	rwlock_t		services_lock;	/* lock for services list */
	atomic_t		usage;
	int			debug_id;	/* debug ID for printks */
	volatile char		error_rcvd;	/* T if received ICMP error outstanding */
	struct sockaddr_rxrpc	srx;		/* local address */
};

/*
 * RxRPC remote transport endpoint definition
 * - matched by remote port, address and protocol type
 * - holds the connection ID counter for connections between the two endpoints
 */
struct rxrpc_peer {
	struct work_struct	destroyer;	/* peer destroyer */
	struct list_head	link;		/* link in master peer list */
	struct list_head	error_targets;	/* targets for net error distribution */
	spinlock_t		lock;		/* access lock */
	atomic_t		usage;
	unsigned int		if_mtu;		/* interface MTU for this peer */
	unsigned int		mtu;		/* network MTU for this peer */
	unsigned int		maxdata;	/* data size (MTU - hdrsize) */
	unsigned short		hdrsize;	/* header size (IP + UDP + RxRPC) */
	int			debug_id;	/* debug ID for printks */
	int			net_error;	/* network error distributed */
	struct sockaddr_rxrpc	srx;		/* remote address */

	/* calculated RTT cache */
#define RXRPC_RTT_CACHE_SIZE 32
	suseconds_t		rtt;		/* current RTT estimate (in uS) */
	unsigned int		rtt_point;	/* next entry at which to insert */
	unsigned int		rtt_usage;	/* amount of cache actually used */
	suseconds_t		rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* calculated RTT cache */
};

/*
 * RxRPC point-to-point transport / connection manager definition
 * - handles a bundle of connections between two endpoints
 * - matched by { local, peer }
 */
struct rxrpc_transport {
	struct rxrpc_local	*local;		/* local transport endpoint */
	struct rxrpc_peer	*peer;		/* remote transport endpoint */
	struct work_struct	error_handler;	/* network error distributor */
	struct rb_root		bundles;	/* client connection bundles on this transport */
	struct rb_root		client_conns;	/* client connections on this transport */
	struct rb_root		server_conns;	/* server connections on this transport */
	struct list_head	link;		/* link in master session list */
	struct sk_buff_head	error_queue;	/* error packets awaiting processing */
	unsigned long		put_time;	/* time at which to reap */
	spinlock_t		client_lock;	/* client connection allocation lock */
	rwlock_t		conn_lock;	/* lock for active/dead connections */
	atomic_t		usage;
	int			debug_id;	/* debug ID for printks */
	unsigned int		conn_idcounter;	/* connection ID counter (client) */
};

/*
 * RxRPC client connection bundle
 * - matched by { transport, service_id, key }
 */
struct rxrpc_conn_bundle {
	struct rb_node		node;		/* node in transport's lookup tree */
	struct list_head	unused_conns;	/* unused connections in this bundle */
	struct list_head	avail_conns;	/* available connections in this bundle */
	struct list_head	busy_conns;	/* busy connections in this bundle */
	struct key		*key;		/* security for this bundle */
	wait_queue_head_t	chanwait;	/* wait for channel to become available */
	atomic_t		usage;
	int			debug_id;	/* debug ID for printks */
	unsigned short		num_conns;	/* number of connections in this bundle */
	u16			service_id;	/* Service ID for this bundle */
	u8			security_ix;	/* security type */
};

/*
 * RxRPC connection definition
 * - matched by { transport, service_id, conn_id, direction, key }
 * - each connection can only handle four simultaneous calls
 */
struct rxrpc_connection {
	struct rxrpc_transport	*trans;		/* transport session */
	struct rxrpc_conn_bundle *bundle;	/* connection bundle (client) */
	struct work_struct	processor;	/* connection event processor */
	struct rb_node		node;		/* node in transport's lookup tree */
	struct list_head	link;		/* link in master connection list */
	struct list_head	bundle_link;	/* link in bundle */
	struct rb_root		calls;		/* calls on this connection */
	struct sk_buff_head	rx_queue;	/* received conn-level packets */
	struct rxrpc_call	*channels[RXRPC_MAXCALLS]; /* channels (active calls) */
	struct rxrpc_security	*security;	/* applied security module */
	struct key		*key;		/* security for this connection (client) */
	struct key		*server_key;	/* security for this service */
	struct crypto_blkcipher	*cipher;	/* encryption handle */
	struct rxrpc_crypt	csum_iv;	/* packet checksum base */
	unsigned long		events;
#define RXRPC_CONN_CHALLENGE	0		/* send challenge packet */
	unsigned long		put_time;	/* time at which to reap */
	rwlock_t		lock;		/* access lock */
	spinlock_t		state_lock;	/* state-change lock */
	atomic_t		usage;
	enum {					/* current state of connection */
		RXRPC_CONN_UNUSED,		/* - connection not yet attempted */
		RXRPC_CONN_CLIENT,		/* - client connection */
		RXRPC_CONN_SERVER_UNSECURED,	/* - server unsecured connection */
		RXRPC_CONN_SERVER_CHALLENGING,	/* - server challenging for security */
		RXRPC_CONN_SERVER,		/* - server secured connection */
		RXRPC_CONN_REMOTELY_ABORTED,	/* - conn aborted by peer */
		RXRPC_CONN_LOCALLY_ABORTED,	/* - conn aborted locally */
		RXRPC_CONN_NETWORK_ERROR,	/* - conn terminated by network error */
	} state;
	int			error;		/* error code for local abort */
	int			debug_id;	/* debug ID for printks */
	unsigned int		call_counter;	/* call ID counter */
	atomic_t		serial;		/* packet serial number counter */
	atomic_t		hi_serial;	/* highest serial number received */
	u8			avail_calls;	/* number of calls available */
	u8			size_align;	/* data size alignment (for security) */
	u8			header_size;	/* rxrpc + security header size */
	u8			security_size;	/* security header size */
	u32			security_level;	/* security level negotiated */
	u32			security_nonce;	/* response re-use preventer */
	u32			epoch;		/* epoch of this connection */
	u32			cid;		/* connection ID */
	u16			service_id;	/* service ID for this connection */
	u8			security_ix;	/* security type */
	u8			in_clientflag;	/* RXRPC_CLIENT_INITIATED if we are server */
	u8			out_clientflag;	/* RXRPC_CLIENT_INITIATED if we are client */
};

/*
 * Flags in call->flags.
 */
enum rxrpc_call_flag {
	RXRPC_CALL_RELEASED,		/* call has been released - no more message to userspace */
	RXRPC_CALL_TERMINAL_MSG,	/* call has given the socket its final message */
	RXRPC_CALL_RCVD_LAST,		/* all packets received */
	RXRPC_CALL_RUN_RTIMER,		/* Tx resend timer started */
	RXRPC_CALL_TX_SOFT_ACK,		/* sent some soft ACKs */
	RXRPC_CALL_PROC_BUSY,		/* the processor is busy */
	RXRPC_CALL_INIT_ACCEPT,		/* acceptance was initiated */
	RXRPC_CALL_HAS_USERID,		/* has a user ID attached */
	RXRPC_CALL_EXPECT_OOS,		/* expect out of sequence packets */
};

/*
 * Events that can be raised on a call.
 */
enum rxrpc_call_event {
	RXRPC_CALL_EV_RCVD_ACKALL,	/* ACKALL or reply received */
	RXRPC_CALL_EV_RCVD_BUSY,	/* busy packet received */
	RXRPC_CALL_EV_RCVD_ABORT,	/* abort packet received */
	RXRPC_CALL_EV_RCVD_ERROR,	/* network error received */
	RXRPC_CALL_EV_ACK_FINAL,	/* need to generate final ACK (and release call) */
	RXRPC_CALL_EV_ACK,		/* need to generate ACK */
	RXRPC_CALL_EV_REJECT_BUSY,	/* need to generate busy message */
	RXRPC_CALL_EV_ABORT,		/* need to generate abort */
	RXRPC_CALL_EV_CONN_ABORT,	/* local connection abort generated */
	RXRPC_CALL_EV_RESEND_TIMER,	/* Tx resend timer expired */
	RXRPC_CALL_EV_RESEND,		/* Tx resend required */
	RXRPC_CALL_EV_DRAIN_RX_OOS,	/* drain the Rx out of sequence queue */
	RXRPC_CALL_EV_LIFE_TIMER,	/* call's lifetimer ran out */
	RXRPC_CALL_EV_ACCEPTED,		/* incoming call accepted by userspace app */
	RXRPC_CALL_EV_SECURED,		/* incoming call's connection is now secure */
	RXRPC_CALL_EV_POST_ACCEPT,	/* need to post an "accept?" message to the app */
	RXRPC_CALL_EV_RELEASE,		/* need to release the call's resources */
};

/*
 * The states that a call can be in.
 */
enum rxrpc_call_state {
	RXRPC_CALL_CLIENT_SEND_REQUEST,	/* - client sending request phase */
	RXRPC_CALL_CLIENT_AWAIT_REPLY,	/* - client awaiting reply */
	RXRPC_CALL_CLIENT_RECV_REPLY,	/* - client receiving reply phase */
	RXRPC_CALL_CLIENT_FINAL_ACK,	/* - client sending final ACK phase */
	RXRPC_CALL_SERVER_SECURING,	/* - server securing request connection */
	RXRPC_CALL_SERVER_ACCEPTING,	/* - server accepting request */
	RXRPC_CALL_SERVER_RECV_REQUEST,	/* - server receiving request */
	RXRPC_CALL_SERVER_ACK_REQUEST,	/* - server pending ACK of request */
	RXRPC_CALL_SERVER_SEND_REPLY,	/* - server sending reply */
	RXRPC_CALL_SERVER_AWAIT_ACK,	/* - server awaiting final ACK */
	RXRPC_CALL_COMPLETE,		/* - call completed */
	RXRPC_CALL_SERVER_BUSY,		/* - call rejected by busy server */
	RXRPC_CALL_REMOTELY_ABORTED,	/* - call aborted by peer */
	RXRPC_CALL_LOCALLY_ABORTED,	/* - call aborted locally on error or close */
	RXRPC_CALL_NETWORK_ERROR,	/* - call terminated by network error */
	RXRPC_CALL_DEAD,		/* - call is dead */
	NR__RXRPC_CALL_STATES
};

/*
 * RxRPC call definition
 * - matched by { connection, call_id }
 */
struct rxrpc_call {
	struct rxrpc_connection	*conn;		/* connection carrying call */
	struct rxrpc_sock	*socket;	/* socket responsible */
	struct timer_list	lifetimer;	/* lifetime remaining on call */
	struct timer_list	deadspan;	/* reap timer for re-ACK'ing, etc */
	struct timer_list	ack_timer;	/* ACK generation timer */
	struct timer_list	resend_timer;	/* Tx resend timer */
	struct work_struct	destroyer;	/* call destroyer */
	struct work_struct	processor;	/* packet processor and ACK generator */
	struct list_head	link;		/* link in master call list */
	struct list_head	error_link;	/* link in error distribution list */
	struct list_head	accept_link;	/* calls awaiting acceptance */
	struct rb_node		sock_node;	/* node in socket call tree */
	struct rb_node		conn_node;	/* node in connection call tree */
	struct sk_buff_head	rx_queue;	/* received packets */
	struct sk_buff_head	rx_oos_queue;	/* packets received out of sequence */
	struct sk_buff		*tx_pending;	/* Tx socket buffer being filled */
	wait_queue_head_t	tx_waitq;	/* wait for Tx window space to become available */
	unsigned long		user_call_ID;	/* user-defined call ID */
	unsigned long		creation_jif;	/* time of call creation */
	unsigned long		flags;
	unsigned long		events;
	spinlock_t		lock;
	rwlock_t		state_lock;	/* lock for state transition */
	atomic_t		usage;
	atomic_t		sequence;	/* Tx data packet sequence counter */
	u32			abort_code;	/* local/remote abort code */
	enum rxrpc_call_state	state : 8;	/* current state of call */
	int			debug_id;	/* debug ID for printks */
	u8			channel;	/* connection channel occupied by this call */

	/* transmission-phase ACK management */
	u8			acks_head;	/* offset into window of first entry */
	u8			acks_tail;	/* offset into window of last entry */
	u8			acks_winsz;	/* size of un-ACK'd window */
	u8			acks_unacked;	/* lowest unacked packet in last ACK received */
	int			acks_latest;	/* serial number of latest ACK received */
	rxrpc_seq_t		acks_hard;	/* highest definitively ACK'd msg seq */
	unsigned long		*acks_window;	/* sent packet window
						 * - elements are pointers with LSB set if ACK'd
						 */

	/* receive-phase ACK management */
	rxrpc_seq_t		rx_data_expect;	/* next data seq ID expected to be received */
	rxrpc_seq_t		rx_data_post;	/* next data seq ID expected to be posted */
	rxrpc_seq_t		rx_data_recv;	/* last data seq ID encountered by recvmsg */
	rxrpc_seq_t		rx_data_eaten;	/* last data seq ID consumed by recvmsg */
	rxrpc_seq_t		rx_first_oos;	/* first packet in rx_oos_queue (or 0) */
	rxrpc_seq_t		ackr_win_top;	/* top of ACK window (rx_data_eaten is bottom) */
	rxrpc_seq_t		ackr_prev_seq;	/* previous sequence number received */
	u8			ackr_reason;	/* reason to ACK */
	rxrpc_serial_t		ackr_serial;	/* serial of packet being ACK'd */
	atomic_t		ackr_not_idle;	/* number of packets in Rx queue */

	/* received packet records, 1 bit per record */
#define RXRPC_ACKR_WINDOW_ASZ DIV_ROUND_UP(RXRPC_MAXACKS, BITS_PER_LONG)
	unsigned long		ackr_window[RXRPC_ACKR_WINDOW_ASZ + 1];

	struct hlist_node	hash_node;
	unsigned long		hash_key;	/* Full hash key */
	u8			in_clientflag;	/* Copy of conn->in_clientflag for hashing */
	struct rxrpc_local	*local;		/* Local endpoint. Used for hashing. */
	sa_family_t		proto;		/* Frame protocol */
	u32			call_id;	/* call ID on connection */
	u32			cid;		/* connection ID plus channel index */
	u32			epoch;		/* epoch of this connection */
	u16			service_id;	/* service ID */
	union {					/* Peer IP address for hashing */
		__be32	ipv4_addr;
		__u8	ipv6_addr[16];	/* Anticipates eventual IPv6 support */
	} peer_ip;
};

/*
 * locally abort an RxRPC call
 */
static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
{
	write_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE) {
		call->abort_code = abort_code;
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
	}
	write_unlock_bh(&call->state_lock);
}

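/*
 * Example (illustrative): rxrpc_abort_call() only marks the call and raises
 * the RXRPC_CALL_EV_ABORT event; a caller would then typically kick the call
 * processor so that the abort actually gets transmitted, along the lines of:
 *
 *	rxrpc_abort_call(call, RX_CALL_TIMEOUT);
 *	rxrpc_queue_call(call);
 */
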
/*
 * af_rxrpc.c
 */
extern atomic_t rxrpc_n_skbs;
extern u32 rxrpc_epoch;
extern atomic_t rxrpc_debug_id;
extern struct workqueue_struct *rxrpc_workqueue;

/*
 * ar-accept.c
 */
void rxrpc_accept_incoming_calls(struct work_struct *);
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long);
int rxrpc_reject_call(struct rxrpc_sock *);

/*
 * ar-ack.c
 */
extern unsigned int rxrpc_requested_ack_delay;
extern unsigned int rxrpc_soft_ack_delay;
extern unsigned int rxrpc_idle_ack_delay;
extern unsigned int rxrpc_rx_window_size;
extern unsigned int rxrpc_rx_mtu;
extern unsigned int rxrpc_rx_jumbo_max;

void __rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool);
void rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool);
void rxrpc_process_call(struct work_struct *);

/*
 * ar-call.c
 */
extern unsigned int rxrpc_max_call_lifetime;
extern unsigned int rxrpc_dead_call_expiry;
extern struct kmem_cache *rxrpc_call_jar;
extern struct list_head rxrpc_calls;
extern rwlock_t rxrpc_call_lock;

struct rxrpc_call *rxrpc_find_call_hash(struct rxrpc_host_header *,
					void *, sa_family_t, const void *);
struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *,
					 struct rxrpc_transport *,
					 struct rxrpc_conn_bundle *,
					 unsigned long, int, gfp_t);
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *,
				       struct rxrpc_connection *,
				       struct rxrpc_host_header *, gfp_t);
struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *, unsigned long);
void rxrpc_release_call(struct rxrpc_call *);
void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
void __rxrpc_put_call(struct rxrpc_call *);
void __exit rxrpc_destroy_all_calls(void);

/*
 * ar-connection.c
 */
extern unsigned int rxrpc_connection_expiry;
extern struct list_head rxrpc_connections;
extern rwlock_t rxrpc_connection_lock;

struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *,
					   struct rxrpc_transport *,
					   struct key *, u16, gfp_t);
void rxrpc_put_bundle(struct rxrpc_transport *, struct rxrpc_conn_bundle *);
int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_transport *,
		       struct rxrpc_conn_bundle *, struct rxrpc_call *, gfp_t);
void rxrpc_put_connection(struct rxrpc_connection *);
void __exit rxrpc_destroy_all_connections(void);
struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *,
					       struct rxrpc_host_header *);
extern struct rxrpc_connection *
rxrpc_incoming_connection(struct rxrpc_transport *, struct rxrpc_host_header *,
			  gfp_t);

/*
 * ar-connevent.c
 */
void rxrpc_process_connection(struct work_struct *);
void rxrpc_reject_packet(struct rxrpc_local *, struct sk_buff *);
void rxrpc_reject_packets(struct work_struct *);

/*
 * ar-error.c
 */
void rxrpc_UDP_error_report(struct sock *);
void rxrpc_UDP_error_handler(struct work_struct *);

/*
 * ar-input.c
 */
extern const char *rxrpc_pkts[];

void rxrpc_data_ready(struct sock *);
int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool, bool);
void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *);

/*
 * ar-local.c
 */
extern rwlock_t rxrpc_local_lock;

struct rxrpc_local *rxrpc_lookup_local(struct sockaddr_rxrpc *);
void rxrpc_put_local(struct rxrpc_local *);
void __exit rxrpc_destroy_all_locals(void);

/*
 * ar-key.c
 */
extern struct key_type key_type_rxrpc;
extern struct key_type key_type_rxrpc_s;

int rxrpc_request_key(struct rxrpc_sock *, char __user *, int);
int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int);
int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time_t,
			      u32);

/*
 * ar-output.c
 */
extern unsigned int rxrpc_resend_timeout;

int rxrpc_send_packet(struct rxrpc_transport *, struct sk_buff *);
int rxrpc_client_sendmsg(struct rxrpc_sock *, struct rxrpc_transport *,
			 struct msghdr *, size_t);
int rxrpc_server_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);

/*
 * ar-peer.c
 */
struct rxrpc_peer *rxrpc_get_peer(struct sockaddr_rxrpc *, gfp_t);
void rxrpc_put_peer(struct rxrpc_peer *);
struct rxrpc_peer *rxrpc_find_peer(struct rxrpc_local *, __be32, __be16);
void __exit rxrpc_destroy_all_peers(void);

/*
 * ar-proc.c
 */
extern const char *const rxrpc_call_states[];
extern const struct file_operations rxrpc_call_seq_fops;
extern const struct file_operations rxrpc_connection_seq_fops;

/*
 * ar-recvmsg.c
 */
void rxrpc_remove_user_ID(struct rxrpc_sock *, struct rxrpc_call *);
int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);

/*
 * ar-security.c
 */
int rxrpc_register_security(struct rxrpc_security *);
void rxrpc_unregister_security(struct rxrpc_security *);
int rxrpc_init_client_conn_security(struct rxrpc_connection *);
int rxrpc_init_server_conn_security(struct rxrpc_connection *);
int rxrpc_secure_packet(const struct rxrpc_call *, struct sk_buff *, size_t,
			void *);
int rxrpc_verify_packet(const struct rxrpc_call *, struct sk_buff *, u32 *);
void rxrpc_clear_conn_security(struct rxrpc_connection *);

/*
 * ar-skbuff.c
 */
void rxrpc_packet_destructor(struct sk_buff *);

/*
 * ar-transport.c
 */
extern unsigned int rxrpc_transport_expiry;

struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *,
					    struct rxrpc_peer *, gfp_t);
void rxrpc_put_transport(struct rxrpc_transport *);
void __exit rxrpc_destroy_all_transports(void);
struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *,
					     struct rxrpc_peer *);

/*
 * sysctl.c
 */
#ifdef CONFIG_SYSCTL
extern int __init rxrpc_sysctl_init(void);
extern void rxrpc_sysctl_exit(void);
#else
static inline int __init rxrpc_sysctl_init(void) { return 0; }
static inline void rxrpc_sysctl_exit(void) {}
#endif

/*
 * debug tracing
 */
extern unsigned int rxrpc_debug;

#define dbgprintk(FMT,...) \
	printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__)

#define kenter(FMT,...)	dbgprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define kleave(FMT,...)	dbgprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define kdebug(FMT,...)	dbgprintk(" "FMT ,##__VA_ARGS__)
#define kproto(FMT,...)	dbgprintk("### "FMT ,##__VA_ARGS__)
#define knet(FMT,...)	dbgprintk("@@@ "FMT ,##__VA_ARGS__)


#if defined(__KDEBUG)
#define _enter(FMT,...)	kenter(FMT,##__VA_ARGS__)
#define _leave(FMT,...)	kleave(FMT,##__VA_ARGS__)
#define _debug(FMT,...)	kdebug(FMT,##__VA_ARGS__)
#define _proto(FMT,...)	kproto(FMT,##__VA_ARGS__)
#define _net(FMT,...)	knet(FMT,##__VA_ARGS__)

#elif defined(CONFIG_AF_RXRPC_DEBUG)
#define RXRPC_DEBUG_KENTER	0x01
#define RXRPC_DEBUG_KLEAVE	0x02
#define RXRPC_DEBUG_KDEBUG	0x04
#define RXRPC_DEBUG_KPROTO	0x08
#define RXRPC_DEBUG_KNET	0x10

#define _enter(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KENTER))	\
		kenter(FMT,##__VA_ARGS__);		\
} while (0)

#define _leave(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KLEAVE))	\
		kleave(FMT,##__VA_ARGS__);		\
} while (0)

#define _debug(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KDEBUG))	\
		kdebug(FMT,##__VA_ARGS__);		\
} while (0)

#define _proto(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KPROTO))	\
		kproto(FMT,##__VA_ARGS__);		\
} while (0)

#define _net(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KNET))	\
		knet(FMT,##__VA_ARGS__);		\
} while (0)

#else
#define _enter(FMT,...)	no_printk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define _leave(FMT,...)	no_printk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define _debug(FMT,...)	no_printk(" "FMT ,##__VA_ARGS__)
#define _proto(FMT,...)	no_printk("### "FMT ,##__VA_ARGS__)
#define _net(FMT,...)	no_printk("@@@ "FMT ,##__VA_ARGS__)
#endif

/*
 * debug assertion checking
 */
#if 1 // defined(__KDEBUGALL)

#define ASSERT(X)						\
do {								\
	if (unlikely(!(X))) {					\
		printk(KERN_ERR "\n");				\
		printk(KERN_ERR "RxRPC: Assertion failed\n");	\
		BUG();						\
	}							\
} while (0)

#define ASSERTCMP(X, OP, Y)						\
do {									\
	if (unlikely(!((X) OP (Y)))) {					\
		printk(KERN_ERR "\n");					\
		printk(KERN_ERR "RxRPC: Assertion failed\n");		\
		printk(KERN_ERR "%lu " #OP " %lu is false\n",		\
		       (unsigned long)(X), (unsigned long)(Y));		\
		printk(KERN_ERR "0x%lx " #OP " 0x%lx is false\n",	\
		       (unsigned long)(X), (unsigned long)(Y));		\
		BUG();							\
	}								\
} while (0)

#define ASSERTIF(C, X)						\
do {								\
	if (unlikely((C) && !(X))) {				\
		printk(KERN_ERR "\n");				\
		printk(KERN_ERR "RxRPC: Assertion failed\n");	\
		BUG();						\
	}							\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)					\
do {									\
	if (unlikely((C) && !((X) OP (Y)))) {				\
		printk(KERN_ERR "\n");					\
		printk(KERN_ERR "RxRPC: Assertion failed\n");		\
		printk(KERN_ERR "%lu " #OP " %lu is false\n",		\
		       (unsigned long)(X), (unsigned long)(Y));		\
		printk(KERN_ERR "0x%lx " #OP " 0x%lx is false\n",	\
		       (unsigned long)(X), (unsigned long)(Y));		\
		BUG();							\
	}								\
} while (0)

#else

#define ASSERT(X)				\
do {						\
} while (0)

#define ASSERTCMP(X, OP, Y)			\
do {						\
} while (0)

#define ASSERTIF(C, X)				\
do {						\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)		\
do {						\
} while (0)

#endif /* __KDEBUGALL */

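/*
 * Example (illustrative): these macros are used as cheap runtime sanity
 * checks, e.g.:
 *
 *	ASSERT(call != NULL);
 *	ASSERTCMP(atomic_read(&call->usage), >, 0);
 */
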
/*
 * socket buffer accounting / leak finding
 */
static inline void __rxrpc_new_skb(struct sk_buff *skb, const char *fn)
{
	//_net("new skb %p %s [%d]", skb, fn, atomic_read(&rxrpc_n_skbs));
	//atomic_inc(&rxrpc_n_skbs);
}

#define rxrpc_new_skb(skb) __rxrpc_new_skb((skb), __func__)

static inline void __rxrpc_kill_skb(struct sk_buff *skb, const char *fn)
{
	//_net("kill skb %p %s [%d]", skb, fn, atomic_read(&rxrpc_n_skbs));
	//atomic_dec(&rxrpc_n_skbs);
}

#define rxrpc_kill_skb(skb) __rxrpc_kill_skb((skb), __func__)

static inline void __rxrpc_free_skb(struct sk_buff *skb, const char *fn)
{
	if (skb) {
		CHECK_SLAB_OKAY(&skb->users);
		//_net("free skb %p %s [%d]",
		//     skb, fn, atomic_read(&rxrpc_n_skbs));
		//atomic_dec(&rxrpc_n_skbs);
		kfree_skb(skb);
	}
}

#define rxrpc_free_skb(skb) __rxrpc_free_skb((skb), __func__)

static inline void rxrpc_purge_queue(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = skb_dequeue((list))) != NULL)
		rxrpc_free_skb(skb);
}

static inline void __rxrpc_get_local(struct rxrpc_local *local, const char *f)
{
	CHECK_SLAB_OKAY(&local->usage);
	if (atomic_inc_return(&local->usage) == 1)
		printk("resurrected (%s)\n", f);
}

#define rxrpc_get_local(LOCAL) __rxrpc_get_local((LOCAL), __func__)

#define rxrpc_get_call(CALL)				\
do {							\
	CHECK_SLAB_OKAY(&(CALL)->usage);		\
	if (atomic_inc_return(&(CALL)->usage) == 1)	\
		BUG();					\
} while (0)

#define rxrpc_put_call(CALL)				\
do {							\
	__rxrpc_put_call(CALL);				\
} while (0)