/* AF_RXRPC internal definitions
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/atomic.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <rxrpc/packet.h>

#if 0
#define CHECK_SLAB_OKAY(X) \
	BUG_ON(atomic_read((X)) >> (sizeof(atomic_t) - 2) == \
	       (POISON_FREE << 8 | POISON_FREE))
#else
#define CHECK_SLAB_OKAY(X) do {} while (0)
#endif
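/*
 * Added note (not part of the original commentary): with the "#if 0" branch
 * above enabled, CHECK_SLAB_OKAY() is pointed at a refcount-like word in an
 * object and BUGs if the value looks like freed-slab poison (POISON_FREE).
 * It is used further down this file, e.g. on &skb->users in rxrpc_free_skb()
 * and on &(CALL)->usage in rxrpc_get_call():
 *
 *	CHECK_SLAB_OKAY(&skb->users);
 */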

#define FCRYPT_BSIZE 8
struct rxrpc_crypt {
	union {
		u8	x[FCRYPT_BSIZE];
		__be32	n[2];
	};
} __attribute__((aligned(8)));

#define rxrpc_queue_work(WS)	queue_work(rxrpc_workqueue, (WS))
#define rxrpc_queue_delayed_work(WS,D)	\
	queue_delayed_work(rxrpc_workqueue, (WS), (D))

#define rxrpc_queue_call(CALL)	rxrpc_queue_work(&(CALL)->processor)
#define rxrpc_queue_conn(CONN)	rxrpc_queue_work(&(CONN)->processor)

/*
 * sk_state for RxRPC sockets
 */
enum {
	RXRPC_UNBOUND = 0,
	RXRPC_CLIENT_UNBOUND,		/* Unbound socket used as client */
	RXRPC_CLIENT_BOUND,		/* client local address bound */
	RXRPC_SERVER_BOUND,		/* server local address bound */
	RXRPC_SERVER_LISTENING,		/* server listening for connections */
	RXRPC_CLOSE,			/* socket is being closed */
};

/*
 * RxRPC socket definition
 */
struct rxrpc_sock {
	/* WARNING: sk has to be the first member */
	struct sock		sk;
	rxrpc_interceptor_t	interceptor;	/* kernel service Rx interceptor function */
	struct rxrpc_local	*local;		/* local endpoint */
	struct rxrpc_connection	*conn;		/* exclusive virtual connection */
	struct list_head	listen_link;	/* link in the local endpoint's listen list */
	struct list_head	secureq;	/* calls awaiting connection security clearance */
	struct list_head	acceptq;	/* calls awaiting acceptance */
	struct key		*key;		/* security for this socket */
	struct key		*securities;	/* list of server security descriptors */
	struct rb_root		calls;		/* outstanding calls on this socket */
	unsigned long		flags;
#define RXRPC_SOCK_CONNECTED		0	/* connect_srx is set */
#define RXRPC_SOCK_EXCLUSIVE_CONN	1	/* exclusive connection for a client socket */
	rwlock_t		call_lock;	/* lock for calls */
	u32			min_sec_level;	/* minimum security level */
#define RXRPC_SECURITY_MAX	RXRPC_SECURITY_ENCRYPT
	struct sockaddr_rxrpc	srx;		/* local address */
	struct sockaddr_rxrpc	connect_srx;	/* Default client address from connect() */
	sa_family_t		proto;		/* protocol created with */
};

#define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk)

/*
 * CPU-byteorder normalised Rx packet header.
 */
struct rxrpc_host_header {
	u32		epoch;		/* client boot timestamp */
	u32		cid;		/* connection and channel ID */
	u32		callNumber;	/* call ID (0 for connection-level packets) */
	u32		seq;		/* sequence number of pkt in call stream */
	u32		serial;		/* serial number of pkt sent to network */
	u8		type;		/* packet type */
	u8		flags;		/* packet flags */
	u8		userStatus;	/* app-layer defined status */
	u8		securityIndex;	/* security protocol ID */
	union {
		u16	_rsvd;		/* reserved */
		u16	cksum;		/* kerberos security checksum */
	};
	u16		serviceId;	/* service ID */
} __packed;

/*
 * RxRPC socket buffer private variables
 * - max 48 bytes (struct sk_buff::cb)
 */
struct rxrpc_skb_priv {
	struct rxrpc_call	*call;		/* call with which associated */
	unsigned long		resend_at;	/* time in jiffies at which to resend */
	union {
		unsigned int	offset;		/* offset into buffer of next read */
		int		remain;		/* amount of space remaining for next write */
		u32		error;		/* network error code */
		bool		need_resend;	/* T if needs resending */
	};

	struct rxrpc_host_header hdr;		/* RxRPC packet header from this packet */
};

#define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb)
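/*
 * Typical use of the accessor above (an illustrative sketch, not lifted
 * verbatim from any one caller): map a socket buffer's control block onto
 * the per-packet private state.
 *
 *	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 *
 *	if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA)
 *		...
 */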

enum rxrpc_command {
	RXRPC_CMD_SEND_DATA,		/* send data message */
	RXRPC_CMD_SEND_ABORT,		/* request abort generation */
	RXRPC_CMD_ACCEPT,		/* [server] accept incoming call */
	RXRPC_CMD_REJECT_BUSY,		/* [server] reject a call as busy */
};

/*
 * RxRPC security module interface
 */
struct rxrpc_security {
	const char		*name;		/* name of this service */
	u8			security_index;	/* security type provided */

	/* Initialise a security service */
	int (*init)(void);

	/* Clean up a security service */
	void (*exit)(void);

	/* initialise a connection's security */
	int (*init_connection_security)(struct rxrpc_connection *);

	/* prime a connection's packet security */
	void (*prime_packet_security)(struct rxrpc_connection *);

	/* impose security on a packet */
	int (*secure_packet)(const struct rxrpc_call *,
			     struct sk_buff *,
			     size_t,
			     void *);

	/* verify the security on a received packet */
	int (*verify_packet)(const struct rxrpc_call *, struct sk_buff *,
			     u32 *);

	/* issue a challenge */
	int (*issue_challenge)(struct rxrpc_connection *);

	/* respond to a challenge */
	int (*respond_to_challenge)(struct rxrpc_connection *,
				    struct sk_buff *,
				    u32 *);

	/* verify a response */
	int (*verify_response)(struct rxrpc_connection *,
			       struct sk_buff *,
			       u32 *);

	/* clear connection security */
	void (*clear)(struct rxrpc_connection *);
};

/*
 * RxRPC local transport endpoint description
 * - owned by a single AF_RXRPC socket
 * - pointed to by transport socket struct sk_user_data
 */
struct rxrpc_local {
	struct rcu_head		rcu;
	atomic_t		usage;
	struct list_head	link;
	struct socket		*socket;	/* my UDP socket */
	struct work_struct	processor;
	struct list_head	services;	/* services listening on this endpoint */
	struct rw_semaphore	defrag_sem;	/* control re-enablement of IP DF bit */
	struct sk_buff_head	accept_queue;	/* incoming calls awaiting acceptance */
	struct sk_buff_head	reject_queue;	/* packets awaiting rejection */
	struct sk_buff_head	event_queue;	/* endpoint event packets awaiting processing */
	struct mutex		conn_lock;	/* Client connection creation lock */
	spinlock_t		lock;		/* access lock */
	rwlock_t		services_lock;	/* lock for services list */
	int			debug_id;	/* debug ID for printks */
	bool			dead;
	struct sockaddr_rxrpc	srx;		/* local address */
};

/*
 * RxRPC remote transport endpoint definition
 * - matched by local endpoint, remote port, address and protocol type
 */
struct rxrpc_peer {
	struct rcu_head		rcu;		/* This must be first */
	atomic_t		usage;
	unsigned long		hash_key;
	struct hlist_node	hash_link;
	struct rxrpc_local	*local;
	struct hlist_head	error_targets;	/* targets for net error distribution */
	struct work_struct	error_distributor;
	spinlock_t		lock;		/* access lock */
	unsigned int		if_mtu;		/* interface MTU for this peer */
	unsigned int		mtu;		/* network MTU for this peer */
	unsigned int		maxdata;	/* data size (MTU - hdrsize) */
	unsigned short		hdrsize;	/* header size (IP + UDP + RxRPC) */
	int			debug_id;	/* debug ID for printks */
	int			error_report;	/* Net (+0) or local (+1000000) to distribute */
#define RXRPC_LOCAL_ERROR_OFFSET 1000000
	struct sockaddr_rxrpc	srx;		/* remote address */

	/* calculated RTT cache */
#define RXRPC_RTT_CACHE_SIZE 32
	suseconds_t		rtt;		/* current RTT estimate (in uS) */
	unsigned int		rtt_point;	/* next entry at which to insert */
	unsigned int		rtt_usage;	/* amount of cache actually used */
	suseconds_t		rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* calculated RTT cache */
};

/*
 * RxRPC point-to-point transport / connection manager definition
 * - handles a bundle of connections between two endpoints
 * - matched by { local, peer }
 */
struct rxrpc_transport {
	struct rxrpc_local	*local;		/* local transport endpoint */
	struct rxrpc_peer	*peer;		/* remote transport endpoint */
	struct rb_root		bundles;	/* client connection bundles on this transport */
	struct rb_root		client_conns;	/* client connections on this transport */
	struct rb_root		server_conns;	/* server connections on this transport */
	struct list_head	link;		/* link in master session list */
	unsigned long		put_time;	/* time at which to reap */
	spinlock_t		client_lock;	/* client connection allocation lock */
	rwlock_t		conn_lock;	/* lock for active/dead connections */
	atomic_t		usage;
	int			debug_id;	/* debug ID for printks */
	unsigned int		conn_idcounter;	/* connection ID counter (client) */
};

/*
 * RxRPC client connection bundle
 * - matched by { transport, service_id, key }
 */
struct rxrpc_conn_bundle {
	struct rb_node		node;		/* node in transport's lookup tree */
	struct list_head	unused_conns;	/* unused connections in this bundle */
	struct list_head	avail_conns;	/* available connections in this bundle */
	struct list_head	busy_conns;	/* busy connections in this bundle */
	struct key		*key;		/* security for this bundle */
	wait_queue_head_t	chanwait;	/* wait for channel to become available */
	atomic_t		usage;
	int			debug_id;	/* debug ID for printks */
	unsigned short		num_conns;	/* number of connections in this bundle */
	u16			service_id;	/* Service ID for this bundle */
	u8			security_ix;	/* security type */
};

/*
 * RxRPC connection definition
 * - matched by { transport, service_id, conn_id, direction, key }
 * - each connection can only handle four simultaneous calls
 */
struct rxrpc_connection {
	struct rxrpc_transport	*trans;		/* transport session */
	struct rxrpc_conn_bundle *bundle;	/* connection bundle (client) */
	struct work_struct	processor;	/* connection event processor */
	struct rb_node		node;		/* node in transport's lookup tree */
	struct list_head	link;		/* link in master connection list */
	struct list_head	bundle_link;	/* link in bundle */
	struct rb_root		calls;		/* calls on this connection */
	struct sk_buff_head	rx_queue;	/* received conn-level packets */
	struct rxrpc_call	*channels[RXRPC_MAXCALLS]; /* channels (active calls) */
	const struct rxrpc_security *security;	/* applied security module */
	struct key		*key;		/* security for this connection (client) */
	struct key		*server_key;	/* security for this service */
	struct crypto_skcipher	*cipher;	/* encryption handle */
	struct rxrpc_crypt	csum_iv;	/* packet checksum base */
	unsigned long		events;
#define RXRPC_CONN_CHALLENGE	0		/* send challenge packet */
	unsigned long		put_time;	/* time at which to reap */
	rwlock_t		lock;		/* access lock */
	spinlock_t		state_lock;	/* state-change lock */
	atomic_t		usage;
	enum {					/* current state of connection */
		RXRPC_CONN_UNUSED,		/* - connection not yet attempted */
		RXRPC_CONN_CLIENT,		/* - client connection */
		RXRPC_CONN_SERVER_UNSECURED,	/* - server unsecured connection */
		RXRPC_CONN_SERVER_CHALLENGING,	/* - server challenging for security */
		RXRPC_CONN_SERVER,		/* - server secured connection */
		RXRPC_CONN_REMOTELY_ABORTED,	/* - conn aborted by peer */
		RXRPC_CONN_LOCALLY_ABORTED,	/* - conn aborted locally */
		RXRPC_CONN_NETWORK_ERROR,	/* - conn terminated by network error */
	} state;
	u32			local_abort;	/* local abort code */
	u32			remote_abort;	/* remote abort code */
	int			error;		/* local error incurred */
	int			debug_id;	/* debug ID for printks */
	unsigned int		call_counter;	/* call ID counter */
	atomic_t		serial;		/* packet serial number counter */
	atomic_t		hi_serial;	/* highest serial number received */
	u8			avail_calls;	/* number of calls available */
	u8			size_align;	/* data size alignment (for security) */
	u8			header_size;	/* rxrpc + security header size */
	u8			security_size;	/* security header size */
	u32			security_level;	/* security level negotiated */
	u32			security_nonce;	/* response re-use preventer */
	u32			epoch;		/* epoch of this connection */
	u32			cid;		/* connection ID */
	u16			service_id;	/* service ID for this connection */
	u8			security_ix;	/* security type */
	u8			in_clientflag;	/* RXRPC_CLIENT_INITIATED if we are server */
	u8			out_clientflag;	/* RXRPC_CLIENT_INITIATED if we are client */
};

/*
 * Flags in call->flags.
 */
enum rxrpc_call_flag {
	RXRPC_CALL_RELEASED,		/* call has been released - no more message to userspace */
	RXRPC_CALL_TERMINAL_MSG,	/* call has given the socket its final message */
	RXRPC_CALL_RCVD_LAST,		/* all packets received */
	RXRPC_CALL_RUN_RTIMER,		/* Tx resend timer started */
	RXRPC_CALL_TX_SOFT_ACK,		/* sent some soft ACKs */
	RXRPC_CALL_PROC_BUSY,		/* the processor is busy */
	RXRPC_CALL_INIT_ACCEPT,		/* acceptance was initiated */
	RXRPC_CALL_HAS_USERID,		/* has a user ID attached */
	RXRPC_CALL_EXPECT_OOS,		/* expect out of sequence packets */
};

/*
 * Events that can be raised on a call.
 */
enum rxrpc_call_event {
	RXRPC_CALL_EV_RCVD_ACKALL,	/* ACKALL or reply received */
	RXRPC_CALL_EV_RCVD_BUSY,	/* busy packet received */
	RXRPC_CALL_EV_RCVD_ABORT,	/* abort packet received */
	RXRPC_CALL_EV_RCVD_ERROR,	/* network error received */
	RXRPC_CALL_EV_ACK_FINAL,	/* need to generate final ACK (and release call) */
	RXRPC_CALL_EV_ACK,		/* need to generate ACK */
	RXRPC_CALL_EV_REJECT_BUSY,	/* need to generate busy message */
	RXRPC_CALL_EV_ABORT,		/* need to generate abort */
	RXRPC_CALL_EV_CONN_ABORT,	/* local connection abort generated */
	RXRPC_CALL_EV_RESEND_TIMER,	/* Tx resend timer expired */
	RXRPC_CALL_EV_RESEND,		/* Tx resend required */
	RXRPC_CALL_EV_DRAIN_RX_OOS,	/* drain the Rx out of sequence queue */
	RXRPC_CALL_EV_LIFE_TIMER,	/* call's lifetimer ran out */
	RXRPC_CALL_EV_ACCEPTED,		/* incoming call accepted by userspace app */
	RXRPC_CALL_EV_SECURED,		/* incoming call's connection is now secure */
	RXRPC_CALL_EV_POST_ACCEPT,	/* need to post an "accept?" message to the app */
	RXRPC_CALL_EV_RELEASE,		/* need to release the call's resources */
};

/*
 * The states that a call can be in.
 */
enum rxrpc_call_state {
	RXRPC_CALL_CLIENT_SEND_REQUEST,	/* - client sending request phase */
	RXRPC_CALL_CLIENT_AWAIT_REPLY,	/* - client awaiting reply */
	RXRPC_CALL_CLIENT_RECV_REPLY,	/* - client receiving reply phase */
	RXRPC_CALL_CLIENT_FINAL_ACK,	/* - client sending final ACK phase */
	RXRPC_CALL_SERVER_SECURING,	/* - server securing request connection */
	RXRPC_CALL_SERVER_ACCEPTING,	/* - server accepting request */
	RXRPC_CALL_SERVER_RECV_REQUEST,	/* - server receiving request */
	RXRPC_CALL_SERVER_ACK_REQUEST,	/* - server pending ACK of request */
	RXRPC_CALL_SERVER_SEND_REPLY,	/* - server sending reply */
	RXRPC_CALL_SERVER_AWAIT_ACK,	/* - server awaiting final ACK */
	RXRPC_CALL_COMPLETE,		/* - call completed */
	RXRPC_CALL_SERVER_BUSY,		/* - call rejected by busy server */
	RXRPC_CALL_REMOTELY_ABORTED,	/* - call aborted by peer */
	RXRPC_CALL_LOCALLY_ABORTED,	/* - call aborted locally on error or close */
	RXRPC_CALL_NETWORK_ERROR,	/* - call terminated by network error */
	RXRPC_CALL_DEAD,		/* - call is dead */
	NR__RXRPC_CALL_STATES
};

/*
 * RxRPC call definition
 * - matched by { connection, call_id }
 */
struct rxrpc_call {
	struct rxrpc_connection	*conn;		/* connection carrying call */
	struct rxrpc_sock	*socket;	/* socket responsible */
	struct timer_list	lifetimer;	/* lifetime remaining on call */
	struct timer_list	deadspan;	/* reap timer for re-ACK'ing, etc */
	struct timer_list	ack_timer;	/* ACK generation timer */
	struct timer_list	resend_timer;	/* Tx resend timer */
	struct work_struct	destroyer;	/* call destroyer */
	struct work_struct	processor;	/* packet processor and ACK generator */
	struct list_head	link;		/* link in master call list */
	struct hlist_node	error_link;	/* link in error distribution list */
	struct list_head	accept_link;	/* calls awaiting acceptance */
	struct rb_node		sock_node;	/* node in socket call tree */
	struct rb_node		conn_node;	/* node in connection call tree */
	struct sk_buff_head	rx_queue;	/* received packets */
	struct sk_buff_head	rx_oos_queue;	/* packets received out of sequence */
	struct sk_buff		*tx_pending;	/* Tx socket buffer being filled */
	wait_queue_head_t	tx_waitq;	/* wait for Tx window space to become available */
	unsigned long		user_call_ID;	/* user-defined call ID */
	unsigned long		creation_jif;	/* time of call creation */
	unsigned long		flags;
	unsigned long		events;
	spinlock_t		lock;
	rwlock_t		state_lock;	/* lock for state transition */
	atomic_t		usage;
	atomic_t		sequence;	/* Tx data packet sequence counter */
	u32			local_abort;	/* local abort code */
	u32			remote_abort;	/* remote abort code */
	int			error_report;	/* Network error (ICMP/local transport) */
	int			error;		/* Local error incurred */
	enum rxrpc_call_state	state : 8;	/* current state of call */
	int			debug_id;	/* debug ID for printks */
	u8			channel;	/* connection channel occupied by this call */

	/* transmission-phase ACK management */
	u8			acks_head;	/* offset into window of first entry */
	u8			acks_tail;	/* offset into window of last entry */
	u8			acks_winsz;	/* size of un-ACK'd window */
	u8			acks_unacked;	/* lowest unacked packet in last ACK received */
	int			acks_latest;	/* serial number of latest ACK received */
	rxrpc_seq_t		acks_hard;	/* highest definitively ACK'd msg seq */
	unsigned long		*acks_window;	/* sent packet window
						 * - elements are pointers with LSB set if ACK'd
						 */

	/* receive-phase ACK management */
	rxrpc_seq_t		rx_data_expect;	/* next data seq ID expected to be received */
	rxrpc_seq_t		rx_data_post;	/* next data seq ID expected to be posted */
	rxrpc_seq_t		rx_data_recv;	/* last data seq ID encountered by recvmsg */
	rxrpc_seq_t		rx_data_eaten;	/* last data seq ID consumed by recvmsg */
	rxrpc_seq_t		rx_first_oos;	/* first packet in rx_oos_queue (or 0) */
	rxrpc_seq_t		ackr_win_top;	/* top of ACK window (rx_data_eaten is bottom) */
	rxrpc_seq_t		ackr_prev_seq;	/* previous sequence number received */
	u8			ackr_reason;	/* reason to ACK */
	rxrpc_serial_t		ackr_serial;	/* serial of packet being ACK'd */
	atomic_t		ackr_not_idle;	/* number of packets in Rx queue */

	/* received packet records, 1 bit per record */
#define RXRPC_ACKR_WINDOW_ASZ DIV_ROUND_UP(RXRPC_MAXACKS, BITS_PER_LONG)
	unsigned long		ackr_window[RXRPC_ACKR_WINDOW_ASZ + 1];

	struct hlist_node	hash_node;
	unsigned long		hash_key;	/* Full hash key */
	u8			in_clientflag;	/* Copy of conn->in_clientflag for hashing */
	struct rxrpc_local	*local;		/* Local endpoint. Used for hashing. */
	sa_family_t		proto;		/* Frame protocol */
	u32			call_id;	/* call ID on connection */
	u32			cid;		/* connection ID plus channel index */
	u32			epoch;		/* epoch of this connection */
	u16			service_id;	/* service ID */
	union {					/* Peer IP address for hashing */
		__be32	ipv4_addr;
		__u8	ipv6_addr[16];		/* Anticipates eventual IPv6 support */
	} peer_ip;
};

/*
 * locally abort an RxRPC call
 */
static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
{
	write_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE) {
		call->local_abort = abort_code;
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
	}
	write_unlock_bh(&call->state_lock);
}
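/*
 * Note (added for clarity, not part of the original comments): the helper
 * above only records the abort code, moves the call to
 * RXRPC_CALL_LOCALLY_ABORTED and raises RXRPC_CALL_EV_ABORT; the ABORT
 * packet itself is generated later, when the call's event processor
 * (rxrpc_process_call()) handles that event.
 */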

/*
 * af_rxrpc.c
 */
extern atomic_t rxrpc_n_skbs;
extern u32 rxrpc_epoch;
extern atomic_t rxrpc_debug_id;
extern struct workqueue_struct *rxrpc_workqueue;

extern struct rxrpc_transport *rxrpc_name_to_transport(struct rxrpc_sock *,
						       struct sockaddr *,
						       int, int, gfp_t);

/*
 * call_accept.c
 */
void rxrpc_accept_incoming_calls(struct rxrpc_local *);
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long);
int rxrpc_reject_call(struct rxrpc_sock *);

/*
 * call_event.c
 */
void __rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool);
void rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool);
void rxrpc_process_call(struct work_struct *);

/*
 * call_object.c
 */
extern unsigned int rxrpc_max_call_lifetime;
extern unsigned int rxrpc_dead_call_expiry;
extern struct kmem_cache *rxrpc_call_jar;
extern struct list_head rxrpc_calls;
extern rwlock_t rxrpc_call_lock;

struct rxrpc_call *rxrpc_find_call_hash(struct rxrpc_host_header *,
					void *, sa_family_t, const void *);
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
					 struct rxrpc_transport *,
					 struct rxrpc_conn_bundle *,
					 unsigned long, gfp_t);
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *,
				       struct rxrpc_connection *,
				       struct rxrpc_host_header *);
void rxrpc_release_call(struct rxrpc_call *);
void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
void __rxrpc_put_call(struct rxrpc_call *);
void __exit rxrpc_destroy_all_calls(void);

/*
 * conn_event.c
 */
void rxrpc_process_connection(struct work_struct *);
void rxrpc_reject_packet(struct rxrpc_local *, struct sk_buff *);
void rxrpc_reject_packets(struct rxrpc_local *);

/*
 * conn_object.c
 */
extern unsigned int rxrpc_connection_expiry;
extern struct list_head rxrpc_connections;
extern rwlock_t rxrpc_connection_lock;

struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *,
					   struct rxrpc_transport *,
					   struct key *, u16, gfp_t);
void rxrpc_put_bundle(struct rxrpc_transport *, struct rxrpc_conn_bundle *);
int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_transport *,
		       struct rxrpc_conn_bundle *, struct rxrpc_call *, gfp_t);
void rxrpc_put_connection(struct rxrpc_connection *);
void __exit rxrpc_destroy_all_connections(void);
struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *,
					       struct rxrpc_host_header *);
extern struct rxrpc_connection *
rxrpc_incoming_connection(struct rxrpc_transport *, struct rxrpc_host_header *);

/*
 * input.c
 */
void rxrpc_data_ready(struct sock *);
int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool, bool);
void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *);

/*
 * insecure.c
 */
extern const struct rxrpc_security rxrpc_no_security;

/*
 * key.c
 */
extern struct key_type key_type_rxrpc;
extern struct key_type key_type_rxrpc_s;

int rxrpc_request_key(struct rxrpc_sock *, char __user *, int);
int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int);
int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time_t,
			      u32);

/*
 * local_event.c
 */
extern void rxrpc_process_local_events(struct rxrpc_local *);

/*
 * local_object.c
 */
struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *);
void __rxrpc_put_local(struct rxrpc_local *);
void __exit rxrpc_destroy_all_locals(void);

static inline void rxrpc_get_local(struct rxrpc_local *local)
{
	atomic_inc(&local->usage);
}

static inline
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
{
	return atomic_inc_not_zero(&local->usage) ? local : NULL;
}

static inline void rxrpc_put_local(struct rxrpc_local *local)
{
	if (atomic_dec_and_test(&local->usage))
		__rxrpc_put_local(local);
}

/*
 * misc.c
 */
extern unsigned int rxrpc_max_backlog __read_mostly;
extern unsigned int rxrpc_requested_ack_delay;
extern unsigned int rxrpc_soft_ack_delay;
extern unsigned int rxrpc_idle_ack_delay;
extern unsigned int rxrpc_rx_window_size;
extern unsigned int rxrpc_rx_mtu;
extern unsigned int rxrpc_rx_jumbo_max;

extern const char *const rxrpc_pkts[];
extern const s8 rxrpc_ack_priority[];

extern const char *rxrpc_acks(u8 reason);

/*
 * output.c
 */
extern unsigned int rxrpc_resend_timeout;

int rxrpc_send_packet(struct rxrpc_transport *, struct sk_buff *);
int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);

/*
 * peer_event.c
 */
void rxrpc_error_report(struct sock *);
void rxrpc_peer_error_distributor(struct work_struct *);

/*
 * peer_object.c
 */
struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
					 const struct sockaddr_rxrpc *);
struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *,
				     struct sockaddr_rxrpc *, gfp_t);
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);

static inline void rxrpc_get_peer(struct rxrpc_peer *peer)
{
	atomic_inc(&peer->usage);
}

static inline
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
{
	return atomic_inc_not_zero(&peer->usage) ? peer : NULL;
}

extern void __rxrpc_put_peer(struct rxrpc_peer *peer);
static inline void rxrpc_put_peer(struct rxrpc_peer *peer)
{
	if (atomic_dec_and_test(&peer->usage))
		__rxrpc_put_peer(peer);
}
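/*
 * Note on the reference-counting helpers above (added commentary): the
 * rxrpc_get_*() variants take a reference unconditionally, the
 * rxrpc_get_*_maybe() variants only succeed while the usage count is still
 * non-zero (e.g. when the object was found under an RCU-protected lookup),
 * and rxrpc_put_*() destroys the object when the last reference is dropped.
 */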

/*
 * proc.c
 */
extern const char *const rxrpc_call_states[];
extern const struct file_operations rxrpc_call_seq_fops;
extern const struct file_operations rxrpc_connection_seq_fops;

/*
 * recvmsg.c
 */
void rxrpc_remove_user_ID(struct rxrpc_sock *, struct rxrpc_call *);
int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);

/*
 * rxkad.c
 */
#ifdef CONFIG_RXKAD
extern const struct rxrpc_security rxkad;
#endif

/*
 * security.c
 */
int __init rxrpc_init_security(void);
void rxrpc_exit_security(void);
int rxrpc_init_client_conn_security(struct rxrpc_connection *);
int rxrpc_init_server_conn_security(struct rxrpc_connection *);

/*
 * skbuff.c
 */
void rxrpc_packet_destructor(struct sk_buff *);

/*
 * sysctl.c
 */
#ifdef CONFIG_SYSCTL
extern int __init rxrpc_sysctl_init(void);
extern void rxrpc_sysctl_exit(void);
#else
static inline int __init rxrpc_sysctl_init(void) { return 0; }
static inline void rxrpc_sysctl_exit(void) {}
#endif

/*
 * transport.c
 */
extern unsigned int rxrpc_transport_expiry;

struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *,
					    struct rxrpc_peer *, gfp_t);
void rxrpc_put_transport(struct rxrpc_transport *);
void __exit rxrpc_destroy_all_transports(void);
struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *,
					     struct rxrpc_peer *);

/*
 * utils.c
 */
void rxrpc_get_addr_from_skb(struct rxrpc_local *, const struct sk_buff *,
			     struct sockaddr_rxrpc *);

/*
 * debug tracing
 */
extern unsigned int rxrpc_debug;

#define dbgprintk(FMT,...) \
	printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__)

#define kenter(FMT,...) dbgprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define kleave(FMT,...) dbgprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define kdebug(FMT,...) dbgprintk("    "FMT ,##__VA_ARGS__)
#define kproto(FMT,...) dbgprintk("### "FMT ,##__VA_ARGS__)
#define knet(FMT,...)   dbgprintk("@@@ "FMT ,##__VA_ARGS__)


#if defined(__KDEBUG)
#define _enter(FMT,...) kenter(FMT,##__VA_ARGS__)
#define _leave(FMT,...) kleave(FMT,##__VA_ARGS__)
#define _debug(FMT,...) kdebug(FMT,##__VA_ARGS__)
#define _proto(FMT,...) kproto(FMT,##__VA_ARGS__)
#define _net(FMT,...)   knet(FMT,##__VA_ARGS__)

#elif defined(CONFIG_AF_RXRPC_DEBUG)
#define RXRPC_DEBUG_KENTER	0x01
#define RXRPC_DEBUG_KLEAVE	0x02
#define RXRPC_DEBUG_KDEBUG	0x04
#define RXRPC_DEBUG_KPROTO	0x08
#define RXRPC_DEBUG_KNET	0x10

#define _enter(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KENTER))	\
		kenter(FMT,##__VA_ARGS__);		\
} while (0)

#define _leave(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KLEAVE))	\
		kleave(FMT,##__VA_ARGS__);		\
} while (0)

#define _debug(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KDEBUG))	\
		kdebug(FMT,##__VA_ARGS__);		\
} while (0)

#define _proto(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KPROTO))	\
		kproto(FMT,##__VA_ARGS__);		\
} while (0)

#define _net(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KNET))	\
		knet(FMT,##__VA_ARGS__);		\
} while (0)

#else
#define _enter(FMT,...) no_printk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define _leave(FMT,...) no_printk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define _debug(FMT,...) no_printk("    "FMT ,##__VA_ARGS__)
#define _proto(FMT,...) no_printk("### "FMT ,##__VA_ARGS__)
#define _net(FMT,...)   no_printk("@@@ "FMT ,##__VA_ARGS__)
#endif
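/*
 * Usage sketch for the tracing macros above (illustrative, not copied from
 * any particular function):
 *
 *	_enter("%p,%lx", rx, user_call_ID);
 *	...
 *	_leave(" = %d", ret);
 */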

/*
 * debug assertion checking
 */
#if 1 // defined(__KDEBUGALL)

#define ASSERT(X)						\
do {								\
	if (unlikely(!(X))) {					\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

#define ASSERTCMP(X, OP, Y)						\
do {									\
	unsigned long _x = (unsigned long)(X);				\
	unsigned long _y = (unsigned long)(Y);				\
	if (unlikely(!(_x OP _y))) {					\
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       _x, _x, #OP, _y, _y);				\
		BUG();							\
	}								\
} while (0)

#define ASSERTIF(C, X)						\
do {								\
	if (unlikely((C) && !(X))) {				\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)					\
do {									\
	unsigned long _x = (unsigned long)(X);				\
	unsigned long _y = (unsigned long)(Y);				\
	if (unlikely((C) && !(_x OP _y))) {				\
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       _x, _x, #OP, _y, _y);				\
		BUG();							\
	}								\
} while (0)

#else

#define ASSERT(X)				\
do {						\
} while (0)

#define ASSERTCMP(X, OP, Y)			\
do {						\
} while (0)

#define ASSERTIF(C, X)				\
do {						\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)		\
do {						\
} while (0)

#endif /* __KDEBUGALL */
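/*
 * Illustrative use of the assertion macros (a sketch; with the "#if 1"
 * above they expand to real checks that BUG() on failure, otherwise to
 * no-ops):
 *
 *	ASSERT(call->conn != NULL);
 *	ASSERTCMP(atomic_read(&call->usage), >, 0);
 */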

/*
 * socket buffer accounting / leak finding
 */
static inline void __rxrpc_new_skb(struct sk_buff *skb, const char *fn)
{
	//_net("new skb %p %s [%d]", skb, fn, atomic_read(&rxrpc_n_skbs));
	//atomic_inc(&rxrpc_n_skbs);
}

#define rxrpc_new_skb(skb) __rxrpc_new_skb((skb), __func__)

static inline void __rxrpc_kill_skb(struct sk_buff *skb, const char *fn)
{
	//_net("kill skb %p %s [%d]", skb, fn, atomic_read(&rxrpc_n_skbs));
	//atomic_dec(&rxrpc_n_skbs);
}

#define rxrpc_kill_skb(skb) __rxrpc_kill_skb((skb), __func__)

static inline void __rxrpc_free_skb(struct sk_buff *skb, const char *fn)
{
	if (skb) {
		CHECK_SLAB_OKAY(&skb->users);
		//_net("free skb %p %s [%d]",
		//     skb, fn, atomic_read(&rxrpc_n_skbs));
		//atomic_dec(&rxrpc_n_skbs);
		kfree_skb(skb);
	}
}

#define rxrpc_free_skb(skb) __rxrpc_free_skb((skb), __func__)

static inline void rxrpc_purge_queue(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = skb_dequeue((list))) != NULL)
		rxrpc_free_skb(skb);
}

#define rxrpc_get_call(CALL)				\
do {							\
	CHECK_SLAB_OKAY(&(CALL)->usage);		\
	if (atomic_inc_return(&(CALL)->usage) == 1)	\
		BUG();					\
} while (0)

#define rxrpc_put_call(CALL)				\
do {							\
	__rxrpc_put_call(CALL);				\
} while (0)