/* AF_RXRPC internal definitions
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/atomic.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <rxrpc/packet.h>

#if 0
#define CHECK_SLAB_OKAY(X) \
	BUG_ON(atomic_read((X)) >> (sizeof(atomic_t) - 2) == \
	       (POISON_FREE << 8 | POISON_FREE))
#else
#define CHECK_SLAB_OKAY(X) do {} while (0)
#endif

#define FCRYPT_BSIZE 8
struct rxrpc_crypt {
	union {
		u8	x[FCRYPT_BSIZE];
		__be32	n[2];
	};
} __attribute__((aligned(8)));

#define rxrpc_queue_work(WS)	queue_work(rxrpc_workqueue, (WS))
#define rxrpc_queue_delayed_work(WS,D)	\
	queue_delayed_work(rxrpc_workqueue, (WS), (D))

#define rxrpc_queue_call(CALL)	rxrpc_queue_work(&(CALL)->processor)
#define rxrpc_queue_conn(CONN)	rxrpc_queue_work(&(CONN)->processor)

struct rxrpc_connection;

/*
 * sk_state for RxRPC sockets
 */
enum {
	RXRPC_UNBOUND = 0,
	RXRPC_CLIENT_UNBOUND,		/* Unbound socket used as client */
	RXRPC_CLIENT_BOUND,		/* client local address bound */
	RXRPC_SERVER_BOUND,		/* server local address bound */
	RXRPC_SERVER_LISTENING,		/* server listening for connections */
	RXRPC_CLOSE,			/* socket is being closed */
};

/*
 * RxRPC socket definition
 */
struct rxrpc_sock {
	/* WARNING: sk has to be the first member */
	struct sock		sk;
	rxrpc_interceptor_t	interceptor;	/* kernel service Rx interceptor function */
	struct rxrpc_local	*local;		/* local endpoint */
	struct list_head	listen_link;	/* link in the local endpoint's listen list */
	struct list_head	secureq;	/* calls awaiting connection security clearance */
	struct list_head	acceptq;	/* calls awaiting acceptance */
	struct key		*key;		/* security for this socket */
	struct key		*securities;	/* list of server security descriptors */
	struct rb_root		calls;		/* outstanding calls on this socket */
	unsigned long		flags;
#define RXRPC_SOCK_CONNECTED		0	/* connect_srx is set */
	rwlock_t		call_lock;	/* lock for calls */
	u32			min_sec_level;	/* minimum security level */
#define RXRPC_SECURITY_MAX	RXRPC_SECURITY_ENCRYPT
	bool			exclusive;	/* Exclusive connection for a client socket */
	sa_family_t		family;		/* Protocol family created with */
	struct sockaddr_rxrpc	srx;		/* local address */
	struct sockaddr_rxrpc	connect_srx;	/* Default client address from connect() */
};

#define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk)

/*
 * CPU-byteorder normalised Rx packet header.
 */
struct rxrpc_host_header {
	u32		epoch;		/* client boot timestamp */
	u32		cid;		/* connection and channel ID */
	u32		callNumber;	/* call ID (0 for connection-level packets) */
	u32		seq;		/* sequence number of pkt in call stream */
	u32		serial;		/* serial number of pkt sent to network */
	u8		type;		/* packet type */
	u8		flags;		/* packet flags */
	u8		userStatus;	/* app-layer defined status */
	u8		securityIndex;	/* security protocol ID */
	union {
		u16	_rsvd;		/* reserved */
		u16	cksum;		/* kerberos security checksum */
	};
	u16		serviceId;	/* service ID */
} __packed;

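/*
 * Example sketch (illustrative helper, not a real API in this file): filling
 * in the host-order header above from the on-the-wire form.  This assumes
 * struct rxrpc_wire_header from <rxrpc/packet.h> carries the same field names
 * in network byte order; the real conversion is done in the packet input path.
 */
static inline void rxrpc_host_header_from_wire_example(struct rxrpc_host_header *hdr,
						       const struct rxrpc_wire_header *whdr)
{
	hdr->epoch		= ntohl(whdr->epoch);
	hdr->cid		= ntohl(whdr->cid);
	hdr->callNumber		= ntohl(whdr->callNumber);
	hdr->seq		= ntohl(whdr->seq);
	hdr->serial		= ntohl(whdr->serial);
	hdr->type		= whdr->type;
	hdr->flags		= whdr->flags;
	hdr->userStatus		= whdr->userStatus;
	hdr->securityIndex	= whdr->securityIndex;
	hdr->_rsvd		= ntohs(whdr->_rsvd);
	hdr->serviceId		= ntohs(whdr->serviceId);
}
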
/*
 * RxRPC socket buffer private variables
 * - max 48 bytes (struct sk_buff::cb)
 */
struct rxrpc_skb_priv {
	struct rxrpc_call	*call;		/* call with which associated */
	unsigned long		resend_at;	/* time in jiffies at which to resend */
	union {
		unsigned int	offset;		/* offset into buffer of next read */
		int		remain;		/* amount of space remaining for next write */
		u32		error;		/* network error code */
		bool		need_resend;	/* T if needs resending */
	};

	struct rxrpc_host_header hdr;		/* RxRPC packet header from this packet */
};

#define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb)

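/*
 * Example sketch (illustrative helper only): per-packet state is reached via
 * the rxrpc_skb() wrapper above rather than by casting skb->cb by hand.
 */
static inline struct rxrpc_call *rxrpc_skb_call_example(struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	return sp->call;		/* may be NULL for conn-level packets */
}
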
enum rxrpc_command {
	RXRPC_CMD_SEND_DATA,		/* send data message */
	RXRPC_CMD_SEND_ABORT,		/* request abort generation */
	RXRPC_CMD_ACCEPT,		/* [server] accept incoming call */
	RXRPC_CMD_REJECT_BUSY,		/* [server] reject a call as busy */
};

/*
 * RxRPC security module interface
 */
struct rxrpc_security {
	const char		*name;		/* name of this service */
	u8			security_index;	/* security type provided */

	/* Initialise a security service */
	int (*init)(void);

	/* Clean up a security service */
	void (*exit)(void);

	/* initialise a connection's security */
	int (*init_connection_security)(struct rxrpc_connection *);

	/* prime a connection's packet security */
	void (*prime_packet_security)(struct rxrpc_connection *);

	/* impose security on a packet */
	int (*secure_packet)(const struct rxrpc_call *,
			     struct sk_buff *,
			     size_t,
			     void *);

	/* verify the security on a received packet */
	int (*verify_packet)(const struct rxrpc_call *, struct sk_buff *,
			     u32 *);

	/* issue a challenge */
	int (*issue_challenge)(struct rxrpc_connection *);

	/* respond to a challenge */
	int (*respond_to_challenge)(struct rxrpc_connection *,
				    struct sk_buff *,
				    u32 *);

	/* verify a response */
	int (*verify_response)(struct rxrpc_connection *,
			       struct sk_buff *,
			       u32 *);

	/* clear connection security */
	void (*clear)(struct rxrpc_connection *);
};

/*
 * RxRPC local transport endpoint description
 * - owned by a single AF_RXRPC socket
 * - pointed to by transport socket struct sk_user_data
 */
struct rxrpc_local {
	struct rcu_head		rcu;
	atomic_t		usage;
	struct list_head	link;
	struct socket		*socket;	/* my UDP socket */
	struct work_struct	processor;
	struct list_head	services;	/* services listening on this endpoint */
	struct rw_semaphore	defrag_sem;	/* control re-enablement of IP DF bit */
	struct sk_buff_head	accept_queue;	/* incoming calls awaiting acceptance */
	struct sk_buff_head	reject_queue;	/* packets awaiting rejection */
	struct sk_buff_head	event_queue;	/* endpoint event packets awaiting processing */
	struct mutex		conn_lock;	/* Client connection creation lock */
	spinlock_t		lock;		/* access lock */
	rwlock_t		services_lock;	/* lock for services list */
	int			debug_id;	/* debug ID for printks */
	bool			dead;
	struct sockaddr_rxrpc	srx;		/* local address */
};

/*
 * RxRPC remote transport endpoint definition
 * - matched by local endpoint, remote port, address and protocol type
 */
struct rxrpc_peer {
	struct rcu_head		rcu;		/* This must be first */
	atomic_t		usage;
	unsigned long		hash_key;
	struct hlist_node	hash_link;
	struct rxrpc_local	*local;
	struct hlist_head	error_targets;	/* targets for net error distribution */
	struct work_struct	error_distributor;
	spinlock_t		lock;		/* access lock */
	unsigned int		if_mtu;		/* interface MTU for this peer */
	unsigned int		mtu;		/* network MTU for this peer */
	unsigned int		maxdata;	/* data size (MTU - hdrsize) */
	unsigned short		hdrsize;	/* header size (IP + UDP + RxRPC) */
	int			debug_id;	/* debug ID for printks */
	int			error_report;	/* Net (+0) or local (+1000000) to distribute */
#define RXRPC_LOCAL_ERROR_OFFSET 1000000
	struct sockaddr_rxrpc	srx;		/* remote address */

	/* calculated RTT cache */
#define RXRPC_RTT_CACHE_SIZE 32
	suseconds_t		rtt;		/* current RTT estimate (in uS) */
	unsigned int		rtt_point;	/* next entry at which to insert */
	unsigned int		rtt_usage;	/* amount of cache actually used */
	suseconds_t		rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* calculated RTT cache */
};

/*
 * RxRPC point-to-point transport / connection manager definition
 * - handles a bundle of connections between two endpoints
 * - matched by { local, peer }
 */
struct rxrpc_transport {
	struct rxrpc_local	*local;		/* local transport endpoint */
	struct rxrpc_peer	*peer;		/* remote transport endpoint */
	struct rb_root		bundles;	/* client connection bundles on this transport */
	struct rb_root		client_conns;	/* client connections on this transport */
	struct rb_root		server_conns;	/* server connections on this transport */
	struct list_head	link;		/* link in master session list */
	unsigned long		put_time;	/* time at which to reap */
	spinlock_t		client_lock;	/* client connection allocation lock */
	rwlock_t		conn_lock;	/* lock for active/dead connections */
	atomic_t		usage;
	int			debug_id;	/* debug ID for printks */
	unsigned int		conn_idcounter;	/* connection ID counter (client) */
};

/*
 * RxRPC client connection bundle
 * - matched by { transport, service_id, key }
 */
struct rxrpc_conn_bundle {
	struct rb_node		node;		/* node in transport's lookup tree */
	struct list_head	unused_conns;	/* unused connections in this bundle */
	struct list_head	avail_conns;	/* available connections in this bundle */
	struct list_head	busy_conns;	/* busy connections in this bundle */
	struct key		*key;		/* security for this bundle */
	wait_queue_head_t	chanwait;	/* wait for channel to become available */
	atomic_t		usage;
	int			debug_id;	/* debug ID for printks */
	unsigned short		num_conns;	/* number of connections in this bundle */
	u16			service_id;	/* Service ID for this bundle */
	u8			security_ix;	/* security type */
};

/*
 * Keys for matching a connection.
 */
struct rxrpc_conn_proto {
	unsigned long		hash_key;
	struct rxrpc_local	*local;		/* Representation of local endpoint */
	u32			epoch;		/* epoch of this connection */
	u32			cid;		/* connection ID */
	u8			in_clientflag;	/* RXRPC_CLIENT_INITIATED if we are server */
	u8			addr_size;	/* Size of the address */
	sa_family_t		family;		/* Transport protocol */
	__be16			port;		/* Peer UDP/UDP6 port */
	union {					/* Peer address */
		struct in_addr	ipv4_addr;
		struct in6_addr	ipv6_addr;
		u32		raw_addr[0];
	};
};

struct rxrpc_conn_parameters {
	struct rxrpc_local	*local;		/* Representation of local endpoint */
	struct rxrpc_peer	*peer;		/* Remote endpoint */
	struct key		*key;		/* Security details */
	bool			exclusive;	/* T if conn is exclusive */
	u16			service_id;	/* Service ID for this connection */
	u32			security_level;	/* Security level selected */
};

/*
 * RxRPC connection definition
 * - matched by { transport, service_id, conn_id, direction, key }
 * - each connection can only handle four simultaneous calls
 */
struct rxrpc_connection {
	struct rxrpc_transport	*trans;		/* transport session */
	struct rxrpc_conn_bundle *bundle;	/* connection bundle (client) */
	struct rxrpc_conn_proto	proto;
	struct rxrpc_conn_parameters params;

	struct work_struct	processor;	/* connection event processor */
	struct rb_node		node;		/* node in transport's lookup tree */
	struct list_head	link;		/* link in master connection list */
	struct list_head	bundle_link;	/* link in bundle */
	struct rb_root		calls;		/* calls on this connection */
	struct sk_buff_head	rx_queue;	/* received conn-level packets */
	struct rxrpc_call	*channels[RXRPC_MAXCALLS]; /* channels (active calls) */
	const struct rxrpc_security *security;	/* applied security module */
	struct key		*server_key;	/* security for this service */
	struct crypto_skcipher	*cipher;	/* encryption handle */
	struct rxrpc_crypt	csum_iv;	/* packet checksum base */
	unsigned long		events;
#define RXRPC_CONN_CHALLENGE	0		/* send challenge packet */
	unsigned long		put_time;	/* time at which to reap */
	rwlock_t		lock;		/* access lock */
	spinlock_t		state_lock;	/* state-change lock */
	atomic_t		usage;
	enum {					/* current state of connection */
		RXRPC_CONN_UNUSED,		/* - connection not yet attempted */
		RXRPC_CONN_CLIENT,		/* - client connection */
		RXRPC_CONN_SERVER_UNSECURED,	/* - server unsecured connection */
		RXRPC_CONN_SERVER_CHALLENGING,	/* - server challenging for security */
		RXRPC_CONN_SERVER,		/* - server secured connection */
		RXRPC_CONN_REMOTELY_ABORTED,	/* - conn aborted by peer */
		RXRPC_CONN_LOCALLY_ABORTED,	/* - conn aborted locally */
		RXRPC_CONN_NETWORK_ERROR,	/* - conn terminated by network error */
	} state;
	u32			local_abort;	/* local abort code */
	u32			remote_abort;	/* remote abort code */
	int			error;		/* local error incurred */
	int			debug_id;	/* debug ID for printks */
	unsigned int		call_counter;	/* call ID counter */
	atomic_t		serial;		/* packet serial number counter */
	atomic_t		hi_serial;	/* highest serial number received */
	u8			avail_calls;	/* number of calls available */
	u8			size_align;	/* data size alignment (for security) */
	u8			header_size;	/* rxrpc + security header size */
	u8			security_size;	/* security header size */
	u32			security_nonce;	/* response re-use preventer */
	u8			security_ix;	/* security type */
	u8			out_clientflag;	/* RXRPC_CLIENT_INITIATED if we are client */
};

/*
 * Flags in call->flags.
 */
enum rxrpc_call_flag {
	RXRPC_CALL_RELEASED,		/* call has been released - no more message to userspace */
	RXRPC_CALL_TERMINAL_MSG,	/* call has given the socket its final message */
	RXRPC_CALL_RCVD_LAST,		/* all packets received */
	RXRPC_CALL_RUN_RTIMER,		/* Tx resend timer started */
	RXRPC_CALL_TX_SOFT_ACK,		/* sent some soft ACKs */
	RXRPC_CALL_PROC_BUSY,		/* the processor is busy */
	RXRPC_CALL_INIT_ACCEPT,		/* acceptance was initiated */
	RXRPC_CALL_HAS_USERID,		/* has a user ID attached */
	RXRPC_CALL_EXPECT_OOS,		/* expect out of sequence packets */
};

/*
 * Events that can be raised on a call.
 */
enum rxrpc_call_event {
	RXRPC_CALL_EV_RCVD_ACKALL,	/* ACKALL or reply received */
	RXRPC_CALL_EV_RCVD_BUSY,	/* busy packet received */
	RXRPC_CALL_EV_RCVD_ABORT,	/* abort packet received */
	RXRPC_CALL_EV_RCVD_ERROR,	/* network error received */
	RXRPC_CALL_EV_ACK_FINAL,	/* need to generate final ACK (and release call) */
	RXRPC_CALL_EV_ACK,		/* need to generate ACK */
	RXRPC_CALL_EV_REJECT_BUSY,	/* need to generate busy message */
	RXRPC_CALL_EV_ABORT,		/* need to generate abort */
	RXRPC_CALL_EV_CONN_ABORT,	/* local connection abort generated */
	RXRPC_CALL_EV_RESEND_TIMER,	/* Tx resend timer expired */
	RXRPC_CALL_EV_RESEND,		/* Tx resend required */
	RXRPC_CALL_EV_DRAIN_RX_OOS,	/* drain the Rx out of sequence queue */
	RXRPC_CALL_EV_LIFE_TIMER,	/* call's lifetimer ran out */
	RXRPC_CALL_EV_ACCEPTED,		/* incoming call accepted by userspace app */
	RXRPC_CALL_EV_SECURED,		/* incoming call's connection is now secure */
	RXRPC_CALL_EV_POST_ACCEPT,	/* need to post an "accept?" message to the app */
	RXRPC_CALL_EV_RELEASE,		/* need to release the call's resources */
};

/*
 * The states that a call can be in.
 */
enum rxrpc_call_state {
	RXRPC_CALL_CLIENT_SEND_REQUEST,	/* - client sending request phase */
	RXRPC_CALL_CLIENT_AWAIT_REPLY,	/* - client awaiting reply */
	RXRPC_CALL_CLIENT_RECV_REPLY,	/* - client receiving reply phase */
	RXRPC_CALL_CLIENT_FINAL_ACK,	/* - client sending final ACK phase */
	RXRPC_CALL_SERVER_SECURING,	/* - server securing request connection */
	RXRPC_CALL_SERVER_ACCEPTING,	/* - server accepting request */
	RXRPC_CALL_SERVER_RECV_REQUEST,	/* - server receiving request */
	RXRPC_CALL_SERVER_ACK_REQUEST,	/* - server pending ACK of request */
	RXRPC_CALL_SERVER_SEND_REPLY,	/* - server sending reply */
	RXRPC_CALL_SERVER_AWAIT_ACK,	/* - server awaiting final ACK */
	RXRPC_CALL_COMPLETE,		/* - call completed */
	RXRPC_CALL_SERVER_BUSY,		/* - call rejected by busy server */
	RXRPC_CALL_REMOTELY_ABORTED,	/* - call aborted by peer */
	RXRPC_CALL_LOCALLY_ABORTED,	/* - call aborted locally on error or close */
	RXRPC_CALL_NETWORK_ERROR,	/* - call terminated by network error */
	RXRPC_CALL_DEAD,		/* - call is dead */
	NR__RXRPC_CALL_STATES
};

/*
 * RxRPC call definition
 * - matched by { connection, call_id }
 */
struct rxrpc_call {
	struct rxrpc_connection	*conn;		/* connection carrying call */
	struct rxrpc_sock	*socket;	/* socket responsible */
	struct timer_list	lifetimer;	/* lifetime remaining on call */
	struct timer_list	deadspan;	/* reap timer for re-ACK'ing, etc */
	struct timer_list	ack_timer;	/* ACK generation timer */
	struct timer_list	resend_timer;	/* Tx resend timer */
	struct work_struct	destroyer;	/* call destroyer */
	struct work_struct	processor;	/* packet processor and ACK generator */
	struct list_head	link;		/* link in master call list */
	struct hlist_node	error_link;	/* link in error distribution list */
	struct list_head	accept_link;	/* calls awaiting acceptance */
	struct rb_node		sock_node;	/* node in socket call tree */
	struct rb_node		conn_node;	/* node in connection call tree */
	struct sk_buff_head	rx_queue;	/* received packets */
	struct sk_buff_head	rx_oos_queue;	/* packets received out of sequence */
	struct sk_buff		*tx_pending;	/* Tx socket buffer being filled */
	wait_queue_head_t	tx_waitq;	/* wait for Tx window space to become available */
	unsigned long		user_call_ID;	/* user-defined call ID */
	unsigned long		creation_jif;	/* time of call creation */
	unsigned long		flags;
	unsigned long		events;
	spinlock_t		lock;
	rwlock_t		state_lock;	/* lock for state transition */
	atomic_t		usage;
	atomic_t		sequence;	/* Tx data packet sequence counter */
	u32			local_abort;	/* local abort code */
	u32			remote_abort;	/* remote abort code */
	int			error_report;	/* Network error (ICMP/local transport) */
	int			error;		/* Local error incurred */
	enum rxrpc_call_state	state : 8;	/* current state of call */
	int			debug_id;	/* debug ID for printks */
	u8			channel;	/* connection channel occupied by this call */

	/* transmission-phase ACK management */
	u8			acks_head;	/* offset into window of first entry */
	u8			acks_tail;	/* offset into window of last entry */
	u8			acks_winsz;	/* size of un-ACK'd window */
	u8			acks_unacked;	/* lowest unacked packet in last ACK received */
	int			acks_latest;	/* serial number of latest ACK received */
	rxrpc_seq_t		acks_hard;	/* highest definitively ACK'd msg seq */
	unsigned long		*acks_window;	/* sent packet window
						 * - elements are pointers with LSB set if ACK'd
						 */

	/* receive-phase ACK management */
	rxrpc_seq_t		rx_data_expect;	/* next data seq ID expected to be received */
	rxrpc_seq_t		rx_data_post;	/* next data seq ID expected to be posted */
	rxrpc_seq_t		rx_data_recv;	/* last data seq ID encountered by recvmsg */
	rxrpc_seq_t		rx_data_eaten;	/* last data seq ID consumed by recvmsg */
	rxrpc_seq_t		rx_first_oos;	/* first packet in rx_oos_queue (or 0) */
	rxrpc_seq_t		ackr_win_top;	/* top of ACK window (rx_data_eaten is bottom) */
	rxrpc_seq_t		ackr_prev_seq;	/* previous sequence number received */
	u8			ackr_reason;	/* reason to ACK */
	rxrpc_serial_t		ackr_serial;	/* serial of packet being ACK'd */
	atomic_t		ackr_not_idle;	/* number of packets in Rx queue */

	/* received packet records, 1 bit per record */
#define RXRPC_ACKR_WINDOW_ASZ DIV_ROUND_UP(RXRPC_MAXACKS, BITS_PER_LONG)
	unsigned long		ackr_window[RXRPC_ACKR_WINDOW_ASZ + 1];

	struct hlist_node	hash_node;
	unsigned long		hash_key;	/* Full hash key */
	u8			in_clientflag;	/* Copy of conn->in_clientflag for hashing */
	struct rxrpc_local	*local;		/* Local endpoint. Used for hashing. */
	sa_family_t		family;		/* Frame protocol */
	u32			call_id;	/* call ID on connection */
	u32			cid;		/* connection ID plus channel index */
	u32			epoch;		/* epoch of this connection */
	u16			service_id;	/* service ID */
	union {					/* Peer IP address for hashing */
		__be32	ipv4_addr;
		__u8	ipv6_addr[16];	/* Anticipates eventual IPv6 support */
	} peer_ip;
};

/*
 * locally abort an RxRPC call
 */
static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
{
	write_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE) {
		call->local_abort = abort_code;
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
	}
	write_unlock_bh(&call->state_lock);
}

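/*
 * Example sketch (illustrative helper only) of the usual abort pattern: mark
 * the call aborted and then kick the call processor so that the queued
 * RXRPC_CALL_EV_ABORT event is actually turned into an ABORT packet.
 * RX_USER_ABORT is assumed here to be one of the abort codes defined in
 * <rxrpc/packet.h>.
 */
static inline void rxrpc_abort_call_example(struct rxrpc_call *call)
{
	rxrpc_abort_call(call, RX_USER_ABORT);
	rxrpc_queue_call(call);
}
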
/*
 * af_rxrpc.c
 */
extern atomic_t rxrpc_n_skbs;
extern u32 rxrpc_epoch;
extern atomic_t rxrpc_debug_id;
extern struct workqueue_struct *rxrpc_workqueue;

extern struct rxrpc_transport *rxrpc_name_to_transport(struct rxrpc_conn_parameters *,
						       struct sockaddr *,
						       int, gfp_t);

/*
 * call_accept.c
 */
void rxrpc_accept_incoming_calls(struct rxrpc_local *);
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long);
int rxrpc_reject_call(struct rxrpc_sock *);

/*
 * call_event.c
 */
void __rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool);
void rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool);
void rxrpc_process_call(struct work_struct *);

/*
 * call_object.c
 */
extern unsigned int rxrpc_max_call_lifetime;
extern unsigned int rxrpc_dead_call_expiry;
extern struct kmem_cache *rxrpc_call_jar;
extern struct list_head rxrpc_calls;
extern rwlock_t rxrpc_call_lock;

struct rxrpc_call *rxrpc_find_call_hash(struct rxrpc_host_header *,
					void *, sa_family_t, const void *);
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
					 struct rxrpc_conn_parameters *,
					 struct rxrpc_transport *,
					 struct rxrpc_conn_bundle *,
					 unsigned long, gfp_t);
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *,
				       struct rxrpc_connection *,
				       struct sk_buff *);
void rxrpc_release_call(struct rxrpc_call *);
void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
void __rxrpc_put_call(struct rxrpc_call *);
void __exit rxrpc_destroy_all_calls(void);

/*
 * conn_event.c
 */
void rxrpc_process_connection(struct work_struct *);
void rxrpc_reject_packet(struct rxrpc_local *, struct sk_buff *);
void rxrpc_reject_packets(struct rxrpc_local *);

/*
 * conn_object.c
 */
extern unsigned int rxrpc_connection_expiry;
extern struct list_head rxrpc_connections;
extern rwlock_t rxrpc_connection_lock;

struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *,
					   struct rxrpc_transport *,
					   struct key *, u16, gfp_t);
void rxrpc_put_bundle(struct rxrpc_transport *, struct rxrpc_conn_bundle *);
int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_conn_parameters *,
		       struct rxrpc_transport *, struct rxrpc_conn_bundle *,
		       struct rxrpc_call *, gfp_t);
void rxrpc_put_connection(struct rxrpc_connection *);
void __exit rxrpc_destroy_all_connections(void);
struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *,
					       struct sk_buff *);
extern struct rxrpc_connection *
rxrpc_incoming_connection(struct rxrpc_transport *, struct sk_buff *);

static inline bool rxrpc_conn_is_client(const struct rxrpc_connection *conn)
{
	return conn->out_clientflag;
}

static inline bool rxrpc_conn_is_service(const struct rxrpc_connection *conn)
{
	return conn->proto.in_clientflag;
}

/*
 * input.c
 */
void rxrpc_data_ready(struct sock *);
int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool, bool);
void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *);

/*
 * insecure.c
 */
extern const struct rxrpc_security rxrpc_no_security;

/*
 * key.c
 */
extern struct key_type key_type_rxrpc;
extern struct key_type key_type_rxrpc_s;

int rxrpc_request_key(struct rxrpc_sock *, char __user *, int);
int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int);
int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time_t,
			      u32);

/*
 * local_event.c
 */
extern void rxrpc_process_local_events(struct rxrpc_local *);

/*
 * local_object.c
 */
struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *);
void __rxrpc_put_local(struct rxrpc_local *);
void __exit rxrpc_destroy_all_locals(void);

static inline void rxrpc_get_local(struct rxrpc_local *local)
{
	atomic_inc(&local->usage);
}

static inline
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
{
	return atomic_inc_not_zero(&local->usage) ? local : NULL;
}

static inline void rxrpc_put_local(struct rxrpc_local *local)
{
	if (atomic_dec_and_test(&local->usage))
		__rxrpc_put_local(local);
}

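/*
 * Example sketch (illustrative helper only): each user of a local endpoint
 * pins it with a usage reference while working with it; the final
 * rxrpc_put_local() hands the object to __rxrpc_put_local() for destruction.
 */
static inline void rxrpc_use_local_example(struct rxrpc_local *local)
{
	rxrpc_get_local(local);			/* pin the endpoint */
	/* ... use local->socket, local->srx, etc. ... */
	rxrpc_put_local(local);			/* drop the reference again */
}
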
/*
 * misc.c
 */
extern unsigned int rxrpc_max_backlog __read_mostly;
extern unsigned int rxrpc_requested_ack_delay;
extern unsigned int rxrpc_soft_ack_delay;
extern unsigned int rxrpc_idle_ack_delay;
extern unsigned int rxrpc_rx_window_size;
extern unsigned int rxrpc_rx_mtu;
extern unsigned int rxrpc_rx_jumbo_max;

extern const char *const rxrpc_pkts[];
extern const s8 rxrpc_ack_priority[];

extern const char *rxrpc_acks(u8 reason);

/*
 * output.c
 */
extern unsigned int rxrpc_resend_timeout;

int rxrpc_send_packet(struct rxrpc_transport *, struct sk_buff *);
int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);

/*
 * peer_event.c
 */
void rxrpc_error_report(struct sock *);
void rxrpc_peer_error_distributor(struct work_struct *);

/*
 * peer_object.c
 */
struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
					 const struct sockaddr_rxrpc *);
struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *,
				     struct sockaddr_rxrpc *, gfp_t);
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);

static inline void rxrpc_get_peer(struct rxrpc_peer *peer)
{
	atomic_inc(&peer->usage);
}

static inline
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
{
	return atomic_inc_not_zero(&peer->usage) ? peer : NULL;
}

extern void __rxrpc_put_peer(struct rxrpc_peer *peer);
static inline void rxrpc_put_peer(struct rxrpc_peer *peer)
{
	if (atomic_dec_and_test(&peer->usage))
		__rxrpc_put_peer(peer);
}

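/*
 * Example sketch (illustrative helper only): a peer found by the RCU lookup
 * above must be pinned with rxrpc_get_peer_maybe() before the RCU read lock
 * is dropped, since a peer whose usage count has already reached zero is on
 * its way out and must not be revived.
 */
static inline struct rxrpc_peer *
rxrpc_find_peer_example(struct rxrpc_local *local, const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_peer *peer;

	rcu_read_lock();
	peer = rxrpc_lookup_peer_rcu(local, srx);
	if (peer && !rxrpc_get_peer_maybe(peer))
		peer = NULL;			/* lost a race with the final put */
	rcu_read_unlock();
	return peer;				/* caller balances with rxrpc_put_peer() */
}
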
/*
 * proc.c
 */
extern const char *const rxrpc_call_states[];
extern const struct file_operations rxrpc_call_seq_fops;
extern const struct file_operations rxrpc_connection_seq_fops;

/*
 * recvmsg.c
 */
void rxrpc_remove_user_ID(struct rxrpc_sock *, struct rxrpc_call *);
int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);

/*
 * rxkad.c
 */
#ifdef CONFIG_RXKAD
extern const struct rxrpc_security rxkad;
#endif

/*
 * security.c
 */
int __init rxrpc_init_security(void);
void rxrpc_exit_security(void);
int rxrpc_init_client_conn_security(struct rxrpc_connection *);
int rxrpc_init_server_conn_security(struct rxrpc_connection *);

/*
 * skbuff.c
 */
void rxrpc_packet_destructor(struct sk_buff *);

/*
 * sysctl.c
 */
#ifdef CONFIG_SYSCTL
extern int __init rxrpc_sysctl_init(void);
extern void rxrpc_sysctl_exit(void);
#else
static inline int __init rxrpc_sysctl_init(void) { return 0; }
static inline void rxrpc_sysctl_exit(void) {}
#endif

/*
 * transport.c
 */
extern unsigned int rxrpc_transport_expiry;

struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *,
					    struct rxrpc_peer *, gfp_t);
void rxrpc_put_transport(struct rxrpc_transport *);
void __exit rxrpc_destroy_all_transports(void);
struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *,
					     struct rxrpc_peer *);

/*
 * utils.c
 */
void rxrpc_get_addr_from_skb(struct rxrpc_local *, const struct sk_buff *,
			     struct sockaddr_rxrpc *);

/*
 * debug tracing
 */
extern unsigned int rxrpc_debug;

#define dbgprintk(FMT,...) \
	printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__)

#define kenter(FMT,...)	dbgprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define kleave(FMT,...)	dbgprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define kdebug(FMT,...)	dbgprintk("    "FMT ,##__VA_ARGS__)
#define kproto(FMT,...)	dbgprintk("### "FMT ,##__VA_ARGS__)
#define knet(FMT,...)	dbgprintk("@@@ "FMT ,##__VA_ARGS__)


#if defined(__KDEBUG)
#define _enter(FMT,...)	kenter(FMT,##__VA_ARGS__)
#define _leave(FMT,...)	kleave(FMT,##__VA_ARGS__)
#define _debug(FMT,...)	kdebug(FMT,##__VA_ARGS__)
#define _proto(FMT,...)	kproto(FMT,##__VA_ARGS__)
#define _net(FMT,...)	knet(FMT,##__VA_ARGS__)

#elif defined(CONFIG_AF_RXRPC_DEBUG)
#define RXRPC_DEBUG_KENTER	0x01
#define RXRPC_DEBUG_KLEAVE	0x02
#define RXRPC_DEBUG_KDEBUG	0x04
#define RXRPC_DEBUG_KPROTO	0x08
#define RXRPC_DEBUG_KNET	0x10

#define _enter(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KENTER))	\
		kenter(FMT,##__VA_ARGS__);		\
} while (0)

#define _leave(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KLEAVE))	\
		kleave(FMT,##__VA_ARGS__);		\
} while (0)

#define _debug(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KDEBUG))	\
		kdebug(FMT,##__VA_ARGS__);		\
} while (0)

#define _proto(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KPROTO))	\
		kproto(FMT,##__VA_ARGS__);		\
} while (0)

#define _net(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KNET))	\
		knet(FMT,##__VA_ARGS__);		\
} while (0)

#else
#define _enter(FMT,...)	no_printk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define _leave(FMT,...)	no_printk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define _debug(FMT,...)	no_printk("    "FMT ,##__VA_ARGS__)
#define _proto(FMT,...)	no_printk("### "FMT ,##__VA_ARGS__)
#define _net(FMT,...)	no_printk("@@@ "FMT ,##__VA_ARGS__)
#endif

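/*
 * Example sketch (illustrative helper only) of how the tracing macros are
 * used in the .c files.  With __KDEBUG, or with CONFIG_AF_RXRPC_DEBUG and the
 * matching rxrpc_debug bits set, these become printk() lines tagged with the
 * current task's name; otherwise they compile away through no_printk().
 */
static inline void rxrpc_trace_example(struct rxrpc_call *call)
{
	_enter("{%d}", call->debug_id);
	_debug("events %lx", call->events);
	_leave("");
}
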
/*
 * debug assertion checking
 */
#if 1 // defined(__KDEBUGALL)

#define ASSERT(X)						\
do {								\
	if (unlikely(!(X))) {					\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

#define ASSERTCMP(X, OP, Y)						\
do {									\
	unsigned long _x = (unsigned long)(X);				\
	unsigned long _y = (unsigned long)(Y);				\
	if (unlikely(!(_x OP _y))) {					\
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       _x, _x, #OP, _y, _y);				\
		BUG();							\
	}								\
} while (0)

#define ASSERTIF(C, X)						\
do {								\
	if (unlikely((C) && !(X))) {				\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)					\
do {									\
	unsigned long _x = (unsigned long)(X);				\
	unsigned long _y = (unsigned long)(Y);				\
	if (unlikely((C) && !(_x OP _y))) {				\
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       _x, _x, #OP, _y, _y);				\
		BUG();							\
	}								\
} while (0)

#else

#define ASSERT(X)				\
do {						\
} while (0)

#define ASSERTCMP(X, OP, Y)			\
do {						\
} while (0)

#define ASSERTIF(C, X)				\
do {						\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)		\
do {						\
} while (0)

#endif /* __KDEBUGALL */

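/*
 * Example sketch (illustrative helper only) of the assertion forms above; the
 * particular comparisons are just to show the syntax, with reference counts
 * and window/channel bounds being the usual targets.
 */
static inline void rxrpc_assert_example(struct rxrpc_call *call)
{
	ASSERT(call != NULL);
	ASSERTCMP(atomic_read(&call->usage), >, 0);
	ASSERTIF(call->conn != NULL, call->channel < RXRPC_MAXCALLS);
}
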
/*
 * socket buffer accounting / leak finding
 */
static inline void __rxrpc_new_skb(struct sk_buff *skb, const char *fn)
{
	//_net("new skb %p %s [%d]", skb, fn, atomic_read(&rxrpc_n_skbs));
	//atomic_inc(&rxrpc_n_skbs);
}

#define rxrpc_new_skb(skb) __rxrpc_new_skb((skb), __func__)

static inline void __rxrpc_kill_skb(struct sk_buff *skb, const char *fn)
{
	//_net("kill skb %p %s [%d]", skb, fn, atomic_read(&rxrpc_n_skbs));
	//atomic_dec(&rxrpc_n_skbs);
}

#define rxrpc_kill_skb(skb) __rxrpc_kill_skb((skb), __func__)

static inline void __rxrpc_free_skb(struct sk_buff *skb, const char *fn)
{
	if (skb) {
		CHECK_SLAB_OKAY(&skb->users);
		//_net("free skb %p %s [%d]",
		//     skb, fn, atomic_read(&rxrpc_n_skbs));
		//atomic_dec(&rxrpc_n_skbs);
		kfree_skb(skb);
	}
}

#define rxrpc_free_skb(skb) __rxrpc_free_skb((skb), __func__)

static inline void rxrpc_purge_queue(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = skb_dequeue((list))) != NULL)
		rxrpc_free_skb(skb);
}

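/*
 * Example sketch (illustrative helper only): teardown paths free packets
 * through the wrappers above so that the commented-out leak accounting can be
 * re-enabled in one place when chasing sk_buff leaks.
 */
static inline void rxrpc_discard_rx_example(struct rxrpc_call *call,
					    struct sk_buff *skb)
{
	rxrpc_free_skb(skb);			/* NULL-safe single-skb free */
	rxrpc_purge_queue(&call->rx_oos_queue);	/* drain anything still queued */
}
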
#define rxrpc_get_call(CALL)				\
do {							\
	CHECK_SLAB_OKAY(&(CALL)->usage);		\
	if (atomic_inc_return(&(CALL)->usage) == 1)	\
		BUG();					\
} while (0)

#define rxrpc_put_call(CALL)				\
do {							\
	__rxrpc_put_call(CALL);				\
} while (0)