/*
 *  linux/include/linux/sunrpc/xprt.h
 *
 *  Declarations for the RPC transport interface.
 *
 *  Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#ifndef _LINUX_SUNRPC_XPRT_H
#define _LINUX_SUNRPC_XPRT_H

#include <linux/uio.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/ktime.h>
#include <linux/kref.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

#ifdef __KERNEL__

#define RPC_MIN_SLOT_TABLE	(2U)
#define RPC_DEF_SLOT_TABLE	(16U)
#define RPC_MAX_SLOT_TABLE_LIMIT	(65536U)
#define RPC_MAX_SLOT_TABLE	RPC_MAX_SLOT_TABLE_LIMIT

#define RPC_CWNDSHIFT		(8U)
#define RPC_CWNDSCALE		(1U << RPC_CWNDSHIFT)
#define RPC_INITCWND		RPC_CWNDSCALE
#define RPC_MAXCWND(xprt)	((xprt)->max_reqs << RPC_CWNDSHIFT)
#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)

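/*
 * Worked example (illustrative only; the bookkeeping itself lives in
 * net/sunrpc/xprt.c): congestion is tracked in units of RPC_CWNDSCALE
 * (1 << 8 == 256) per request, so RPC_INITCWND corresponds to a single
 * request in flight.  The window grows from RPC_INITCWND up to
 * RPC_MAXCWND(), which with the default slot table evaluates to
 * 16 << 8 == 4096, i.e. 16 concurrent requests.  RPCXPRT_CONGESTED()
 * reports true once xprt->cong, bumped by RPC_CWNDSCALE for each
 * congestion-controlled request on the wire, reaches the current window.
 */
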
/*
 * This describes a timeout strategy
 */
struct rpc_timeout {
	unsigned long		to_initval,		/* initial timeout */
				to_maxval,		/* max timeout */
				to_increment;		/* if !exponential */
	unsigned int		to_retries;		/* max # of retries */
	unsigned char		to_exponential;
};

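/*
 * Example (illustrative only; the adjustment itself is done by
 * xprt_adjust_timeout(), declared below): with to_initval = 5 * HZ,
 * to_maxval = 30 * HZ, to_retries = 3 and to_exponential set, the
 * retransmit timeout doubles on each retry (5s, 10s, 20s, ...) but is
 * clamped at to_maxval; with to_exponential clear, each retry instead
 * adds to_increment to the previous value.
 */
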
enum rpc_display_format_t {
	RPC_DISPLAY_ADDR = 0,
	RPC_DISPLAY_PORT,
	RPC_DISPLAY_PROTO,
	RPC_DISPLAY_HEX_ADDR,
	RPC_DISPLAY_HEX_PORT,
	RPC_DISPLAY_NETID,
	RPC_DISPLAY_MAX,
};

struct rpc_task;
struct rpc_xprt;
struct seq_file;
struct svc_serv;
struct net;

/*
 * This describes a complete RPC request
 */
struct rpc_rqst {
	/*
	 * This is the user-visible part
	 */
	struct rpc_xprt *	rq_xprt;		/* RPC client */
	struct xdr_buf		rq_snd_buf;		/* send buffer */
	struct xdr_buf		rq_rcv_buf;		/* recv buffer */

	/*
	 * This is the private part
	 */
	struct rpc_task *	rq_task;		/* RPC task data */
	struct rpc_cred *	rq_cred;		/* Bound cred */
	__be32			rq_xid;			/* request XID */
	int			rq_cong;		/* has incremented xprt->cong */
	u32			rq_seqno;		/* gss seq no. used on req. */
	int			rq_enc_pages_num;
	struct page		**rq_enc_pages;		/* scratch pages for use by
							   gss privacy code */
	void (*rq_release_snd_buf)(struct rpc_rqst *);	/* release rq_enc_pages */
	struct list_head	rq_list;

	__u32 *			rq_buffer;		/* XDR encode buffer */
	size_t			rq_callsize,
				rq_rcvsize;
	size_t			rq_xmit_bytes_sent;	/* total bytes sent */
	size_t			rq_reply_bytes_recvd;	/* total reply bytes
							   received */

	struct xdr_buf		rq_private_buf;		/* The receive buffer
							 * used in the softirq.
							 */
	unsigned long		rq_majortimeo;		/* major timeout alarm */
	unsigned long		rq_timeout;		/* Current timeout value */
	ktime_t			rq_rtt;			/* round-trip time */
	unsigned int		rq_retries;		/* # of retries */
	unsigned int		rq_connect_cookie;	/* A cookie used to track the
							   state of the transport
							   connection */

	/*
	 * Partial send handling
	 */
	u32			rq_bytes_sent;	/* Bytes we have sent */

	ktime_t			rq_xtime;	/* transmit time stamp */
	int			rq_ntrans;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	struct list_head	rq_bc_list;	/* Callback service list */
	unsigned long		rq_bc_pa_state;	/* Backchannel prealloc state */
	struct list_head	rq_bc_pa_list;	/* Backchannel prealloc list */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
};
#define rq_svec			rq_snd_buf.head
#define rq_slen			rq_snd_buf.len

struct rpc_xprt_ops {
	void		(*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize);
	int		(*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
	void		(*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
	void		(*alloc_slot)(struct rpc_xprt *xprt, struct rpc_task *task);
	void		(*rpcbind)(struct rpc_task *task);
	void		(*set_port)(struct rpc_xprt *xprt, unsigned short port);
	void		(*connect)(struct rpc_xprt *xprt, struct rpc_task *task);
	void *		(*buf_alloc)(struct rpc_task *task, size_t size);
	void		(*buf_free)(void *buffer);
	int		(*send_request)(struct rpc_task *task);
	void		(*set_retrans_timeout)(struct rpc_task *task);
	void		(*timer)(struct rpc_xprt *xprt, struct rpc_task *task);
	void		(*release_request)(struct rpc_task *task);
	void		(*close)(struct rpc_xprt *xprt);
	void		(*destroy)(struct rpc_xprt *xprt);
	void		(*print_stats)(struct rpc_xprt *xprt, struct seq_file *seq);
	int		(*enable_swap)(struct rpc_xprt *xprt);
	void		(*disable_swap)(struct rpc_xprt *xprt);
	void		(*inject_disconnect)(struct rpc_xprt *xprt);
	int		(*bc_setup)(struct rpc_xprt *xprt,
				    unsigned int min_reqs);
	int		(*bc_up)(struct svc_serv *serv, struct net *net);
	size_t		(*bc_maxpayload)(struct rpc_xprt *xprt);
	void		(*bc_free_rqst)(struct rpc_rqst *rqst);
	void		(*bc_destroy)(struct rpc_xprt *xprt,
				      unsigned int max_reqs);
};

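/*
 * Illustrative sketch: generic transport code never calls a transport
 * implementation directly; it always dispatches through this ops table,
 * just as the inline wrappers later in this file do, e.g.
 *
 *	xprt->ops->connect(xprt, task);
 *	status = xprt->ops->send_request(task);
 *
 * Concrete method tables are supplied by the socket and RDMA transports
 * outside this header.
 */
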
/*
 * RPC transport identifiers
 *
 * To preserve compatibility with the historical use of raw IP protocol
 * IDs for transport selection, the UDP and TCP identifiers keep those
 * values.  No such restriction exists for new transports, except that
 * they may not collide with these values (17 and 6, respectively).
 */
#define XPRT_TRANSPORT_BC	(1 << 31)
enum xprt_transports {
	XPRT_TRANSPORT_UDP	= IPPROTO_UDP,
	XPRT_TRANSPORT_TCP	= IPPROTO_TCP,
	XPRT_TRANSPORT_BC_TCP	= IPPROTO_TCP | XPRT_TRANSPORT_BC,
	XPRT_TRANSPORT_RDMA	= 256,
	XPRT_TRANSPORT_BC_RDMA	= XPRT_TRANSPORT_RDMA | XPRT_TRANSPORT_BC,
	XPRT_TRANSPORT_LOCAL	= 257,
};

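/*
 * For example (values follow directly from the definitions above):
 * XPRT_TRANSPORT_UDP == 17 and XPRT_TRANSPORT_TCP == 6, matching the
 * raw IP protocol numbers, while XPRT_TRANSPORT_BC_TCP == 6 | (1 << 31)
 * == 0x80000006 marks the same wire protocol used as a backchannel.
 */
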
struct rpc_xprt {
	struct kref		kref;		/* Reference count */
	struct rpc_xprt_ops *	ops;		/* transport methods */

	const struct rpc_timeout *timeout;	/* timeout parms */
	struct sockaddr_storage	addr;		/* server address */
	size_t			addrlen;	/* size of server address */
	int			prot;		/* IP protocol */

	unsigned long		cong;		/* current congestion */
	unsigned long		cwnd;		/* congestion window */

	size_t			max_payload;	/* largest RPC payload size,
						   in bytes */
	unsigned int		tsh_size;	/* size of transport specific
						   header, in 32-bit words */

	struct rpc_wait_queue	binding;	/* requests waiting on rpcbind */
	struct rpc_wait_queue	sending;	/* requests waiting to send */
	struct rpc_wait_queue	pending;	/* requests in flight */
	struct rpc_wait_queue	backlog;	/* waiting for slot */
	struct list_head	free;		/* free slots */
	unsigned int		max_reqs;	/* max number of slots */
	unsigned int		min_reqs;	/* min number of slots */
	atomic_t		num_reqs;	/* total slots */
	unsigned long		state;		/* transport state */
	unsigned char		resvport : 1;	/* use a reserved port */
	atomic_t		swapper;	/* we're swapping over this
						   transport */
	unsigned int		bind_index;	/* bind function index */

	/*
	 * Multipath
	 */
	struct list_head	xprt_switch;

	/*
	 * Connection of transports
	 */
	unsigned long		bind_timeout,
				reestablish_timeout;
	unsigned int		connect_cookie;	/* A cookie that gets bumped
						   every time the transport
						   is reconnected */

	/*
	 * Disconnection of idle transports
	 */
	struct work_struct	task_cleanup;
	struct timer_list	timer;
	unsigned long		last_used,
				idle_timeout;

	/*
	 * Send stuff
	 */
	spinlock_t		transport_lock;	/* lock transport info */
	spinlock_t		reserve_lock;	/* lock slot table */
	u32			xid;		/* Next XID value to use */
	struct rpc_task *	snd_task;	/* Task blocked in send */
	struct svc_xprt		*bc_xprt;	/* NFSv4.1 backchannel */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	struct svc_serv		*bc_serv;	/* The RPC service which will
						   process the callback */
	int			bc_alloc_count;	/* Total number of preallocs */
	atomic_t		bc_free_slots;
	spinlock_t		bc_pa_lock;	/* Protects the preallocated
						 * items */
	struct list_head	bc_pa_list;	/* List of preallocated
						 * backchannel rpc_rqst's */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
	struct list_head	recv;

	struct {
		unsigned long		bind_count,	/* total number of binds */
					connect_count,	/* total number of connects */
					connect_start,	/* connect start timestamp */
					connect_time,	/* jiffies waiting for connect */
					sends,		/* how many complete requests */
					recvs,		/* how many complete replies */
					bad_xids,	/* lookup_rqst didn't find XID */
					max_slots;	/* max rpc_slots used */

		unsigned long long	req_u,		/* average requests on the wire */
					bklog_u,	/* backlog queue utilization */
					sending_u,	/* send q utilization */
					pending_u;	/* pend q utilization */
	} stat;

	struct net		*xprt_net;
	const char		*servername;
	const char		*address_strings[RPC_DISPLAY_MAX];
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	struct dentry		*debugfs;	/* debugfs directory */
	atomic_t		inject_disconnect;
#endif
	struct rcu_head		rcu;
};

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/*
 * Backchannel flags
 */
#define	RPC_BC_PA_IN_USE	0x0001		/* Preallocated backchannel
						 * buffer in use */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static inline int bc_prealloc(struct rpc_rqst *req)
{
	return test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
}
#else
static inline int bc_prealloc(struct rpc_rqst *req)
{
	return 0;
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

#define XPRT_CREATE_INFINITE_SLOTS	(1U)
#define XPRT_CREATE_NO_IDLE_TIMEOUT	(1U << 1)

\"Talpey, Thomas\ | 3c341b0b | 2007-09-10 13:47:07 -0400 | [diff] [blame] | 292 | struct xprt_create { |
\"Talpey, Thomas\ | 4fa016e | 2007-09-10 13:47:57 -0400 | [diff] [blame] | 293 | int ident; /* XPRT_TRANSPORT identifier */ |
Pavel Emelyanov | 9a23e33 | 2010-09-29 16:05:12 +0400 | [diff] [blame] | 294 | struct net * net; |
Frank van Maarseveen | d3bc9a1 | 2007-07-09 22:23:35 +0200 | [diff] [blame] | 295 | struct sockaddr * srcaddr; /* optional local address */ |
Frank van Maarseveen | 96802a0 | 2007-07-08 13:08:54 +0200 | [diff] [blame] | 296 | struct sockaddr * dstaddr; /* remote peer address */ |
| 297 | size_t addrlen; |
Trond Myklebust | 4e0038b | 2012-03-01 17:01:05 -0500 | [diff] [blame] | 298 | const char *servername; |
Alexandros Batsakis | f300bab | 2009-09-10 17:33:30 +0300 | [diff] [blame] | 299 | struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ |
Trond Myklebust | b7993ce | 2013-04-14 11:42:00 -0400 | [diff] [blame] | 300 | unsigned int flags; |
Frank van Maarseveen | 96802a0 | 2007-07-08 13:08:54 +0200 | [diff] [blame] | 301 | }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 302 | |
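/*
 * Minimal usage sketch (illustrative only; error handling omitted, and
 * "net" and "server_addr" are assumed to be supplied by the caller):
 *
 *	struct xprt_create args = {
 *		.ident		= XPRT_TRANSPORT_TCP,
 *		.net		= net,
 *		.dstaddr	= (struct sockaddr *)&server_addr,
 *		.addrlen	= sizeof(server_addr),
 *		.servername	= "server.example.org",
 *		.flags		= XPRT_CREATE_NO_IDLE_TIMEOUT,
 *	};
 *	struct rpc_xprt *xprt = xprt_create_transport(&args);
 *
 * xprt_create_transport() is declared below; in practice it is reached
 * via rpc_create() rather than called directly.
 */
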
\"Talpey, Thomas\ | 81c098a | 2007-09-10 13:46:00 -0400 | [diff] [blame] | 303 | struct xprt_class { |
| 304 | struct list_head list; |
\"Talpey, Thomas\ | 4fa016e | 2007-09-10 13:47:57 -0400 | [diff] [blame] | 305 | int ident; /* XPRT_TRANSPORT identifier */ |
\"Talpey, Thomas\ | 3c341b0b | 2007-09-10 13:47:07 -0400 | [diff] [blame] | 306 | struct rpc_xprt * (*setup)(struct xprt_create *); |
\"Talpey, Thomas\ | 81c098a | 2007-09-10 13:46:00 -0400 | [diff] [blame] | 307 | struct module *owner; |
| 308 | char name[32]; |
| 309 | }; |
| 310 | |
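/*
 * Illustrative sketch of how a new transport plugs into the transport
 * switch (the "example_*" names are hypothetical; .ident must not
 * collide with the identifiers defined above):
 *
 *	static struct xprt_class example_transport = {
 *		.list	= LIST_HEAD_INIT(example_transport.list),
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.ident	= 300,
 *		.setup	= example_xprt_setup,
 *	};
 *
 *	err = xprt_register_transport(&example_transport);
 *
 * xprt_register_transport() and xprt_unregister_transport() are
 * declared below.
 */
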
/*
 * Generic internal transport functions
 */
struct rpc_xprt		*xprt_create_transport(struct xprt_create *args);
void			xprt_connect(struct rpc_task *task);
void			xprt_reserve(struct rpc_task *task);
void			xprt_retry_reserve(struct rpc_task *task);
int			xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
int			xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
void			xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
void			xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
bool			xprt_prepare_transmit(struct rpc_task *task);
void			xprt_transmit(struct rpc_task *task);
void			xprt_end_transmit(struct rpc_task *task);
int			xprt_adjust_timeout(struct rpc_rqst *req);
void			xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
void			xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
void			xprt_release(struct rpc_task *task);
struct rpc_xprt *	xprt_get(struct rpc_xprt *xprt);
void			xprt_put(struct rpc_xprt *xprt);
struct rpc_xprt *	xprt_alloc(struct net *net, size_t size,
				   unsigned int num_prealloc,
				   unsigned int max_req);
void			xprt_free(struct rpc_xprt *);

static inline __be32 *xprt_skip_transport_header(struct rpc_xprt *xprt, __be32 *p)
{
	return p + xprt->tsh_size;
}

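/*
 * For example (illustrative; the actual values are set by the individual
 * transports, not here): a stream transport that prepends a 4-byte RPC
 * record marker uses tsh_size == 1, so the helper above advances the XDR
 * pointer past one 32-bit word, while a datagram transport with no
 * transport header uses tsh_size == 0.
 */
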
static inline int
xprt_enable_swap(struct rpc_xprt *xprt)
{
	return xprt->ops->enable_swap(xprt);
}

static inline void
xprt_disable_swap(struct rpc_xprt *xprt)
{
	xprt->ops->disable_swap(xprt);
}

/*
 * Transport switch helper functions
 */
int			xprt_register_transport(struct xprt_class *type);
int			xprt_unregister_transport(struct xprt_class *type);
int			xprt_load_transport(const char *);
void			xprt_set_retrans_timeout_def(struct rpc_task *task);
void			xprt_set_retrans_timeout_rtt(struct rpc_task *task);
void			xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status);
void			xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action);
void			xprt_write_space(struct rpc_xprt *xprt);
void			xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result);
struct rpc_rqst *	xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid);
void			xprt_complete_rqst(struct rpc_task *task, int copied);
void			xprt_release_rqst_cong(struct rpc_task *task);
void			xprt_disconnect_done(struct rpc_xprt *xprt);
void			xprt_force_disconnect(struct rpc_xprt *xprt);
void			xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie);

bool			xprt_lock_connect(struct rpc_xprt *, struct rpc_task *, void *);
void			xprt_unlock_connect(struct rpc_xprt *, void *);

/*
 * Reserved bit positions in xprt->state
 */
#define XPRT_LOCKED		(0)
#define XPRT_CONNECTED		(1)
#define XPRT_CONNECTING		(2)
#define XPRT_CLOSE_WAIT		(3)
#define XPRT_BOUND		(4)
#define XPRT_BINDING		(5)
#define XPRT_CLOSING		(6)
#define XPRT_CONGESTED		(9)

static inline void xprt_set_connected(struct rpc_xprt *xprt)
{
	set_bit(XPRT_CONNECTED, &xprt->state);
}

static inline void xprt_clear_connected(struct rpc_xprt *xprt)
{
	clear_bit(XPRT_CONNECTED, &xprt->state);
}

static inline int xprt_connected(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CONNECTED, &xprt->state);
}

static inline int xprt_test_and_set_connected(struct rpc_xprt *xprt)
{
	return test_and_set_bit(XPRT_CONNECTED, &xprt->state);
}

static inline int xprt_test_and_clear_connected(struct rpc_xprt *xprt)
{
	return test_and_clear_bit(XPRT_CONNECTED, &xprt->state);
}

static inline void xprt_clear_connecting(struct rpc_xprt *xprt)
{
	smp_mb__before_atomic();
	clear_bit(XPRT_CONNECTING, &xprt->state);
	smp_mb__after_atomic();
}

static inline int xprt_connecting(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CONNECTING, &xprt->state);
}

static inline int xprt_test_and_set_connecting(struct rpc_xprt *xprt)
{
	return test_and_set_bit(XPRT_CONNECTING, &xprt->state);
}

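/*
 * Illustrative pattern (not a specific call site): the XPRT_CONNECTING
 * bit lets a caller ensure that only one connection attempt runs at a
 * time.
 *
 *	if (xprt_test_and_set_connecting(xprt))
 *		return;			(someone else is already connecting)
 *	...start the transport connect...
 *	xprt_clear_connecting(xprt);	(once the attempt completes)
 */
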
static inline void xprt_set_bound(struct rpc_xprt *xprt)
{
	test_and_set_bit(XPRT_BOUND, &xprt->state);
}

static inline int xprt_bound(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_BOUND, &xprt->state);
}

static inline void xprt_clear_bound(struct rpc_xprt *xprt)
{
	clear_bit(XPRT_BOUND, &xprt->state);
}

static inline void xprt_clear_binding(struct rpc_xprt *xprt)
{
	smp_mb__before_atomic();
	clear_bit(XPRT_BINDING, &xprt->state);
	smp_mb__after_atomic();
}

static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt)
{
	return test_and_set_bit(XPRT_BINDING, &xprt->state);
}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
extern unsigned int rpc_inject_disconnect;
static inline void xprt_inject_disconnect(struct rpc_xprt *xprt)
{
	if (!rpc_inject_disconnect)
		return;
	if (atomic_dec_return(&xprt->inject_disconnect))
		return;
	atomic_set(&xprt->inject_disconnect, rpc_inject_disconnect);
	xprt->ops->inject_disconnect(xprt);
}
#else
static inline void xprt_inject_disconnect(struct rpc_xprt *xprt)
{
}
#endif

#endif /* __KERNEL__ */

#endif /* _LINUX_SUNRPC_XPRT_H */