/******************************************************************************

(c) 2007 Network Appliance, Inc. All Rights Reserved.
(c) 2009 NetApp. All Rights Reserved.

NetApp provides this source code under the GPL v2 License.
The GPL v2 license is available at
http://opensource.org/licenses/gpl-license.php.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/sunrpc/xprt.h>
#include <linux/export.h>
#include <linux/sunrpc/bc_xprt.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
#define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * Helper routines that track the number of preallocation elements
 * on the transport.
 */
static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
{
	return xprt->bc_alloc_count < atomic_read(&xprt->bc_free_slots);
}

static inline void xprt_inc_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
	atomic_add(n, &xprt->bc_free_slots);
	xprt->bc_alloc_count += n;
}

static inline int xprt_dec_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
	atomic_sub(n, &xprt->bc_free_slots);
	return xprt->bc_alloc_count -= n;
}
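
/*
 * An illustrative reading of these counters (an interpretation of the
 * code below, not wording from the original authors): bc_alloc_count
 * mirrors the number of preallocated rpc_rqsts currently on
 * bc_pa_list, while bc_free_slots tracks how many slots the remaining
 * sessions are still entitled to.  xprt_free_bc_request() requeues a
 * freed request only while bc_alloc_count < bc_free_slots, i.e. while
 * the pool is under-provisioned relative to that entitlement.
 */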

/*
 * Free the preallocated rpc_rqst structure and the memory
 * buffers hanging off of it.
 */
static void xprt_free_allocation(struct rpc_rqst *req)
{
	struct xdr_buf *xbufp;

	dprintk("RPC: free allocations for req= %p\n", req);
	WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
	xbufp = &req->rq_rcv_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	xbufp = &req->rq_snd_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	kfree(req);
}

static int xprt_alloc_xdr_buf(struct xdr_buf *buf, gfp_t gfp_flags)
{
	struct page *page;
	/* Preallocate one XDR buffer backed by a single page */
	page = alloc_page(gfp_flags);
	if (page == NULL)
		return -ENOMEM;
	buf->head[0].iov_base = page_address(page);
	buf->head[0].iov_len = PAGE_SIZE;
	buf->tail[0].iov_base = NULL;
	buf->tail[0].iov_len = 0;
	buf->page_len = 0;
	buf->len = 0;
	buf->buflen = PAGE_SIZE;
	return 0;
}
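
/*
 * Layout produced above (a sketch of the resulting xdr_buf): head[0]
 * covers the single preallocated page, the page array and tail are
 * empty, buflen is PAGE_SIZE, and len starts at zero until data is
 * marshalled or received into the buffer.
 */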

static
struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
	struct rpc_rqst *req;

	/* Pre-allocate one backchannel rpc_rqst */
	req = kzalloc(sizeof(*req), gfp_flags);
	if (req == NULL)
		return NULL;

	req->rq_xprt = xprt;
	INIT_LIST_HEAD(&req->rq_list);
	INIT_LIST_HEAD(&req->rq_bc_list);

	/* Preallocate one XDR receive buffer */
	if (xprt_alloc_xdr_buf(&req->rq_rcv_buf, gfp_flags) < 0) {
		printk(KERN_ERR "Failed to create bc receive xbuf\n");
		goto out_free;
	}
	req->rq_rcv_buf.len = PAGE_SIZE;

	/* Preallocate one XDR send buffer */
	if (xprt_alloc_xdr_buf(&req->rq_snd_buf, gfp_flags) < 0) {
		printk(KERN_ERR "Failed to create bc snd xbuf\n");
		goto out_free;
	}
	return req;
out_free:
	xprt_free_allocation(req);
	return NULL;
}
/*
 * Preallocate up to min_reqs structures and related buffers for use
 * by the backchannel. This function can be called multiple times
 * when creating new sessions that use the same rpc_xprt. The
 * preallocated buffers are added to the pool of resources used by
 * the rpc_xprt. Any one of these resources may be used by an
 * incoming callback request. It's up to the higher levels in the
 * stack to enforce that the maximum number of session slots is not
 * being exceeded.
 *
 * Some callback arguments can be large. For example, a pNFS server
 * using multiple deviceids. The list can be unbounded, but the client
 * has the ability to tell the server the maximum size of the callback
 * requests. Each deviceID is 16 bytes, so allocate one page
 * for the arguments to have enough room to receive a number of these
 * deviceIDs. The NFS client indicates to the pNFS server that its
 * callback requests can be up to 4096 bytes in size.
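 *
 * For example (illustrative arithmetic, assuming a 4096-byte page):
 * at 16 bytes per deviceID, one page leaves room for up to
 * 4096 / 16 = 256 deviceIDs in a single callback argument.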
 */
int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
{
	struct rpc_rqst *req;
	struct list_head tmp_list;
	int i;

	dprintk("RPC: setup backchannel transport\n");

	/*
	 * We use a temporary list to keep track of the preallocated
	 * buffers. Once we're done building the list we splice it
	 * into the backchannel preallocation list off of the rpc_xprt
	 * struct. This helps minimize the amount of time the list
	 * lock is held on the rpc_xprt struct. It also makes cleanup
	 * easier in case of memory allocation errors.
	 */
	INIT_LIST_HEAD(&tmp_list);
	for (i = 0; i < min_reqs; i++) {
		/* Pre-allocate one backchannel rpc_rqst */
		req = xprt_alloc_bc_req(xprt, GFP_KERNEL);
		if (req == NULL) {
			printk(KERN_ERR "Failed to create bc rpc_rqst\n");
			goto out_free;
		}

		/* Add the allocated buffer to the tmp list */
		dprintk("RPC: adding req= %p\n", req);
		list_add(&req->rq_bc_pa_list, &tmp_list);
	}

	/*
	 * Add the temporary list to the backchannel preallocation list
	 */
	spin_lock_bh(&xprt->bc_pa_lock);
	list_splice(&tmp_list, &xprt->bc_pa_list);
	xprt_inc_alloc_count(xprt, min_reqs);
	spin_unlock_bh(&xprt->bc_pa_lock);

	dprintk("RPC: setup backchannel transport done\n");
	return 0;

out_free:
	/*
	 * Memory allocation failed, free the temporary list
	 */
	while (!list_empty(&tmp_list)) {
		req = list_first_entry(&tmp_list,
				struct rpc_rqst,
				rq_bc_pa_list);
		list_del(&req->rq_bc_pa_list);
		xprt_free_allocation(req);
	}

	dprintk("RPC: setup backchannel transport failed\n");
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(xprt_setup_backchannel);
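
/*
 * Minimal usage sketch (illustrative; names and error handling are
 * assumptions, not taken from a real caller): a session owner
 * provisions the pool once per backchannel session and tears it down
 * symmetrically with the same count.
 *
 *	rc = xprt_setup_backchannel(xprt, NFS41_BC_MIN_CALLBACKS);
 *	if (rc < 0)
 *		return rc;
 *	...
 *	xprt_destroy_backchannel(xprt, NFS41_BC_MIN_CALLBACKS);
 *
 * NFS41_BC_MIN_CALLBACKS stands in for whatever slot count the caller
 * actually needs for its sessions.
 */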

/**
 * xprt_destroy_backchannel - Destroys the backchannel preallocated structures.
 * @xprt: the transport holding the preallocated structures
 * @max_reqs: the maximum number of preallocated structures to destroy
 *
 * Since these structures may have been allocated by multiple calls
 * to xprt_setup_backchannel, we only destroy up to the maximum number
 * of reqs specified by the caller.
 */
void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
{
	struct rpc_rqst *req = NULL, *tmp = NULL;

	dprintk("RPC: destroy backchannel transport\n");

	if (max_reqs == 0)
		goto out;

	spin_lock_bh(&xprt->bc_pa_lock);
	xprt_dec_alloc_count(xprt, max_reqs);
	list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		dprintk("RPC: req=%p\n", req);
		list_del(&req->rq_bc_pa_list);
		xprt_free_allocation(req);
		if (--max_reqs == 0)
			break;
	}
	spin_unlock_bh(&xprt->bc_pa_lock);

out:
	dprintk("RPC: backchannel list empty= %s\n",
		list_empty(&xprt->bc_pa_list) ? "true" : "false");
}
EXPORT_SYMBOL_GPL(xprt_destroy_backchannel);

static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *req = NULL;

	dprintk("RPC: allocate a backchannel request\n");
	if (atomic_read(&xprt->bc_free_slots) <= 0)
		goto not_found;
	if (list_empty(&xprt->bc_pa_list)) {
		req = xprt_alloc_bc_req(xprt, GFP_ATOMIC);
		if (!req)
			goto not_found;
		list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
		xprt->bc_alloc_count++;
	}
	req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
				rq_bc_pa_list);
	req->rq_reply_bytes_recvd = 0;
	req->rq_bytes_sent = 0;
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));
	req->rq_xid = xid;
	req->rq_connect_cookie = xprt->connect_cookie;
not_found:
	dprintk("RPC: backchannel req=%p\n", req);
	return req;
}

/*
 * Return the preallocated rpc_rqst structure and XDR buffers
 * associated with this rpc_task.
 */
void xprt_free_bc_request(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: free backchannel req=%p\n", req);

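	/*
	 * Stamp the slot with a stale connect cookie so that
	 * xprt_lookup_bc_request() will not match it again until it is
	 * reused for a new request on the current connection.
	 */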
	req->rq_connect_cookie = xprt->connect_cookie - 1;
	smp_mb__before_atomic();
	clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
	smp_mb__after_atomic();

	/*
	 * Return it to the list of preallocations so that it
	 * may be reused by a new callback request.
	 */
	spin_lock_bh(&xprt->bc_pa_lock);
	if (xprt_need_to_requeue(xprt)) {
		list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
		xprt->bc_alloc_count++;
		req = NULL;
	}
	spin_unlock_bh(&xprt->bc_pa_lock);
	if (req != NULL) {
		/*
		 * The last remaining session was destroyed while this
		 * entry was in use. Free the entry and don't attempt
		 * to add it back to the list because there is no need
		 * to have any more preallocated entries.
		 */
		dprintk("RPC: Last session removed req=%p\n", req);
		xprt_free_allocation(req);
		return;
	}
}

/*
 * One or more rpc_rqst structures have been preallocated during the
 * backchannel setup. Buffer space for the send and private XDR buffers
 * has been preallocated as well. Use xprt_alloc_bc_request to allocate
 * one of these structures, and use xprt_free_bc_request to return it.
 *
 * We know that we're called in soft interrupt context, so take the
 * plain spin_lock; there is no need for the bottom-half variant.
 *
 * Return an available rpc_rqst, otherwise NULL if none are available.
 */
struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->bc_pa_lock);
	list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
		if (req->rq_connect_cookie != xprt->connect_cookie)
			continue;
		if (req->rq_xid == xid)
			goto found;
	}
	req = xprt_alloc_bc_request(xprt, xid);
found:
	spin_unlock(&xprt->bc_pa_lock);
	return req;
}
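
/*
 * Receive-path sketch (illustrative): a transport that has parsed the
 * xid of an incoming callback record might use the pair of calls
 *
 *	req = xprt_lookup_bc_request(xprt, xid);
 *	if (req == NULL)
 *		goto drop;	(no slot available: drop the record)
 *	copied = bc_copy_record(req);
 *	xprt_complete_bc_request(req, copied);
 *
 * where bc_copy_record() is a hypothetical stand-in for the
 * transport's own logic that fills req->rq_private_buf; only the
 * lookup/complete pairing is prescribed by this file.
 */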

/*
 * Add callback request to callback list. The callback
 * service sleeps on the sv_cb_waitq waiting for new
 * requests. Wake it up after enqueuing the request.
 */
void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct svc_serv *bc_serv = xprt->bc_serv;

	spin_lock(&xprt->bc_pa_lock);
	list_del(&req->rq_bc_pa_list);
	xprt_dec_alloc_count(xprt, 1);
	spin_unlock(&xprt->bc_pa_lock);

	req->rq_private_buf.len = copied;
	set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);

	dprintk("RPC: add callback request to list\n");
	spin_lock(&bc_serv->sv_cb_lock);
	list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
	wake_up(&bc_serv->sv_cb_waitq);
	spin_unlock(&bc_serv->sv_cb_lock);
}