/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */
10
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090011#include <linux/slab.h>
Jeff Layton59766872013-02-04 12:50:00 -050012#include <linux/sunrpc/addr.h>
Jeff Layton0338dd12013-02-04 08:18:02 -050013#include <linux/highmem.h>
Jeff Layton0733c7b2013-03-27 10:15:39 -040014#include <linux/log2.h>
15#include <linux/hash.h>
Jeff Layton01a7dec2013-02-04 11:57:27 -050016#include <net/checksum.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090017
Boaz Harrosh9a74af22009-12-03 20:30:56 +020018#include "nfsd.h"
19#include "cache.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070020
Jeff Layton0338dd12013-02-04 08:18:02 -050021#define NFSDDBG_FACILITY NFSDDBG_REPCACHE
22
/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE 64

struct nfsd_drc_bucket {
        struct list_head lru_head;
        spinlock_t cache_lock;
};

static struct nfsd_drc_bucket *drc_hashtbl;
static struct kmem_cache *drc_slab;

/* max number of entries allowed in the cache */
static unsigned int max_drc_entries;

/* number of significant bits in the hash value */
static unsigned int maskbits;
static unsigned int drc_hashsize;

/*
 * Stats and other tracking of the duplicate reply cache. All of these and
 * the "rc" fields in nfsdstats are protected by the cache_lock.
 */

/* total number of entries */
static atomic_t num_drc_entries;

/* cache misses due only to checksum comparison failures */
static unsigned int payload_misses;

/* amount of memory (in bytes) currently consumed by the DRC */
static unsigned int drc_mem_usage;

/* longest hash chain seen */
static unsigned int longest_chain;

/* size of cache when we saw the longest hash chain */
static unsigned int longest_chain_cachesize;

static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static void cache_cleaner_func(struct work_struct *unused);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
                                            struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
                                           struct shrink_control *sc);

static struct shrinker nfsd_reply_cache_shrinker = {
        .scan_objects = nfsd_reply_cache_scan,
        .count_objects = nfsd_reply_cache_count,
        .seeks = 1,
};

/*
 * locking for the reply cache:
 * A cache entry is "single use" if c_state == RC_INPROG.
 * Otherwise, the lock must be held when accessing _prev or _next.
 */
static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used in k.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
        unsigned int limit;
        unsigned long low_pages = totalram_pages - totalhigh_pages;

        limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
        return min_t(unsigned int, limit, 256*1024);
}
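
/*
 * Worked example (illustrative, assuming 4K pages, i.e. PAGE_SHIFT == 12):
 * with 1GB of low memory, low_pages = 262144, int_sqrt(262144) = 512, so
 * limit = (16 * 512) << 2 = 32768 -- the 1GB row of the table above. The
 * 256k hard cap is only reached at roughly 64GB of low memory.
 */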

/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
        return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}
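
/*
 * Continuing the 1GB example above: nfsd_hashsize(32768) = 32768 / 64 = 512
 * buckets (already a power of two), and nfsd_reply_cache_init() then sets
 * maskbits = ilog2(512) = 9.
 */
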
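/*
 * Map an XID to a bucket index. hash_32() is the kernel's multiplicative
 * hash; it keeps only the top 'maskbits' bits, so the result is always
 * < drc_hashsize.
 */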
static u32
nfsd_cache_hash(__be32 xid)
{
        return hash_32(be32_to_cpu(xid), maskbits);
}

static struct svc_cacherep *
nfsd_reply_cache_alloc(void)
{
        struct svc_cacherep *rp;

        rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
        if (rp) {
                rp->c_state = RC_UNUSED;
                rp->c_type = RC_NOCACHE;
                INIT_LIST_HEAD(&rp->c_lru);
        }
        return rp;
}

static void
nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
{
        if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
                drc_mem_usage -= rp->c_replvec.iov_len;
                kfree(rp->c_replvec.iov_base);
        }
        list_del(&rp->c_lru);
        atomic_dec(&num_drc_entries);
        drc_mem_usage -= sizeof(*rp);
        kmem_cache_free(drc_slab, rp);
}

static void
nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
        spin_lock(&b->cache_lock);
        nfsd_reply_cache_free_locked(rp);
        spin_unlock(&b->cache_lock);
}

int nfsd_reply_cache_init(void)
{
        unsigned int hashsize;
        unsigned int i;
        int status = 0;

        max_drc_entries = nfsd_cache_size_limit();
        atomic_set(&num_drc_entries, 0);
        hashsize = nfsd_hashsize(max_drc_entries);
        maskbits = ilog2(hashsize);

        status = register_shrinker(&nfsd_reply_cache_shrinker);
        if (status)
                return status;

        drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
                                     0, 0, NULL);
        if (!drc_slab)
                goto out_nomem;

        drc_hashtbl = kcalloc(hashsize, sizeof(*drc_hashtbl), GFP_KERNEL);
        if (!drc_hashtbl)
                goto out_nomem;
        for (i = 0; i < hashsize; i++) {
                INIT_LIST_HEAD(&drc_hashtbl[i].lru_head);
                spin_lock_init(&drc_hashtbl[i].cache_lock);
        }
        drc_hashsize = hashsize;

        return 0;
out_nomem:
        printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
        nfsd_reply_cache_shutdown();
        return -ENOMEM;
}

void nfsd_reply_cache_shutdown(void)
{
        struct svc_cacherep *rp;
        unsigned int i;

        unregister_shrinker(&nfsd_reply_cache_shrinker);
        cancel_delayed_work_sync(&cache_cleaner);

        for (i = 0; i < drc_hashsize; i++) {
                struct list_head *head = &drc_hashtbl[i].lru_head;
                while (!list_empty(head)) {
                        rp = list_first_entry(head, struct svc_cacherep, c_lru);
                        nfsd_reply_cache_free_locked(rp);
                }
        }

        kfree(drc_hashtbl);
        drc_hashtbl = NULL;
        drc_hashsize = 0;

        if (drc_slab) {
                kmem_cache_destroy(drc_slab);
                drc_slab = NULL;
        }
}

/*
 * Move cache entry to end of LRU list, and queue the cleaner to run if it's
 * not already scheduled.
 */
static void
lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
        rp->c_timestamp = jiffies;
        list_move_tail(&rp->c_lru, &b->lru_head);
        schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
}

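/*
 * Prune expired and excess entries from one bucket, oldest first. The
 * caller must hold b->cache_lock.
 */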
static long
prune_bucket(struct nfsd_drc_bucket *b)
{
        struct svc_cacherep *rp, *tmp;
        long freed = 0;

        list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
                /*
                 * Don't free entries attached to calls that are still
                 * in-progress, but do keep scanning the list.
                 */
                if (rp->c_state == RC_INPROG)
                        continue;
                if (atomic_read(&num_drc_entries) <= max_drc_entries &&
                    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
                        break;
                nfsd_reply_cache_free_locked(rp);
                freed++;
        }
        return freed;
}

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static long
prune_cache_entries(void)
{
        unsigned int i;
        long freed = 0;
        bool cancel = true;

        for (i = 0; i < drc_hashsize; i++) {
                struct nfsd_drc_bucket *b = &drc_hashtbl[i];

                if (list_empty(&b->lru_head))
                        continue;
                spin_lock(&b->cache_lock);
                freed += prune_bucket(b);
                if (!list_empty(&b->lru_head))
                        cancel = false;
                spin_unlock(&b->cache_lock);
        }

        /*
         * Conditionally rearm the job to run in RC_EXPIRE since we just
         * ran the pruner.
         */
        if (!cancel)
                mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
        return freed;
}

static void
cache_cleaner_func(struct work_struct *unused)
{
        prune_cache_entries();
}

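/*
 * Shrinker callbacks: ->count_objects reports the current number of cache
 * entries, and ->scan_objects prunes all buckets and returns the number of
 * entries actually freed.
 */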
static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
        return atomic_read(&num_drc_entries);
}

static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        return prune_cache_entries();
}

/*
 * Walk an xdr_buf and compute a checksum (via csum_partial) over at most
 * the first RC_CSUMLEN bytes of the request.
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
        int idx;
        unsigned int base;
        __wsum csum;
        struct xdr_buf *buf = &rqstp->rq_arg;
        const unsigned char *p = buf->head[0].iov_base;
        size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
                                RC_CSUMLEN);
        size_t len = min(buf->head[0].iov_len, csum_len);

        /* rq_arg.head first */
        csum = csum_partial(p, len, 0);
        csum_len -= len;

        /* Continue into page array */
        idx = buf->page_base / PAGE_SIZE;
        base = buf->page_base & ~PAGE_MASK;
        while (csum_len) {
                p = page_address(buf->pages[idx]) + base;
                len = min_t(size_t, PAGE_SIZE - base, csum_len);
                csum = csum_partial(p, len, csum);
                csum_len -= len;
                base = 0;
                ++idx;
        }
        return csum;
}

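/*
 * Compare a request against a cached entry, cheapest check first: the XID
 * differs on almost every miss, the payload checksum then catches XID
 * collisions carrying different data, and the remaining fields rule out
 * anything left.
 */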
static bool
nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp)
{
        /* Check RPC XID first */
        if (rqstp->rq_xid != rp->c_xid)
                return false;
        /* compare checksum of NFS data */
        if (csum != rp->c_csum) {
                ++payload_misses;
                return false;
        }

        /* Other discriminators */
        if (rqstp->rq_proc != rp->c_proc ||
            rqstp->rq_prot != rp->c_prot ||
            rqstp->rq_vers != rp->c_vers ||
            rqstp->rq_arg.len != rp->c_len ||
            !rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) ||
            rpc_get_port(svc_addr(rqstp)) != rpc_get_port((struct sockaddr *)&rp->c_addr))
                return false;

        return true;
}

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with the bucket's cache_lock held. Returns the found
 * entry or NULL on failure.
 */
static struct svc_cacherep *
nfsd_cache_search(struct nfsd_drc_bucket *b, struct svc_rqst *rqstp,
                  __wsum csum)
{
        struct svc_cacherep *rp, *ret = NULL;
        struct list_head *rh = &b->lru_head;
        unsigned int entries = 0;

        list_for_each_entry(rp, rh, c_lru) {
                ++entries;
                if (nfsd_cache_match(rqstp, csum, rp)) {
                        ret = rp;
                        break;
                }
        }

        /* tally hash chain length stats */
        if (entries > longest_chain) {
                longest_chain = entries;
                longest_chain_cachesize = atomic_read(&num_drc_entries);
        } else if (entries == longest_chain) {
                /* prefer to keep the smallest cachesize possible here */
                longest_chain_cachesize = min_t(unsigned int,
                                longest_chain_cachesize,
                                atomic_read(&num_drc_entries));
        }

        return ret;
}

/*
 * Try to find an entry matching the current call in the cache. Since the
 * common case is a miss followed by an insert, an entry is preallocated
 * before the bucket lock is taken; if the search then finds a match, the
 * preallocated entry is freed and the cached one is reused.
 */
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
        struct svc_cacherep *rp, *found;
        __be32 xid = rqstp->rq_xid;
        u32 proto = rqstp->rq_prot,
            vers = rqstp->rq_vers,
            proc = rqstp->rq_proc;
        __wsum csum;
        u32 hash = nfsd_cache_hash(xid);
        struct nfsd_drc_bucket *b = &drc_hashtbl[hash];
        unsigned long age;
        int type = rqstp->rq_cachetype;
        int rtn = RC_DOIT;

        rqstp->rq_cacherep = NULL;
        if (type == RC_NOCACHE) {
                nfsdstats.rcnocache++;
                return rtn;
        }

        csum = nfsd_cache_csum(rqstp);

        /*
         * Since the common case is a cache miss followed by an insert,
         * preallocate an entry.
         */
        rp = nfsd_reply_cache_alloc();
        spin_lock(&b->cache_lock);
        if (likely(rp)) {
                atomic_inc(&num_drc_entries);
                drc_mem_usage += sizeof(*rp);
        }

        /* go ahead and prune the cache */
        prune_bucket(b);

        found = nfsd_cache_search(b, rqstp, csum);
        if (found) {
                if (likely(rp))
                        nfsd_reply_cache_free_locked(rp);
                rp = found;
                goto found_entry;
        }

        if (!rp) {
                dprintk("nfsd: unable to allocate DRC entry!\n");
                goto out;
        }

        nfsdstats.rcmisses++;
        rqstp->rq_cacherep = rp;
        rp->c_state = RC_INPROG;
        rp->c_xid = xid;
        rp->c_proc = proc;
        rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
        rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
        rp->c_prot = proto;
        rp->c_vers = vers;
        rp->c_len = rqstp->rq_arg.len;
        rp->c_csum = csum;

        lru_put_end(b, rp);

        /* release any buffer */
        if (rp->c_type == RC_REPLBUFF) {
                drc_mem_usage -= rp->c_replvec.iov_len;
                kfree(rp->c_replvec.iov_base);
                rp->c_replvec.iov_base = NULL;
        }
        rp->c_type = RC_NOCACHE;
out:
        spin_unlock(&b->cache_lock);
        return rtn;

found_entry:
        nfsdstats.rchits++;
        /* We found a matching entry which is either in progress or done. */
        age = jiffies - rp->c_timestamp;
        lru_put_end(b, rp);

        rtn = RC_DROPIT;
        /* Request being processed or excessive rexmits */
        if (rp->c_state == RC_INPROG || age < RC_DELAY)
                goto out;

        /* From the hall of fame of impractical attacks:
         * Is this a user who tries to snoop on the cache? */
        rtn = RC_DOIT;
        if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
                goto out;

        /* Compose RPC reply header */
        switch (rp->c_type) {
        case RC_NOCACHE:
                break;
        case RC_REPLSTAT:
                svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
                rtn = RC_REPLY;
                break;
        case RC_REPLBUFF:
                if (!nfsd_cache_append(rqstp, &rp->c_replvec))
                        goto out;       /* should not happen */
                rtn = RC_REPLY;
                break;
        default:
                printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
                nfsd_reply_cache_free_locked(rp);
        }

        goto out;
}

/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
        struct svc_cacherep *rp = rqstp->rq_cacherep;
        struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
        u32 hash;
        struct nfsd_drc_bucket *b;
        int len;
        size_t bufsize = 0;

        if (!rp)
                return;

        hash = nfsd_cache_hash(rp->c_xid);
        b = &drc_hashtbl[hash];

        len = resv->iov_len - ((char *)statp - (char *)resv->iov_base);
        len >>= 2;
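        /* len is now the reply size in 32-bit XDR words, not bytes */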

        /* Don't cache excessive amounts of data and XDR failures */
        if (!statp || len > (256 >> 2)) {
                nfsd_reply_cache_free(b, rp);
                return;
        }

        switch (cachetype) {
        case RC_REPLSTAT:
                if (len != 1)
                        printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
                rp->c_replstat = *statp;
                break;
        case RC_REPLBUFF:
                cachv = &rp->c_replvec;
                bufsize = len << 2;
                cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
                if (!cachv->iov_base) {
                        nfsd_reply_cache_free(b, rp);
                        return;
                }
                cachv->iov_len = bufsize;
                memcpy(cachv->iov_base, statp, bufsize);
                break;
        case RC_NOCACHE:
                nfsd_reply_cache_free(b, rp);
                return;
        }
        spin_lock(&b->cache_lock);
        drc_mem_usage += bufsize;
        lru_put_end(b, rp);
        rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
        rp->c_type = cachetype;
        rp->c_state = RC_DONE;
        spin_unlock(&b->cache_lock);
        return;
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
        struct kvec *vec = &rqstp->rq_res.head[0];

        if (vec->iov_len + data->iov_len > PAGE_SIZE) {
                printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
                       data->iov_len);
                return 0;
        }
        memcpy((char *)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
        vec->iov_len += data->iov_len;
        return 1;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
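
/*
 * Illustrative output (the values below are made up; only the labels are
 * meaningful):
 *
 *      max entries:           32768
 *      num entries:           153
 *      hash buckets:          512
 *      mem usage:             18456
 *      cache hits:            1029
 *      cache misses:          312
 *      not cached:            447
 *      payload misses:        0
 *      longest chain len:     3
 *      cachesize at longest:  151
 */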
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
        seq_printf(m, "max entries:           %u\n", max_drc_entries);
        seq_printf(m, "num entries:           %u\n",
                   atomic_read(&num_drc_entries));
        seq_printf(m, "hash buckets:          %u\n", 1 << maskbits);
        seq_printf(m, "mem usage:             %u\n", drc_mem_usage);
        seq_printf(m, "cache hits:            %u\n", nfsdstats.rchits);
        seq_printf(m, "cache misses:          %u\n", nfsdstats.rcmisses);
        seq_printf(m, "not cached:            %u\n", nfsdstats.rcnocache);
        seq_printf(m, "payload misses:        %u\n", payload_misses);
        seq_printf(m, "longest chain len:     %u\n", longest_chain);
        seq_printf(m, "cachesize at longest:  %u\n", longest_chain_cachesize);
        return 0;
}

int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, nfsd_reply_cache_stats_show, NULL);
}