/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"

#define NFSDDBG_FACILITY	NFSDDBG_REPCACHE

/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE	64

static struct hlist_head *	cache_hash;
static struct list_head 	lru_head;
static struct kmem_cache	*drc_slab;

/* max number of entries allowed in the cache */
static unsigned int		max_drc_entries;

/* number of significant bits in the hash value */
static unsigned int		maskbits;

/*
 * Stats and other tracking of the duplicate reply cache. All of these and
 * the "rc" fields in nfsdstats are protected by the cache_lock.
 */

/* total number of entries */
static unsigned int		num_drc_entries;

/* cache misses due only to checksum comparison failures */
static unsigned int		payload_misses;

/* amount of memory (in bytes) currently consumed by the DRC */
static unsigned int		drc_mem_usage;

/* longest hash chain seen */
static unsigned int		longest_chain;

/* size of cache when we saw the longest hash chain */
static unsigned int		longest_chain_cachesize;

static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static void	cache_cleaner_func(struct work_struct *unused);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc);

static struct shrinker nfsd_reply_cache_shrinker = {
	.scan_objects = nfsd_reply_cache_scan,
	.count_objects = nfsd_reply_cache_count,
	.seeks	= 1,
};

/*
 * locking for the reply cache:
 * A cache entry is "single use" if c_state == RC_INPROG
 * Otherwise, the lock must be held when accessing _prev or _next.
 */
static DEFINE_SPINLOCK(cache_lock);
static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 * 64MB:	8192
 * 128MB:	11585
 * 256MB:	16384
 * 512MB:	23170
 * 1GB:	32768
 * 2GB:	46340
 * 4GB:	65536
 * 8GB:	92681
 * 16GB:	131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used in k.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
	unsigned int limit;
	unsigned long low_pages = totalram_pages - totalhigh_pages;

	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
	return min_t(unsigned int, limit, 256*1024);
}

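/*
 * Worked example (a sketch, assuming 4k pages, i.e. PAGE_SHIFT == 12):
 * with 1GB of low memory, low_pages = 262144 and int_sqrt(262144) = 512,
 * so limit = (16 * 512) << (12 - 10) = 8192 << 2 = 32768, matching the
 * 1GB row in the table above.
 */
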
/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
	return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}

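/*
 * Continuing the example above (illustrative numbers, not from the
 * source): a 32768-entry limit gives roundup_pow_of_two(32768 / 64) =
 * 512 hash buckets, and nfsd_reply_cache_init() below then sets
 * maskbits = ilog2(512) = 9.
 */
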
static struct svc_cacherep *
nfsd_reply_cache_alloc(void)
{
	struct svc_cacherep	*rp;

	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
	if (rp) {
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		INIT_LIST_HEAD(&rp->c_lru);
		INIT_HLIST_NODE(&rp->c_hash);
	}
	return rp;
}

static void
nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
{
	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
		drc_mem_usage -= rp->c_replvec.iov_len;
		kfree(rp->c_replvec.iov_base);
	}
	if (!hlist_unhashed(&rp->c_hash))
		hlist_del(&rp->c_hash);
	list_del(&rp->c_lru);
	--num_drc_entries;
	drc_mem_usage -= sizeof(*rp);
	kmem_cache_free(drc_slab, rp);
}

static void
nfsd_reply_cache_free(struct svc_cacherep *rp)
{
	spin_lock(&cache_lock);
	nfsd_reply_cache_free_locked(rp);
	spin_unlock(&cache_lock);
}

int nfsd_reply_cache_init(void)
{
	unsigned int hashsize;

	INIT_LIST_HEAD(&lru_head);
	max_drc_entries = nfsd_cache_size_limit();
	num_drc_entries = 0;
	hashsize = nfsd_hashsize(max_drc_entries);
	maskbits = ilog2(hashsize);

	register_shrinker(&nfsd_reply_cache_shrinker);
	drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
					0, 0, NULL);
	if (!drc_slab)
		goto out_nomem;

	cache_hash = kcalloc(hashsize, sizeof(struct hlist_head), GFP_KERNEL);
	if (!cache_hash)
		goto out_nomem;

	return 0;
out_nomem:
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	nfsd_reply_cache_shutdown();
	return -ENOMEM;
}

void nfsd_reply_cache_shutdown(void)
{
	struct svc_cacherep	*rp;

	unregister_shrinker(&nfsd_reply_cache_shrinker);
	cancel_delayed_work_sync(&cache_cleaner);

	while (!list_empty(&lru_head)) {
		rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
		nfsd_reply_cache_free_locked(rp);
	}

	kfree(cache_hash);
	cache_hash = NULL;

	if (drc_slab) {
		kmem_cache_destroy(drc_slab);
		drc_slab = NULL;
	}
}

/*
 * Move cache entry to end of LRU list, and queue the cleaner to run if it's
 * not already scheduled.
 */
static void
lru_put_end(struct svc_cacherep *rp)
{
	rp->c_timestamp = jiffies;
	list_move_tail(&rp->c_lru, &lru_head);
	schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
}

/*
 * Move a cache entry from one hash list to another
 */
static void
hash_refile(struct svc_cacherep *rp)
{
	hlist_del_init(&rp->c_hash);
	hlist_add_head(&rp->c_hash, cache_hash + hash_32(rp->c_xid, maskbits));
}

static inline bool
nfsd_cache_entry_expired(struct svc_cacherep *rp)
{
	return rp->c_state != RC_INPROG &&
	       time_after(jiffies, rp->c_timestamp + RC_EXPIRE);
}

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static long
prune_cache_entries(void)
{
	struct svc_cacherep *rp, *tmp;
	long freed = 0;

	list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
		if (!nfsd_cache_entry_expired(rp) &&
		    num_drc_entries <= max_drc_entries)
			break;
		nfsd_reply_cache_free_locked(rp);
		freed++;
	}

	/*
	 * Conditionally rearm the job. If we cleaned out the list, then
	 * cancel any pending run (since there won't be any work to do).
	 * Otherwise, we rearm the job or modify the existing one to run in
	 * RC_EXPIRE since we just ran the pruner.
	 */
	if (list_empty(&lru_head))
		cancel_delayed_work(&cache_cleaner);
	else
		mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
	return freed;
}

static void
cache_cleaner_func(struct work_struct *unused)
{
	spin_lock(&cache_lock);
	prune_cache_entries();
	spin_unlock(&cache_lock);
}

static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long num;

	spin_lock(&cache_lock);
	num = num_drc_entries;
	spin_unlock(&cache_lock);

	return num;
}

static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long freed;

	spin_lock(&cache_lock);
	freed = prune_cache_entries();
	spin_unlock(&cache_lock);
	return freed;
}

/*
 * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
	int idx;
	unsigned int base;
	__wsum csum;
	struct xdr_buf *buf = &rqstp->rq_arg;
	const unsigned char *p = buf->head[0].iov_base;
	size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
				RC_CSUMLEN);
	size_t len = min(buf->head[0].iov_len, csum_len);

	/* rq_arg.head first */
	csum = csum_partial(p, len, 0);
	csum_len -= len;

	/* Continue into page array */
	idx = buf->page_base / PAGE_SIZE;
	base = buf->page_base & ~PAGE_MASK;
	while (csum_len) {
		p = page_address(buf->pages[idx]) + base;
		len = min_t(size_t, PAGE_SIZE - base, csum_len);
		csum = csum_partial(p, len, csum);
		csum_len -= len;
		base = 0;
		++idx;
	}
	return csum;
}

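/*
 * Illustrative walk-through (assumes RC_CSUMLEN is the 256-byte cap
 * defined in cache.h): for a request with 100 bytes in rq_arg.head and
 * 400 bytes in the page array, csum_len = min(100 + 400, 256) = 256.
 * The first csum_partial() covers the 100 head bytes, leaving 156 bytes
 * for the page loop to fold into the checksum.
 */
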
static bool
nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp)
{
	/* Check RPC header info first */
	if (rqstp->rq_xid != rp->c_xid || rqstp->rq_proc != rp->c_proc ||
	    rqstp->rq_prot != rp->c_prot || rqstp->rq_vers != rp->c_vers ||
	    rqstp->rq_arg.len != rp->c_len ||
	    !rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) ||
	    rpc_get_port(svc_addr(rqstp)) != rpc_get_port((struct sockaddr *)&rp->c_addr))
		return false;

	/* compare checksum of NFS data */
	if (csum != rp->c_csum) {
		++payload_misses;
		return false;
	}

	return true;
}

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the found entry or
 * NULL on failure.
 */
static struct svc_cacherep *
nfsd_cache_search(struct svc_rqst *rqstp, __wsum csum)
{
	struct svc_cacherep	*rp, *ret = NULL;
	struct hlist_head 	*rh;
	unsigned int		entries = 0;

	rh = &cache_hash[hash_32(rqstp->rq_xid, maskbits)];
	hlist_for_each_entry(rp, rh, c_hash) {
		++entries;
		if (nfsd_cache_match(rqstp, csum, rp)) {
			ret = rp;
			break;
		}
	}

	/* tally hash chain length stats */
	if (entries > longest_chain) {
		longest_chain = entries;
		longest_chain_cachesize = num_drc_entries;
	} else if (entries == longest_chain) {
		/* prefer to keep the smallest cachesize possible here */
		longest_chain_cachesize = min(longest_chain_cachesize,
						num_drc_entries);
	}

	return ret;
}

/*
 * Try to find an entry matching the current call in the cache. When none
 * is found, we try to grab the oldest expired entry off the LRU list. If
 * a suitable one isn't there, then drop the cache_lock and allocate a
 * new one, then search again in case one got inserted while this thread
 * didn't hold the lock.
 */
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
	struct svc_cacherep	*rp, *found;
	__be32			xid = rqstp->rq_xid;
	u32			proto = rqstp->rq_prot,
				vers = rqstp->rq_vers,
				proc = rqstp->rq_proc;
	__wsum			csum;
	unsigned long		age;
	int type = rqstp->rq_cachetype;
	int rtn = RC_DOIT;

	rqstp->rq_cacherep = NULL;
	if (type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		return rtn;
	}

	csum = nfsd_cache_csum(rqstp);

	/*
	 * Since the common case is a cache miss followed by an insert,
	 * preallocate an entry. First, try to reuse the first entry on the LRU
	 * if it works, then go ahead and prune the LRU list.
	 */
	spin_lock(&cache_lock);
	if (!list_empty(&lru_head)) {
		rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
		if (nfsd_cache_entry_expired(rp) ||
		    num_drc_entries >= max_drc_entries) {
			lru_put_end(rp);
			prune_cache_entries();
			goto search_cache;
		}
	}

	/* No expired ones available, allocate a new one. */
	spin_unlock(&cache_lock);
	rp = nfsd_reply_cache_alloc();
	spin_lock(&cache_lock);
	if (likely(rp)) {
		++num_drc_entries;
		drc_mem_usage += sizeof(*rp);
	}

search_cache:
	found = nfsd_cache_search(rqstp, csum);
	if (found) {
		if (likely(rp))
			nfsd_reply_cache_free_locked(rp);
		rp = found;
		goto found_entry;
	}

	if (!rp) {
		dprintk("nfsd: unable to allocate DRC entry!\n");
		goto out;
	}

	/*
	 * We're keeping the one we just allocated. Are we now over the
	 * limit? Prune one off the tip of the LRU in trade for the one we
	 * just allocated if so.
	 */
	if (num_drc_entries >= max_drc_entries)
		nfsd_reply_cache_free_locked(list_first_entry(&lru_head,
						struct svc_cacherep, c_lru));

	nfsdstats.rcmisses++;
	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;
	rp->c_xid = xid;
	rp->c_proc = proc;
	rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
	rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
	rp->c_prot = proto;
	rp->c_vers = vers;
	rp->c_len = rqstp->rq_arg.len;
	rp->c_csum = csum;

	hash_refile(rp);
	lru_put_end(rp);

	/* release any buffer */
	if (rp->c_type == RC_REPLBUFF) {
		drc_mem_usage -= rp->c_replvec.iov_len;
		kfree(rp->c_replvec.iov_base);
		rp->c_replvec.iov_base = NULL;
	}
	rp->c_type = RC_NOCACHE;
out:
	spin_unlock(&cache_lock);
	return rtn;

found_entry:
	nfsdstats.rchits++;
	/* We found a matching entry which is either in progress or done. */
	age = jiffies - rp->c_timestamp;
	lru_put_end(rp);

	rtn = RC_DROPIT;
	/* Request being processed or excessive rexmits */
	if (rp->c_state == RC_INPROG || age < RC_DELAY)
		goto out;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!rqstp->rq_secure && rp->c_secure)
		goto out;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out;	/* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
		nfsd_reply_cache_free_locked(rp);
	}

	goto out;
}

/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
	struct svc_cacherep *rp = rqstp->rq_cacherep;
	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
	int		len;
	size_t		bufsize = 0;

	if (!rp)
		return;

	len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		nfsd_reply_cache_free(rp);
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		bufsize = len << 2;
		cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
		if (!cachv->iov_base) {
			nfsd_reply_cache_free(rp);
			return;
		}
		cachv->iov_len = bufsize;
		memcpy(cachv->iov_base, statp, bufsize);
		break;
	case RC_NOCACHE:
		nfsd_reply_cache_free(rp);
		return;
	}
	spin_lock(&cache_lock);
	drc_mem_usage += bufsize;
	lru_put_end(rp);
	rp->c_secure = rqstp->rq_secure;
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	spin_unlock(&cache_lock);
	return;
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec	*vec = &rqstp->rq_res.head[0];

	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%Zd).\n",
				data->iov_len);
		return 0;
	}
	memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
	spin_lock(&cache_lock);
	seq_printf(m, "max entries:           %u\n", max_drc_entries);
	seq_printf(m, "num entries:           %u\n", num_drc_entries);
	seq_printf(m, "hash buckets:          %u\n", 1 << maskbits);
	seq_printf(m, "mem usage:             %u\n", drc_mem_usage);
	seq_printf(m, "cache hits:            %u\n", nfsdstats.rchits);
	seq_printf(m, "cache misses:          %u\n", nfsdstats.rcmisses);
	seq_printf(m, "not cached:            %u\n", nfsdstats.rcnocache);
	seq_printf(m, "payload misses:        %u\n", payload_misses);
	seq_printf(m, "longest chain len:     %u\n", longest_chain);
	seq_printf(m, "cachesize at longest:  %u\n", longest_chain_cachesize);
	spin_unlock(&cache_lock);
	return 0;
}

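/*
 * For illustration only (values made up, not from the source): reading
 * the stats file that nfsd_reply_cache_stats_open() backs (typically
 * exposed through the nfsd filesystem, e.g.
 * /proc/fs/nfsd/reply_cache_stats) yields output shaped like:
 *
 *	max entries:           92681
 *	num entries:           104
 *	hash buckets:          2048
 *	mem usage:             27656
 *	cache hits:            0
 *	cache misses:          373
 *	not cached:            511
 *	payload misses:        0
 *	longest chain len:     1
 *	cachesize at longest:  104
 */
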
int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, nfsd_reply_cache_stats_show, NULL);
}