/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"

#define NFSDDBG_FACILITY	NFSDDBG_REPCACHE

#define HASHSIZE		64

static struct hlist_head *	cache_hash;
static struct list_head 	lru_head;
static struct kmem_cache	*drc_slab;
static unsigned int		num_drc_entries;
static unsigned int		max_drc_entries;

/*
 * Calculate the hash index from an XID.
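 * XIDs from a client are usually assigned sequentially, so the low bits
 * carry most of the variation; folding in the top byte also helps spread
 * clients whose XIDs differ mainly in the high bits across the table.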
 */
static inline u32 request_hash(u32 xid)
{
	u32 h = xid;
	h ^= (xid >> 24);
	return h & (HASHSIZE-1);
}

static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static void	cache_cleaner_func(struct work_struct *unused);
static int	nfsd_reply_cache_shrink(struct shrinker *shrink,
					struct shrink_control *sc);

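/*
 * Let the VM shrink the cache under memory pressure: a nonzero
 * sc->nr_to_scan triggers a prune, and the return value reports how
 * many entries remain.
 */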
struct shrinker nfsd_reply_cache_shrinker = {
	.shrink	= nfsd_reply_cache_shrink,
	.seeks	= 1,
};

/*
 * locking for the reply cache:
 * A cache entry is "single use" if c_state == RC_INPROG
 * Otherwise, when accessing _prev or _next, the lock must be held.
 */
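/* cache_lock also guards the hash chains, the LRU list and num_drc_entries. */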
static DEFINE_SPINLOCK(cache_lock);
static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used in k.
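 *
 * For example, with 4k pages, 64MB of low memory is 16384 pages:
 * int_sqrt(16384) = 128, and (16 * 128) << (12 - 10) = 8192 entries.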
 */
static unsigned int
nfsd_cache_size_limit(void)
{
	unsigned int limit;
	unsigned long low_pages = totalram_pages - totalhigh_pages;

	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
	return min_t(unsigned int, limit, 256*1024);
}

static struct svc_cacherep *
nfsd_reply_cache_alloc(void)
{
	struct svc_cacherep	*rp;

	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
	if (rp) {
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		INIT_LIST_HEAD(&rp->c_lru);
		INIT_HLIST_NODE(&rp->c_hash);
	}
	return rp;
}

static void
nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
{
	if (rp->c_type == RC_REPLBUFF)
		kfree(rp->c_replvec.iov_base);
	if (!hlist_unhashed(&rp->c_hash))
		hlist_del(&rp->c_hash);
	list_del(&rp->c_lru);
	--num_drc_entries;
	kmem_cache_free(drc_slab, rp);
}

static void
nfsd_reply_cache_free(struct svc_cacherep *rp)
{
	spin_lock(&cache_lock);
	nfsd_reply_cache_free_locked(rp);
	spin_unlock(&cache_lock);
}

int nfsd_reply_cache_init(void)
{
	INIT_LIST_HEAD(&lru_head);
	max_drc_entries = nfsd_cache_size_limit();
	num_drc_entries = 0;

	register_shrinker(&nfsd_reply_cache_shrinker);
	drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
					0, 0, NULL);
	if (!drc_slab)
		goto out_nomem;

	cache_hash = kcalloc(HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
	if (!cache_hash)
		goto out_nomem;

	return 0;
out_nomem:
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	nfsd_reply_cache_shutdown();
	return -ENOMEM;
}

void nfsd_reply_cache_shutdown(void)
{
	struct svc_cacherep	*rp;

	unregister_shrinker(&nfsd_reply_cache_shrinker);
	cancel_delayed_work_sync(&cache_cleaner);

	while (!list_empty(&lru_head)) {
		rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
		nfsd_reply_cache_free_locked(rp);
	}

	kfree(cache_hash);
	cache_hash = NULL;

	if (drc_slab) {
		kmem_cache_destroy(drc_slab);
		drc_slab = NULL;
	}
}

/*
 * Move cache entry to end of LRU list, and queue the cleaner to run if it's
 * not already scheduled.
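 * The timestamp set here doubles as the expiry clock: the entry becomes
 * reclaimable once it is RC_EXPIRE old and no longer RC_INPROG.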
 */
static void
lru_put_end(struct svc_cacherep *rp)
{
	rp->c_timestamp = jiffies;
	list_move_tail(&rp->c_lru, &lru_head);
	schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
}

/*
 * Move a cache entry from one hash list to another
 */
static void
hash_refile(struct svc_cacherep *rp)
{
	hlist_del_init(&rp->c_hash);
	hlist_add_head(&rp->c_hash, cache_hash + request_hash(rp->c_xid));
}

static inline bool
nfsd_cache_entry_expired(struct svc_cacherep *rp)
{
	return rp->c_state != RC_INPROG &&
	       time_after(jiffies, rp->c_timestamp + RC_EXPIRE);
}

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static void
prune_cache_entries(void)
{
	struct svc_cacherep *rp, *tmp;

	list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
		if (!nfsd_cache_entry_expired(rp) &&
		    num_drc_entries <= max_drc_entries)
			break;
		nfsd_reply_cache_free_locked(rp);
	}

	/*
	 * Conditionally rearm the job. If we cleaned out the list, then
	 * cancel any pending run (since there won't be any work to do).
	 * Otherwise, we rearm the job or modify the existing one to run in
	 * RC_EXPIRE since we just ran the pruner.
	 */
	if (list_empty(&lru_head))
		cancel_delayed_work(&cache_cleaner);
	else
		mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
}

static void
cache_cleaner_func(struct work_struct *unused)
{
	spin_lock(&cache_lock);
	prune_cache_entries();
	spin_unlock(&cache_lock);
}

static int
nfsd_reply_cache_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned int	num;

	spin_lock(&cache_lock);
	if (sc->nr_to_scan)
		prune_cache_entries();
	num = num_drc_entries;
	spin_unlock(&cache_lock);

	return num;
}

/*
 * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
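 * The checksum guards against false cache hits when an XID is reused
 * for a different request.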
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
	int idx;
	unsigned int base;
	__wsum csum;
	struct xdr_buf *buf = &rqstp->rq_arg;
	const unsigned char *p = buf->head[0].iov_base;
	size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
				RC_CSUMLEN);
	size_t len = min(buf->head[0].iov_len, csum_len);

	/* rq_arg.head first */
	csum = csum_partial(p, len, 0);
	csum_len -= len;

	/* Continue into page array */
	idx = buf->page_base / PAGE_SIZE;
	base = buf->page_base & ~PAGE_MASK;
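	/* base is the offset into the first page; later pages start at 0 */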
	while (csum_len) {
		p = page_address(buf->pages[idx]) + base;
		len = min_t(size_t, PAGE_SIZE - base, csum_len);
		csum = csum_partial(p, len, csum);
		csum_len -= len;
		base = 0;
		++idx;
	}
	return csum;
}

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the found entry or
 * NULL on failure.
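 * A match requires the XID, procedure, protocol, version, argument
 * length, checksum, and client address and port all to be equal.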
 */
static struct svc_cacherep *
nfsd_cache_search(struct svc_rqst *rqstp, __wsum csum)
{
	struct svc_cacherep	*rp;
	struct hlist_head 	*rh;
	__be32 xid = rqstp->rq_xid;
	u32 proto = rqstp->rq_prot,
		vers = rqstp->rq_vers,
		proc = rqstp->rq_proc;

	rh = &cache_hash[request_hash(xid)];
	hlist_for_each_entry(rp, rh, c_hash) {
		if (xid == rp->c_xid && proc == rp->c_proc &&
		    proto == rp->c_prot && vers == rp->c_vers &&
		    rqstp->rq_arg.len == rp->c_len && csum == rp->c_csum &&
		    rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) &&
		    rpc_get_port(svc_addr(rqstp)) == rpc_get_port((struct sockaddr *)&rp->c_addr))
			return rp;
	}
	return NULL;
}

/*
 * Try to find an entry matching the current call in the cache. When none
 * is found, we try to grab the oldest expired entry off the LRU list. If
 * a suitable one isn't there, then drop the cache_lock and allocate a
 * new one, then search again in case one got inserted while this thread
 * didn't hold the lock.
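 *
 * Returns RC_DOIT when the caller should handle the request, RC_REPLY
 * when a cached reply has been queued, or RC_DROPIT when the request
 * should be dropped (still in progress, or an excessive retransmit).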
 */
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
	struct svc_cacherep	*rp, *found;
	__be32			xid = rqstp->rq_xid;
	u32			proto = rqstp->rq_prot,
				vers = rqstp->rq_vers,
				proc = rqstp->rq_proc;
	__wsum			csum;
	unsigned long		age;
	int type = rqstp->rq_cachetype;
	int rtn;

	rqstp->rq_cacherep = NULL;
	if (type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		return RC_DOIT;
	}

	csum = nfsd_cache_csum(rqstp);

	spin_lock(&cache_lock);
	rtn = RC_DOIT;

	rp = nfsd_cache_search(rqstp, csum);
	if (rp)
		goto found_entry;

	/* Try to use the first entry on the LRU */
	if (!list_empty(&lru_head)) {
		rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
		if (nfsd_cache_entry_expired(rp) ||
		    num_drc_entries >= max_drc_entries) {
			lru_put_end(rp);
			prune_cache_entries();
			goto setup_entry;
		}
	}

	/* Drop the lock and allocate a new entry */
	spin_unlock(&cache_lock);
	rp = nfsd_reply_cache_alloc();
	if (!rp) {
		dprintk("nfsd: unable to allocate DRC entry!\n");
		return RC_DOIT;
	}
	spin_lock(&cache_lock);
	++num_drc_entries;

	/*
	 * Must search again just in case someone inserted one
	 * after we dropped the lock above.
	 */
	found = nfsd_cache_search(rqstp, csum);
	if (found) {
		nfsd_reply_cache_free_locked(rp);
		rp = found;
		goto found_entry;
	}

	/*
	 * We're keeping the one we just allocated. Are we now over the
	 * limit? Prune one off the tip of the LRU in trade for the one we
	 * just allocated if so.
	 */
	if (num_drc_entries >= max_drc_entries)
		nfsd_reply_cache_free_locked(list_first_entry(&lru_head,
						struct svc_cacherep, c_lru));

setup_entry:
	nfsdstats.rcmisses++;
	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;
	rp->c_xid = xid;
	rp->c_proc = proc;
	rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
	rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
	rp->c_prot = proto;
	rp->c_vers = vers;
	rp->c_len = rqstp->rq_arg.len;
	rp->c_csum = csum;

	hash_refile(rp);
	lru_put_end(rp);

	/* release any buffer */
	if (rp->c_type == RC_REPLBUFF) {
		kfree(rp->c_replvec.iov_base);
		rp->c_replvec.iov_base = NULL;
	}
	rp->c_type = RC_NOCACHE;
 out:
	spin_unlock(&cache_lock);
	return rtn;

found_entry:
	nfsdstats.rchits++;
	/* We found a matching entry which is either in progress or done. */
	age = jiffies - rp->c_timestamp;
	lru_put_end(rp);

	rtn = RC_DROPIT;
	/* Request being processed or excessive rexmits */
	if (rp->c_state == RC_INPROG || age < RC_DELAY)
		goto out;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!rqstp->rq_secure && rp->c_secure)
		goto out;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out;	/* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
		nfsd_reply_cache_free_locked(rp);
	}

	goto out;
}

/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
	struct svc_cacherep *rp = rqstp->rq_cacherep;
	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
	int		len;

	if (!rp)
		return;

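	/* Length from the status word to the end of head, in 32-bit words */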
	len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		nfsd_reply_cache_free(rp);
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		cachv->iov_base = kmalloc(len << 2, GFP_KERNEL);
		if (!cachv->iov_base) {
			nfsd_reply_cache_free(rp);
			return;
		}
		cachv->iov_len = len << 2;
		memcpy(cachv->iov_base, statp, len << 2);
		break;
	case RC_NOCACHE:
		nfsd_reply_cache_free(rp);
		return;
	}
	spin_lock(&cache_lock);
	lru_put_end(rp);
	rp->c_secure = rqstp->rq_secure;
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	spin_unlock(&cache_lock);
	return;
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec	*vec = &rqstp->rq_res.head[0];

	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%Zd).\n",
				data->iov_len);
		return 0;
	}
	memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}