/*
 * net/sunrpc/cache.c
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Released under terms in GPL version 2. See COPYING.
 *
 */

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <asm/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/smp_lock.h>

#define RPCDBG_FACILITY RPCDBG_CACHE

static int cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);

static void cache_init(struct cache_head *h)
{
	time_t now = get_seconds();
	h->next = NULL;
	h->flags = 0;
	kref_init(&h->ref);
	h->expiry_time = now + CACHE_NEW_EXPIRY;
	h->last_refresh = now;
}

struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
				       struct cache_head *key, int hash)
{
	struct cache_head **head, **hp;
	struct cache_head *new = NULL;

	head = &detail->hash_table[hash];

	read_lock(&detail->hash_lock);

	for (hp = head; *hp != NULL; hp = &(*hp)->next) {
		struct cache_head *tmp = *hp;
		if (detail->match(tmp, key)) {
			cache_get(tmp);
			read_unlock(&detail->hash_lock);
			return tmp;
		}
	}
	read_unlock(&detail->hash_lock);
	/* Didn't find anything, insert an empty entry */

	new = detail->alloc();
	if (!new)
		return NULL;
	/* must fully initialise 'new', else
	 * we might lose it if we need to
	 * cache_put it soon.
	 */
	cache_init(new);
	detail->init(new, key);

	write_lock(&detail->hash_lock);

	/* check if entry appeared while we slept */
	for (hp = head; *hp != NULL; hp = &(*hp)->next) {
		struct cache_head *tmp = *hp;
		if (detail->match(tmp, key)) {
			cache_get(tmp);
			write_unlock(&detail->hash_lock);
			cache_put(new, detail);
			return tmp;
		}
	}
	new->next = *head;
	*head = new;
	detail->entries++;
	cache_get(new);
	write_unlock(&detail->hash_lock);

	return new;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);


static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);

static void cache_fresh_locked(struct cache_head *head, time_t expiry)
{
	head->expiry_time = expiry;
	head->last_refresh = get_seconds();
	set_bit(CACHE_VALID, &head->flags);
}

static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail)
{
	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
		cache_revisit_request(head);
		cache_dequeue(detail, head);
	}
}

struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
				       struct cache_head *new, struct cache_head *old, int hash)
{
	/* The 'old' entry is to be replaced by 'new'.
	 * If 'old' is not VALID, we update it directly,
	 * otherwise we need to replace it
	 */
	struct cache_head **head;
	struct cache_head *tmp;

	if (!test_bit(CACHE_VALID, &old->flags)) {
		write_lock(&detail->hash_lock);
		if (!test_bit(CACHE_VALID, &old->flags)) {
			if (test_bit(CACHE_NEGATIVE, &new->flags))
				set_bit(CACHE_NEGATIVE, &old->flags);
			else
				detail->update(old, new);
			cache_fresh_locked(old, new->expiry_time);
			write_unlock(&detail->hash_lock);
			cache_fresh_unlocked(old, detail);
			return old;
		}
		write_unlock(&detail->hash_lock);
	}
	/* We need to insert a new entry */
	tmp = detail->alloc();
	if (!tmp) {
		cache_put(old, detail);
		return NULL;
	}
	cache_init(tmp);
	detail->init(tmp, old);
	head = &detail->hash_table[hash];

	write_lock(&detail->hash_lock);
	if (test_bit(CACHE_NEGATIVE, &new->flags))
		set_bit(CACHE_NEGATIVE, &tmp->flags);
	else
		detail->update(tmp, new);
	tmp->next = *head;
	*head = tmp;
	detail->entries++;
	cache_get(tmp);
	cache_fresh_locked(tmp, new->expiry_time);
	cache_fresh_locked(old, 0);
	write_unlock(&detail->hash_lock);
	cache_fresh_unlocked(tmp, detail);
	cache_fresh_unlocked(old, detail);
	cache_put(old, detail);
	return tmp;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);
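
/*
 * Example usage (an illustrative sketch only: 'demo_ent', 'demo_hash' and
 * 'demo_cache' are hypothetical names, not part of this file).  A cache
 * implementation typically embeds a cache_head as its first member and
 * wraps the lookup/update pair like this:
 *
 *	struct demo_ent {
 *		struct cache_head h;
 *		char *name;
 *	};
 *
 *	static struct demo_ent *demo_lookup(struct demo_ent *key)
 *	{
 *		struct cache_head *ch;
 *
 *		ch = sunrpc_cache_lookup(&demo_cache, &key->h, demo_hash(key));
 *		return ch ? container_of(ch, struct demo_ent, h) : NULL;
 *	}
 *
 *	static struct demo_ent *demo_update(struct demo_ent *new,
 *					    struct demo_ent *old)
 *	{
 *		struct cache_head *ch;
 *
 *		ch = sunrpc_cache_update(&demo_cache, &new->h, &old->h,
 *					 demo_hash(new));
 *		return ch ? container_of(ch, struct demo_ent, h) : NULL;
 *	}
 */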

static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
{
	if (!cd->cache_upcall)
		return -EINVAL;
	return cd->cache_upcall(cd, h);
}

static inline int cache_is_valid(struct cache_detail *detail, struct cache_head *h)
{
	if (!test_bit(CACHE_VALID, &h->flags) ||
	    h->expiry_time < get_seconds())
		return -EAGAIN;
	else if (detail->flush_time > h->last_refresh)
		return -EAGAIN;
	else {
		/* entry is valid */
		if (test_bit(CACHE_NEGATIVE, &h->flags))
			return -ENOENT;
		else
			return 0;
	}
}

/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 *	-EAGAIN if upcall is pending and request has been queued
 *	-ETIMEDOUT if upcall failed or request could not be queued or
 *	  upcall completed but item is still invalid (implying that
 *	  the cache item has been replaced with a newer one).
 *	-ENOENT if cache entry was negative
 */
int cache_check(struct cache_detail *detail,
		struct cache_head *h, struct cache_req *rqstp)
{
	int rv;
	long refresh_age, age;

	/* First decide return status as best we can */
	rv = cache_is_valid(detail, h);

	/* now see if we want to start an upcall */
	refresh_age = (h->expiry_time - h->last_refresh);
	age = get_seconds() - h->last_refresh;

	if (rqstp == NULL) {
		if (rv == -EAGAIN)
			rv = -ENOENT;
	} else if (rv == -EAGAIN || age > refresh_age/2) {
		dprintk("RPC: Want update, refage=%ld, age=%ld\n",
			refresh_age, age);
		if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
			switch (cache_make_upcall(detail, h)) {
			case -EINVAL:
				clear_bit(CACHE_PENDING, &h->flags);
				cache_revisit_request(h);
				if (rv == -EAGAIN) {
					set_bit(CACHE_NEGATIVE, &h->flags);
					cache_fresh_locked(h, get_seconds()+CACHE_NEW_EXPIRY);
					cache_fresh_unlocked(h, detail);
					rv = -ENOENT;
				}
				break;

			case -EAGAIN:
				clear_bit(CACHE_PENDING, &h->flags);
				cache_revisit_request(h);
				break;
			}
		}
	}

	if (rv == -EAGAIN) {
		if (cache_defer_req(rqstp, h) < 0) {
			/* Request is not deferred */
			rv = cache_is_valid(detail, h);
			if (rv == -EAGAIN)
				rv = -ETIMEDOUT;
		}
	}
	if (rv)
		cache_put(h, detail);
	return rv;
}
EXPORT_SYMBOL_GPL(cache_check);
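
/*
 * Example call pattern (a sketch; the 'demo' names above are hypothetical,
 * rq_chandle is the cache_req embedded in struct svc_rqst):
 *
 *	struct demo_ent *ent = demo_lookup(&key);
 *	if (ent) {
 *		switch (cache_check(&demo_cache, &ent->h, &rqstp->rq_chandle)) {
 *		case 0:
 *			// entry is valid: use it, then cache_put() it
 *			break;
 *		case -EAGAIN:	// upcall pending, request was deferred
 *		case -ETIMEDOUT:
 *		case -ENOENT:	// negative entry
 *			// cache_check() already dropped the reference
 *			break;
 *		}
 *	}
 */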

/*
 * Caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time clean_cache is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 *
 */

static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static void do_cache_clean(struct work_struct *work);
static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean);

static void sunrpc_init_cache_detail(struct cache_detail *cd)
{
	rwlock_init(&cd->hash_lock);
	INIT_LIST_HEAD(&cd->queue);
	spin_lock(&cache_list_lock);
	cd->nextcheck = 0;
	cd->entries = 0;
	atomic_set(&cd->readers, 0);
	cd->last_close = 0;
	cd->last_warn = -1;
	list_add(&cd->others, &cache_list);
	spin_unlock(&cache_list_lock);

	/* start the cleaning process */
	schedule_delayed_work(&cache_cleaner, 0);
}

static void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
	cache_purge(cd);
	spin_lock(&cache_list_lock);
	write_lock(&cd->hash_lock);
	if (cd->entries || atomic_read(&cd->inuse)) {
		write_unlock(&cd->hash_lock);
		spin_unlock(&cache_list_lock);
		goto out;
	}
	if (current_detail == cd)
		current_detail = NULL;
	list_del_init(&cd->others);
	write_unlock(&cd->hash_lock);
	spin_unlock(&cache_list_lock);
	if (list_empty(&cache_list)) {
		/* module must be being unloaded so it's safe to kill the worker */
		cancel_delayed_work_sync(&cache_cleaner);
	}
	return;
out:
	printk(KERN_ERR "nfsd: failed to unregister %s cache\n", cd->name);
}

/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
	int rv = 0;
	struct list_head *next;

	spin_lock(&cache_list_lock);

	/* find a suitable table if we don't already have one */
	while (current_detail == NULL ||
	       current_index >= current_detail->hash_size) {
		if (current_detail)
			next = current_detail->others.next;
		else
			next = cache_list.next;
		if (next == &cache_list) {
			current_detail = NULL;
			spin_unlock(&cache_list_lock);
			return -1;
		}
		current_detail = list_entry(next, struct cache_detail, others);
		if (current_detail->nextcheck > get_seconds())
			current_index = current_detail->hash_size;
		else {
			current_index = 0;
			current_detail->nextcheck = get_seconds()+30*60;
		}
	}

	/* find a non-empty bucket in the table */
	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       current_detail->hash_table[current_index] == NULL)
		current_index++;

	/* find a cleanable entry in the bucket and clean it, or set to next bucket */

	if (current_detail && current_index < current_detail->hash_size) {
		struct cache_head *ch, **cp;
		struct cache_detail *d;

		write_lock(&current_detail->hash_lock);

		/* Ok, now to clean this strand */

		cp = &current_detail->hash_table[current_index];
		ch = *cp;
		for (; ch; cp = &ch->next, ch = *cp) {
			if (current_detail->nextcheck > ch->expiry_time)
				current_detail->nextcheck = ch->expiry_time+1;
			if (ch->expiry_time >= get_seconds() &&
			    ch->last_refresh >= current_detail->flush_time)
				continue;
			if (test_and_clear_bit(CACHE_PENDING, &ch->flags))
				cache_dequeue(current_detail, ch);

			if (atomic_read(&ch->ref.refcount) == 1)
				break;
		}
		if (ch) {
			*cp = ch->next;
			ch->next = NULL;
			current_detail->entries--;
			rv = 1;
		}
		write_unlock(&current_detail->hash_lock);
		d = current_detail;
		if (!ch)
			current_index++;
		spin_unlock(&cache_list_lock);
		if (ch) {
			cache_revisit_request(ch);
			cache_put(ch, d);
		}
	} else
		spin_unlock(&cache_list_lock);

	return rv;
}

/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
	int delay = 5;
	if (cache_clean() == -1)
		delay = round_jiffies_relative(30*HZ);

	if (list_empty(&cache_list))
		delay = 0;

	if (delay)
		schedule_delayed_work(&cache_cleaner, delay);
}


/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned
 */
void cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}
EXPORT_SYMBOL_GPL(cache_flush);

void cache_purge(struct cache_detail *detail)
{
	detail->flush_time = LONG_MAX;
	detail->nextcheck = get_seconds();
	cache_flush();
	detail->flush_time = 1;
}
EXPORT_SYMBOL_GPL(cache_purge);


/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'.
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available
 */
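
/*
 * Illustrative sketch of a request provider (the 'demo' names are
 * hypothetical; the svc server code does this for real via the
 * ->defer method of rqstp->rq_chandle in svc_xprt.c):
 *
 *	struct demo_deferred {
 *		struct cache_deferred_req handle;
 *		// ... enough state to replay the request later ...
 *	};
 *
 *	static void demo_revisit(struct cache_deferred_req *dreq, int too_many)
 *	{
 *		struct demo_deferred *dd =
 *			container_of(dreq, struct demo_deferred, handle);
 *		// requeue the saved request, or drop it if too_many
 *	}
 *
 *	static struct cache_deferred_req *demo_defer(struct cache_req *req)
 *	{
 *		struct demo_deferred *dd = kmalloc(sizeof(*dd), GFP_ATOMIC);
 *
 *		if (!dd)
 *			return NULL;
 *		dd->handle.revisit = demo_revisit;
 *		return &dd->handle;
 *	}
 */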

#define DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
#define DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define DFR_MAX	300	/* ??? */

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct list_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

static int cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq, *discard;
	int hash = DFR_HASH(item);

	if (cache_defer_cnt >= DFR_MAX) {
		/* too much in the cache, randomly drop this one,
		 * or continue and drop the oldest below
		 */
		if (net_random()&1)
			return -ENOMEM;
	}
	dreq = req->defer(req);
	if (dreq == NULL)
		return -ENOMEM;

	dreq->item = item;

	spin_lock(&cache_defer_lock);

	list_add(&dreq->recent, &cache_defer_list);

	if (cache_defer_hash[hash].next == NULL)
		INIT_LIST_HEAD(&cache_defer_hash[hash]);
	list_add(&dreq->hash, &cache_defer_hash[hash]);

	/* it is in, now maybe clean up */
	discard = NULL;
	if (++cache_defer_cnt > DFR_MAX) {
		discard = list_entry(cache_defer_list.prev,
				     struct cache_deferred_req, recent);
		list_del_init(&discard->recent);
		list_del_init(&discard->hash);
		cache_defer_cnt--;
	}
	spin_unlock(&cache_defer_lock);

	if (discard)
		/* there was one too many */
		discard->revisit(discard, 1);

	if (!test_bit(CACHE_PENDING, &item->flags)) {
		/* must have just been validated... */
		cache_revisit_request(item);
		return -EAGAIN;
	}
	return 0;
}

static void cache_revisit_request(struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	struct list_head pending;

	struct list_head *lp;
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	lp = cache_defer_hash[hash].next;
	if (lp) {
		while (lp != &cache_defer_hash[hash]) {
			dreq = list_entry(lp, struct cache_deferred_req, hash);
			lp = lp->next;
			if (dreq->item == item) {
				list_del_init(&dreq->hash);
				list_move(&dreq->recent, &pending);
				cache_defer_cnt--;
			}
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 0);
	}
}

void cache_clean_deferred(void *owner)
{
	struct cache_deferred_req *dreq, *tmp;
	struct list_head pending;


	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
		if (dreq->owner == owner) {
			list_del_init(&dreq->hash);
			list_move(&dreq->recent, &pending);
			cache_defer_cnt--;
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 1);
	}
}

/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/sunrpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up any preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 *
 */

static DEFINE_SPINLOCK(queue_lock);
static DEFINE_MUTEX(queue_io_mutex);

struct cache_queue {
	struct list_head	list;
	int			reader;	/* if 0, then request */
};
struct cache_request {
	struct cache_queue	q;
	struct cache_head	*item;
	char			*buf;
	int			len;
	int			readers;
};
struct cache_reader {
	struct cache_queue	q;
	int			offset;	/* if non-0, we have a refcnt on next request */
};

static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *ppos, struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_request *rq;
	struct inode *inode = filp->f_path.dentry->d_inode;
	int err;

	if (count == 0)
		return 0;

	mutex_lock(&inode->i_mutex); /* protect against multiple concurrent
				      * readers on this file */
 again:
	spin_lock(&queue_lock);
	/* need to find next request */
	while (rp->q.list.next != &cd->queue &&
	       list_entry(rp->q.list.next, struct cache_queue, list)
	       ->reader) {
		struct list_head *next = rp->q.list.next;
		list_move(&rp->q.list, next);
	}
	if (rp->q.list.next == &cd->queue) {
		spin_unlock(&queue_lock);
		mutex_unlock(&inode->i_mutex);
		BUG_ON(rp->offset);
		return 0;
	}
	rq = container_of(rp->q.list.next, struct cache_request, q.list);
	BUG_ON(rq->q.reader);
	if (rp->offset == 0)
		rq->readers++;
	spin_unlock(&queue_lock);

	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
		err = -EAGAIN;
		spin_lock(&queue_lock);
		list_move(&rp->q.list, &rq->q.list);
		spin_unlock(&queue_lock);
	} else {
		if (rp->offset + count > rq->len)
			count = rq->len - rp->offset;
		err = -EFAULT;
		if (copy_to_user(buf, rq->buf + rp->offset, count))
			goto out;
		rp->offset += count;
		if (rp->offset >= rq->len) {
			rp->offset = 0;
			spin_lock(&queue_lock);
			list_move(&rp->q.list, &rq->q.list);
			spin_unlock(&queue_lock);
		}
		err = 0;
	}
 out:
	if (rp->offset == 0) {
		/* need to release rq */
		spin_lock(&queue_lock);
		rq->readers--;
		if (rq->readers == 0 &&
		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
			list_del(&rq->q.list);
			spin_unlock(&queue_lock);
			cache_put(rq->item, cd);
			kfree(rq->buf);
			kfree(rq);
		} else
			spin_unlock(&queue_lock);
	}
	if (err == -EAGAIN)
		goto again;
	mutex_unlock(&inode->i_mutex);
	return err ? err : count;
}

static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
				 size_t count, struct cache_detail *cd)
{
	ssize_t ret;

	if (copy_from_user(kaddr, buf, count))
		return -EFAULT;
	kaddr[count] = '\0';
	ret = cd->cache_parse(cd, kaddr, count);
	if (!ret)
		ret = count;
	return ret;
}

static ssize_t cache_slow_downcall(const char __user *buf,
				   size_t count, struct cache_detail *cd)
{
	static char write_buf[8192]; /* protected by queue_io_mutex */
	ssize_t ret = -EINVAL;

	if (count >= sizeof(write_buf))
		goto out;
	mutex_lock(&queue_io_mutex);
	ret = cache_do_downcall(write_buf, buf, count, cd);
	mutex_unlock(&queue_io_mutex);
out:
	return ret;
}

static ssize_t cache_downcall(struct address_space *mapping,
			      const char __user *buf,
			      size_t count, struct cache_detail *cd)
{
	struct page *page;
	char *kaddr;
	ssize_t ret = -ENOMEM;

	if (count >= PAGE_CACHE_SIZE)
		goto out_slow;

	page = find_or_create_page(mapping, 0, GFP_KERNEL);
	if (!page)
		goto out_slow;

	kaddr = kmap(page);
	ret = cache_do_downcall(kaddr, buf, count, cd);
	kunmap(page);
	unlock_page(page);
	page_cache_release(page);
	return ret;
out_slow:
	return cache_slow_downcall(buf, count, cd);
}

static ssize_t cache_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = filp->f_path.dentry->d_inode;
	ssize_t ret = -EINVAL;

	if (!cd->cache_parse)
		goto out;

	mutex_lock(&inode->i_mutex);
	ret = cache_downcall(mapping, buf, count, cd);
	mutex_unlock(&inode->i_mutex);
out:
	return ret;
}

static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static unsigned int cache_poll(struct file *filp, poll_table *wait,
			       struct cache_detail *cd)
{
	unsigned int mask;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	poll_wait(filp, &queue_wait, wait);

	/* always allow write */
	mask = POLLOUT | POLLWRNORM;

	if (!rp)
		return mask;

	spin_lock(&queue_lock);

	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			mask |= POLLIN | POLLRDNORM;
			break;
		}
	spin_unlock(&queue_lock);
	return mask;
}

static int cache_ioctl(struct inode *ino, struct file *filp,
		       unsigned int cmd, unsigned long arg,
		       struct cache_detail *cd)
{
	int len = 0;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	if (cmd != FIONREAD || !rp)
		return -EINVAL;

	spin_lock(&queue_lock);

	/* only find the length remaining in current request,
	 * or the length of the next request
	 */
	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			struct cache_request *cr =
				container_of(cq, struct cache_request, q);
			len = cr->len - rp->offset;
			break;
		}
	spin_unlock(&queue_lock);

	return put_user(len, (int __user *)arg);
}

static int cache_open(struct inode *inode, struct file *filp,
		      struct cache_detail *cd)
{
	struct cache_reader *rp = NULL;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	nonseekable_open(inode, filp);
	if (filp->f_mode & FMODE_READ) {
		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp) {
			/* drop the module reference taken above */
			module_put(cd->owner);
			return -ENOMEM;
		}
		rp->offset = 0;
		rp->q.reader = 1;
		atomic_inc(&cd->readers);
		spin_lock(&queue_lock);
		list_add(&rp->q.list, &cd->queue);
		spin_unlock(&queue_lock);
	}
	filp->private_data = rp;
	return 0;
}

static int cache_release(struct inode *inode, struct file *filp,
			 struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;

	if (rp) {
		spin_lock(&queue_lock);
		if (rp->offset) {
			struct cache_queue *cq;
			for (cq = &rp->q; &cq->list != &cd->queue;
			     cq = list_entry(cq->list.next, struct cache_queue, list))
				if (!cq->reader) {
					container_of(cq, struct cache_request, q)
						->readers--;
					break;
				}
			rp->offset = 0;
		}
		list_del(&rp->q.list);
		spin_unlock(&queue_lock);

		filp->private_data = NULL;
		kfree(rp);

		cd->last_close = get_seconds();
		atomic_dec(&cd->readers);
	}
	module_put(cd->owner);
	return 0;
}


static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
	struct cache_queue *cq;
	spin_lock(&queue_lock);
	list_for_each_entry(cq, &detail->queue, list)
		if (!cq->reader) {
			struct cache_request *cr = container_of(cq, struct cache_request, q);
			if (cr->item != ch)
				continue;
			if (cr->readers != 0)
				continue;
			list_del(&cr->q.list);
			spin_unlock(&queue_lock);
			cache_put(cr->item, detail);
			kfree(cr->buf);
			kfree(cr);
			return;
		}
	spin_unlock(&queue_lock);
}

/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space, tab, newline and slosh
 * with slosh, or hexified with a leading \x
 * Record is terminated with newline.
 *
 */

void qword_add(char **bpp, int *lp, char *str)
{
	char *bp = *bpp;
	int len = *lp;
	char c;

	if (len < 0) return;

	while ((c = *str++) && len)
		switch (c) {
		case ' ':
		case '\t':
		case '\n':
		case '\\':
			if (len >= 4) {
				*bp++ = '\\';
				*bp++ = '0' + ((c & 0300)>>6);
				*bp++ = '0' + ((c & 0070)>>3);
				*bp++ = '0' + ((c & 0007)>>0);
			}
			len -= 4;
			break;
		default:
			*bp++ = c;
			len--;
		}
	if (c || len < 1) len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_add);

void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
	char *bp = *bpp;
	int len = *lp;

	if (len < 0) return;

	if (len > 2) {
		*bp++ = '\\';
		*bp++ = 'x';
		len -= 2;
		while (blen && len >= 2) {
			unsigned char c = *buf++;
			*bp++ = '0' + ((c&0xf0)>>4) + (c>=0xa0)*('a'-'9'-1);
			*bp++ = '0' + (c&0x0f) + ((c&0x0f)>=0x0a)*('a'-'9'-1);
			len -= 2;
			blen--;
		}
	}
	if (blen || len < 1) len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_addhex);
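
/*
 * Example of composing one upcall record with these helpers (sketch; the
 * actual fields depend on each cache's request format -- compare
 * ip_map_request() in svcauth_unix.c):
 *
 *	char *bp = buf;
 *	int len = PAGE_SIZE;
 *
 *	qword_add(&bp, &len, "nfsd");			// quoted text field
 *	qword_addhex(&bp, &len, (char *)&addr, 4);	// binary field as \x..
 *	if (len > 0)
 *		bp[-1] = '\n';	// replace the trailing space
 *
 * On overflow the helpers leave len negative, which the caller must check.
 */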

static void warn_no_listener(struct cache_detail *detail)
{
	if (detail->last_warn != detail->last_close) {
		detail->last_warn = detail->last_close;
		if (detail->warn_no_listener)
			detail->warn_no_listener(detail, detail->last_close != 0);
	}
}

/*
 * register an upcall request to user-space and queue it up for read() by the
 * upcall daemon.
 *
 * Each request is at most one page long.
 */
int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h,
			     void (*cache_request)(struct cache_detail *,
						   struct cache_head *,
						   char **,
						   int *))
{
	char *buf;
	struct cache_request *crq;
	char *bp;
	int len;

	if (atomic_read(&detail->readers) == 0 &&
	    detail->last_close < get_seconds() - 30) {
		warn_no_listener(detail);
		return -EINVAL;
	}

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -EAGAIN;

	crq = kmalloc(sizeof(*crq), GFP_KERNEL);
	if (!crq) {
		kfree(buf);
		return -EAGAIN;
	}

	bp = buf; len = PAGE_SIZE;

	cache_request(detail, h, &bp, &len);

	if (len < 0) {
		kfree(buf);
		kfree(crq);
		return -EAGAIN;
	}
	crq->q.reader = 0;
	crq->item = cache_get(h);
	crq->buf = buf;
	crq->len = PAGE_SIZE - len;
	crq->readers = 0;
	spin_lock(&queue_lock);
	list_add_tail(&crq->q.list, &detail->queue);
	spin_unlock(&queue_lock);
	wake_up(&queue_wait);
	return 0;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
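
/*
 * Typical wiring (sketch; the 'demo' names are hypothetical).  A cache
 * supplies a request formatter and passes it through from its
 * ->cache_upcall method:
 *
 *	static void demo_request(struct cache_detail *cd, struct cache_head *h,
 *				 char **bpp, int *blen)
 *	{
 *		struct demo_ent *ent = container_of(h, struct demo_ent, h);
 *
 *		qword_add(bpp, blen, ent->name);
 *		(*bpp)[-1] = '\n';
 *	}
 *
 *	static int demo_upcall(struct cache_detail *cd, struct cache_head *h)
 *	{
 *		return sunrpc_cache_pipe_upcall(cd, h, demo_request);
 *	}
 *
 * with .cache_upcall = demo_upcall in the cache's struct cache_detail.
 */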

/*
 * parse a message from user-space and pass it
 * to an appropriate cache
 * Messages are, like requests, separated into fields by
 * spaces and dequoted as \xHEXSTRING or embedded \nnn octal
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */

#define isodigit(c) (isdigit(c) && c <= '7')
int qword_get(char **bpp, char *dest, int bufsize)
{
	/* return bytes copied, or -1 on error */
	char *bp = *bpp;
	int len = 0;

	while (*bp == ' ') bp++;

	if (bp[0] == '\\' && bp[1] == 'x') {
		/* HEX STRING */
		bp += 2;
		while (isxdigit(bp[0]) && isxdigit(bp[1]) && len < bufsize) {
			int byte = isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
			bp++;
			byte <<= 4;
			byte |= isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
			*dest++ = byte;
			bp++;
			len++;
		}
	} else {
		/* text with \nnn octal quoting */
		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
			if (*bp == '\\' &&
			    isodigit(bp[1]) && (bp[1] <= '3') &&
			    isodigit(bp[2]) &&
			    isodigit(bp[3])) {
				int byte = (*++bp -'0');
				bp++;
				byte = (byte << 3) | (*bp++ - '0');
				byte = (byte << 3) | (*bp++ - '0');
				*dest++ = byte;
				len++;
			} else {
				*dest++ = *bp++;
				len++;
			}
		}
	}

	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
		return -1;
	while (*bp == ' ') bp++;
	*bpp = bp;
	*dest = '\0';
	return len;
}
EXPORT_SYMBOL_GPL(qword_get);
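
/*
 * Example ->cache_parse skeleton using qword_get() (sketch; the 'demo'
 * names are hypothetical, and real caches use the get_expiry()/get_int()
 * helpers from sunrpc/cache.h for numeric fields):
 *
 *	static int demo_parse(struct cache_detail *cd, char *mesg, int mlen)
 *	{
 *		char name[64];
 *		time_t expiry;
 *
 *		if (qword_get(&mesg, name, sizeof(name)) <= 0)
 *			return -EINVAL;		// first field: key
 *		expiry = get_expiry(&mesg);
 *		if (expiry == 0)
 *			return -EINVAL;		// second field: expiry time
 *		// ... build key+content and sunrpc_cache_update() the entry ...
 *		return 0;
 *	}
 */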


/*
 * support /proc/sunrpc/cache/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache
 */

struct handle {
	struct cache_detail *cd;
};

static void *c_start(struct seq_file *m, loff_t *pos)
	__acquires(cd->hash_lock)
{
	loff_t n = *pos;
	unsigned hash, entry;
	struct cache_head *ch;
	struct cache_detail *cd = ((struct handle*)m->private)->cd;


	read_lock(&cd->hash_lock);
	if (!n--)
		return SEQ_START_TOKEN;
	hash = n >> 32;
	entry = n & ((1LL<<32) - 1);

	for (ch = cd->hash_table[hash]; ch; ch = ch->next)
		if (!entry--)
			return ch;
	n &= ~((1LL<<32) - 1);
	do {
		hash++;
		n += 1LL<<32;
	} while (hash < cd->hash_size &&
		 cd->hash_table[hash] == NULL);
	if (hash >= cd->hash_size)
		return NULL;
	*pos = n+1;
	return cd->hash_table[hash];
}

static void *c_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct cache_head *ch = p;
	int hash = (*pos >> 32);
	struct cache_detail *cd = ((struct handle*)m->private)->cd;

	if (p == SEQ_START_TOKEN)
		hash = 0;
	else if (ch->next == NULL) {
		hash++;
		*pos += 1LL<<32;
	} else {
		++*pos;
		return ch->next;
	}
	*pos &= ~((1LL<<32) - 1);
	while (hash < cd->hash_size &&
	       cd->hash_table[hash] == NULL) {
		hash++;
		*pos += 1LL<<32;
	}
	if (hash >= cd->hash_size)
		return NULL;
	++*pos;
	return cd->hash_table[hash];
}

static void c_stop(struct seq_file *m, void *p)
	__releases(cd->hash_lock)
{
	struct cache_detail *cd = ((struct handle*)m->private)->cd;
	read_unlock(&cd->hash_lock);
}

static int c_show(struct seq_file *m, void *p)
{
	struct cache_head *cp = p;
	struct cache_detail *cd = ((struct handle*)m->private)->cd;

	if (p == SEQ_START_TOKEN)
		return cd->cache_show(m, cd, NULL);

	ifdebug(CACHE)
		seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
			   cp->expiry_time, atomic_read(&cp->ref.refcount), cp->flags);
	cache_get(cp);
	if (cache_check(cd, cp, NULL))
		/* cache_check does a cache_put on failure */
		seq_printf(m, "# ");
	else
		cache_put(cp, cd);

	return cd->cache_show(m, cd, cp);
}

static const struct seq_operations cache_content_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show,
};

static int content_open(struct inode *inode, struct file *file,
			struct cache_detail *cd)
{
	struct handle *han;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	han = __seq_open_private(file, &cache_content_op, sizeof(*han));
	if (han == NULL) {
		/* drop the module reference taken above */
		module_put(cd->owner);
		return -ENOMEM;
	}

	han->cd = cd;
	return 0;
}

static int content_release(struct inode *inode, struct file *file,
			   struct cache_detail *cd)
{
	int ret = seq_release_private(inode, file);
	module_put(cd->owner);
	return ret;
}

static int open_flush(struct inode *inode, struct file *file,
		      struct cache_detail *cd)
{
	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	return nonseekable_open(inode, file);
}

static int release_flush(struct inode *inode, struct file *file,
			 struct cache_detail *cd)
{
	module_put(cd->owner);
	return 0;
}

static ssize_t read_flush(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos,
			  struct cache_detail *cd)
{
	char tbuf[20];
	unsigned long p = *ppos;
	size_t len;

	sprintf(tbuf, "%lu\n", cd->flush_time);
	len = strlen(tbuf);
	if (p >= len)
		return 0;
	len -= p;
	if (len > count)
		len = count;
	if (copy_to_user(buf, (void*)(tbuf+p), len))
		return -EFAULT;
	*ppos += len;
	return len;
}

static ssize_t write_flush(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	char tbuf[20];
	char *ep;
	long flushtime;
	if (*ppos || count > sizeof(tbuf)-1)
		return -EINVAL;
	if (copy_from_user(tbuf, buf, count))
		return -EFAULT;
	tbuf[count] = 0;
	flushtime = simple_strtoul(tbuf, &ep, 0);
	if (*ep && *ep != '\n')
		return -EINVAL;

	cd->flush_time = flushtime;
	cd->nextcheck = get_seconds();
	cache_flush();

	*ppos += count;
	return count;
}

static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return cache_write(filp, buf, count, ppos, cd);
}

static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return cache_poll(filp, wait, cd);
}

static int cache_ioctl_procfs(struct inode *inode, struct file *filp,
			      unsigned int cmd, unsigned long arg)
{
	struct cache_detail *cd = PDE(inode)->data;

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return cache_open(inode, filp, cd);
}

static int cache_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return cache_release(inode, filp, cd);
}

static const struct file_operations cache_file_operations_procfs = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read_procfs,
	.write		= cache_write_procfs,
	.poll		= cache_poll_procfs,
	.ioctl		= cache_ioctl_procfs, /* for FIONREAD */
	.open		= cache_open_procfs,
	.release	= cache_release_procfs,
};

static int content_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return content_open(inode, filp, cd);
}

static int content_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return content_release(inode, filp, cd);
}

static const struct file_operations content_file_operations_procfs = {
	.open		= content_open_procfs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_procfs,
};

static int open_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return open_flush(inode, filp, cd);
}

static int release_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_procfs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return write_flush(filp, buf, count, ppos, cd);
}

static const struct file_operations cache_flush_operations_procfs = {
	.open		= open_flush_procfs,
	.read		= read_flush_procfs,
	.write		= write_flush_procfs,
	.release	= release_flush_procfs,
};

static void remove_cache_proc_entries(struct cache_detail *cd)
{
	if (cd->u.procfs.proc_ent == NULL)
		return;
	if (cd->u.procfs.flush_ent)
		remove_proc_entry("flush", cd->u.procfs.proc_ent);
	if (cd->u.procfs.channel_ent)
		remove_proc_entry("channel", cd->u.procfs.proc_ent);
	if (cd->u.procfs.content_ent)
		remove_proc_entry("content", cd->u.procfs.proc_ent);
	cd->u.procfs.proc_ent = NULL;
	remove_proc_entry(cd->name, proc_net_rpc);
}

#ifdef CONFIG_PROC_FS
static int create_cache_proc_entries(struct cache_detail *cd)
{
	struct proc_dir_entry *p;

	cd->u.procfs.proc_ent = proc_mkdir(cd->name, proc_net_rpc);
	if (cd->u.procfs.proc_ent == NULL)
		goto out_nomem;
	cd->u.procfs.channel_ent = NULL;
	cd->u.procfs.content_ent = NULL;

	p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR,
			     cd->u.procfs.proc_ent,
			     &cache_flush_operations_procfs, cd);
	cd->u.procfs.flush_ent = p;
	if (p == NULL)
		goto out_nomem;

	if (cd->cache_upcall || cd->cache_parse) {
		p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
				     cd->u.procfs.proc_ent,
				     &cache_file_operations_procfs, cd);
		cd->u.procfs.channel_ent = p;
		if (p == NULL)
			goto out_nomem;
	}
	if (cd->cache_show) {
		p = proc_create_data("content", S_IFREG|S_IRUSR|S_IWUSR,
				     cd->u.procfs.proc_ent,
				     &content_file_operations_procfs, cd);
		cd->u.procfs.content_ent = p;
		if (p == NULL)
			goto out_nomem;
	}
	return 0;
out_nomem:
	remove_cache_proc_entries(cd);
	return -ENOMEM;
}
#else /* CONFIG_PROC_FS */
static int create_cache_proc_entries(struct cache_detail *cd)
{
	return 0;
}
#endif

int cache_register(struct cache_detail *cd)
{
	int ret;

	sunrpc_init_cache_detail(cd);
	ret = create_cache_proc_entries(cd);
	if (ret)
		sunrpc_destroy_cache_detail(cd);
	return ret;
}
EXPORT_SYMBOL_GPL(cache_register);

void cache_unregister(struct cache_detail *cd)
{
	remove_cache_proc_entries(cd);
	sunrpc_destroy_cache_detail(cd);
}
EXPORT_SYMBOL_GPL(cache_unregister);
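
/*
 * Putting it together (sketch; a hypothetical 'demo' cache with a
 * 64-bucket table -- the ops named here are the ones sketched above):
 *
 *	#define DEMO_HASHMAX 64
 *	static struct cache_head *demo_table[DEMO_HASHMAX];
 *
 *	static struct cache_detail demo_cache = {
 *		.owner		= THIS_MODULE,
 *		.hash_size	= DEMO_HASHMAX,
 *		.hash_table	= demo_table,
 *		.name		= "demo",
 *		.cache_put	= demo_put,
 *		.cache_upcall	= demo_upcall,
 *		.cache_parse	= demo_parse,
 *		.cache_show	= demo_show,
 *		.match		= demo_match,
 *		.init		= demo_init,
 *		.update		= demo_update_item,
 *		.alloc		= demo_alloc,
 *	};
 *
 * cache_register(&demo_cache) then creates
 * /proc/net/rpc/demo/{channel,content,flush}, and cache_unregister()
 * tears it down at module exit.
 */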

static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return cache_write(filp, buf, count, ppos, cd);
}

static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return cache_poll(filp, wait, cd);
}

static long cache_ioctl_pipefs(struct file *filp,
			       unsigned int cmd, unsigned long arg)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct cache_detail *cd = RPC_I(inode)->private;
	long ret;

	lock_kernel();
	ret = cache_ioctl(inode, filp, cmd, arg, cd);
	unlock_kernel();

	return ret;
}

static int cache_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_open(inode, filp, cd);
}

static int cache_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_release(inode, filp, cd);
}

const struct file_operations cache_file_operations_pipefs = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read_pipefs,
	.write		= cache_write_pipefs,
	.poll		= cache_poll_pipefs,
	.unlocked_ioctl	= cache_ioctl_pipefs, /* for FIONREAD */
	.open		= cache_open_pipefs,
	.release	= cache_release_pipefs,
};

static int content_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_open(inode, filp, cd);
}

static int content_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_release(inode, filp, cd);
}

const struct file_operations content_file_operations_pipefs = {
	.open		= content_open_pipefs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_pipefs,
};

static int open_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return open_flush(inode, filp, cd);
}

static int release_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_pipefs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return write_flush(filp, buf, count, ppos, cd);
}

const struct file_operations cache_flush_operations_pipefs = {
	.open		= open_flush_pipefs,
	.read		= read_flush_pipefs,
	.write		= write_flush_pipefs,
	.release	= release_flush_pipefs,
};

int sunrpc_cache_register_pipefs(struct dentry *parent,
				 const char *name, mode_t umode,
				 struct cache_detail *cd)
{
	struct qstr q;
	struct dentry *dir;
	int ret = 0;

	sunrpc_init_cache_detail(cd);
	q.name = name;
	q.len = strlen(name);
	q.hash = full_name_hash(q.name, q.len);
	dir = rpc_create_cache_dir(parent, &q, umode, cd);
	if (!IS_ERR(dir))
		cd->u.pipefs.dir = dir;
	else {
		sunrpc_destroy_cache_detail(cd);
		ret = PTR_ERR(dir);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);

void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
{
	rpc_remove_cache_dir(cd->u.pipefs.dir);
	cd->u.pipefs.dir = NULL;
	sunrpc_destroy_cache_detail(cd);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);