/*
 * Cleancache frontend
 *
 * This code provides the generic "frontend" layer to call a matching
 * "backend" driver implementation of cleancache.  See
 * Documentation/vm/cleancache.txt for more information.
 *
 * Copyright (C) 2009-2010 Oracle Corp. All rights reserved.
 * Author: Dan Magenheimer
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/exportfs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/cleancache.h>

/*
 * This global enablement flag may be read thousands of times per second
 * by cleancache_get/put/invalidate even on systems where cleancache_ops
 * is not claimed (e.g. cleancache is config'ed on but remains
 * disabled), so is preferred to the slower alternative: a function
 * call that checks a non-global.
 */
int cleancache_enabled __read_mostly;
EXPORT_SYMBOL(cleancache_enabled);

/*
 * cleancache_ops is set by cleancache_register_ops to contain the pointers
 * to the cleancache "backend" implementation functions.
 */
static struct cleancache_ops cleancache_ops __read_mostly;

/*
 * Counters available via /sys/kernel/debug/cleancache (if debugfs is
 * properly configured).  These are for information only so are not
 * protected against increment races.
 */
static u64 cleancache_succ_gets;
static u64 cleancache_failed_gets;
static u64 cleancache_puts;
static u64 cleancache_invalidates;

/*
 * Register operations for cleancache, returning the previous ops so
 * that multiple backends and possible nesting can be detected.
 */
struct cleancache_ops cleancache_register_ops(struct cleancache_ops *ops)
{
        struct cleancache_ops old = cleancache_ops;

        cleancache_ops = *ops;
        cleancache_enabled = 1;
        return old;
}
EXPORT_SYMBOL(cleancache_register_ops);
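
/*
 * Example (illustrative sketch only, not taken from any real backend):
 * a backend such as a transcendent-memory driver fills in a
 * cleancache_ops with its implementations and registers it once during
 * its own initialization.  The my_* names are hypothetical:
 *
 *	static struct cleancache_ops my_ops = {
 *		.init_fs	  = my_init_fs,
 *		.init_shared_fs	  = my_init_shared_fs,
 *		.get_page	  = my_get_page,
 *		.put_page	  = my_put_page,
 *		.invalidate_page  = my_invalidate_page,
 *		.invalidate_inode = my_invalidate_inode,
 *		.invalidate_fs	  = my_invalidate_fs,
 *	};
 *	struct cleancache_ops old_ops = cleancache_register_ops(&my_ops);
 *
 * If old_ops contains non-NULL pointers, another backend registered
 * first and the new backend may choose to chain to it.
 */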

/* Called by a cleancache-enabled filesystem at time of mount */
void __cleancache_init_fs(struct super_block *sb)
{
        sb->cleancache_poolid = (*cleancache_ops.init_fs)(PAGE_SIZE);
}
EXPORT_SYMBOL(__cleancache_init_fs);

/* Called by a cleancache-enabled clustered filesystem at time of mount */
void __cleancache_init_shared_fs(char *uuid, struct super_block *sb)
{
        sb->cleancache_poolid =
                (*cleancache_ops.init_shared_fs)(uuid, PAGE_SIZE);
}
EXPORT_SYMBOL(__cleancache_init_shared_fs);
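
/*
 * Illustrative sketch (assumed callers, per Documentation/vm/cleancache.txt):
 * a filesystem opts in from its mount path via the inline wrappers in
 * include/linux/cleancache.h, which check cleancache_enabled first:
 *
 *	cleancache_init_fs(sb);			single filesystem
 *	cleancache_init_shared_fs(uuid, sb);	clustered filesystem
 *
 * The backend returns a pool id that is stored in sb->cleancache_poolid;
 * a negative pool id leaves cleancache inactive for that filesystem.
 */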

/*
 * If the filesystem uses exportable filehandles, use the filehandle as
 * the key, else use the inode number.
 */
static int cleancache_get_key(struct inode *inode,
                              struct cleancache_filekey *key)
{
        int (*fhfn)(struct inode *, __u32 *fh, int *, struct inode *);
        int len = 0, maxlen = CLEANCACHE_KEY_MAX;
        struct super_block *sb = inode->i_sb;

        key->u.ino = inode->i_ino;
        if (sb->s_export_op != NULL) {
                fhfn = sb->s_export_op->encode_fh;
                if (fhfn) {
                        len = (*fhfn)(inode, &key->u.fh[0], &maxlen, NULL);
                        if (len <= 0 || len == 255)
                                return -1;
                        if (maxlen > CLEANCACHE_KEY_MAX)
                                return -1;
                }
        }
        return 0;
}

/*
 * "Get" data from cleancache associated with the poolid/inode/index
 * that were specified when the data was put to cleancache and, if
 * successful, use it to fill the specified page with data and return 0.
 * If the get fails, the pageframe is unchanged and -1 is returned.
 * Page must be locked by caller.
 */
int __cleancache_get_page(struct page *page)
{
        int ret = -1;
        int pool_id;
        struct cleancache_filekey key = { .u.key = { 0 } };

        VM_BUG_ON(!PageLocked(page));
        pool_id = page->mapping->host->i_sb->cleancache_poolid;
        if (pool_id < 0)
                goto out;

        if (cleancache_get_key(page->mapping->host, &key) < 0)
                goto out;

        ret = (*cleancache_ops.get_page)(pool_id, key, page->index, page);
        if (ret == 0)
                cleancache_succ_gets++;
        else
                cleancache_failed_gets++;
out:
        return ret;
}
EXPORT_SYMBOL(__cleancache_get_page);
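
/*
 * Illustrative sketch (assumed caller): a readpage path tries
 * cleancache before issuing disk I/O, via the inline wrapper in
 * include/linux/cleancache.h:
 *
 *	if (cleancache_get_page(page) == 0) {
 *		SetPageUptodate(page);
 *		goto done;		page filled, no disk read needed
 *	}
 *	... otherwise read the page from disk as usual ...
 *
 * The exact surrounding logic varies by caller (see fs/mpage.c for a
 * real example).
 */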

/*
 * "Put" data from a page to cleancache and associate it with the
 * (previously-obtained per-filesystem) poolid and the page's
 * inode and page index.  Page must be locked.  Note that a put_page
 * always "succeeds", though a subsequent get_page may succeed or fail.
 */
void __cleancache_put_page(struct page *page)
{
        int pool_id;
        struct cleancache_filekey key = { .u.key = { 0 } };

        VM_BUG_ON(!PageLocked(page));
        pool_id = page->mapping->host->i_sb->cleancache_poolid;
        if (pool_id >= 0 &&
            cleancache_get_key(page->mapping->host, &key) >= 0) {
                (*cleancache_ops.put_page)(pool_id, key, page->index, page);
                cleancache_puts++;
        }
}
EXPORT_SYMBOL(__cleancache_put_page);
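
/*
 * Illustrative sketch (assumed caller): page reclaim "puts" a clean,
 * up-to-date page at the moment it is removed from the page cache,
 * again via the inline wrapper:
 *
 *	cleancache_put_page(page);
 *
 * The data may then still satisfy a later get even though the
 * pageframe itself is immediately reused.
 */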

/*
 * Invalidate any data from cleancache associated with the poolid and the
 * page's inode and page index so that a subsequent "get" will fail.
 */
void __cleancache_invalidate_page(struct address_space *mapping,
                                  struct page *page)
{
        /* careful... page->mapping is NULL sometimes when this is called */
        int pool_id = mapping->host->i_sb->cleancache_poolid;
        struct cleancache_filekey key = { .u.key = { 0 } };

        if (pool_id >= 0) {
                VM_BUG_ON(!PageLocked(page));
                if (cleancache_get_key(mapping->host, &key) >= 0) {
                        (*cleancache_ops.invalidate_page)(pool_id,
                                                          key, page->index);
                        cleancache_invalidates++;
                }
        }
}
EXPORT_SYMBOL(__cleancache_invalidate_page);

/*
 * Invalidate all data from cleancache associated with the poolid and the
 * mapping's inode so that all subsequent gets to this poolid/inode
 * will fail.
 */
void __cleancache_invalidate_inode(struct address_space *mapping)
{
        int pool_id = mapping->host->i_sb->cleancache_poolid;
        struct cleancache_filekey key = { .u.key = { 0 } };

        if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)
                (*cleancache_ops.invalidate_inode)(pool_id, key);
}
EXPORT_SYMBOL(__cleancache_invalidate_inode);
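
/*
 * Illustrative sketch (assumed caller): truncate paths such as those
 * in mm/truncate.c invalidate the whole inode first, e.g.:
 *
 *	cleancache_invalidate_inode(mapping);
 *
 * so that no stale copy of a truncated page can satisfy a later get.
 */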

/*
 * Called by any cleancache-enabled filesystem at time of unmount;
 * note that pool_id is surrendered and may be returned by a subsequent
 * cleancache_init_fs or cleancache_init_shared_fs.
 */
void __cleancache_invalidate_fs(struct super_block *sb)
{
        if (sb->cleancache_poolid >= 0) {
                int old_poolid = sb->cleancache_poolid;
                sb->cleancache_poolid = -1;
                (*cleancache_ops.invalidate_fs)(old_poolid);
        }
}
EXPORT_SYMBOL(__cleancache_invalidate_fs);
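
/*
 * Illustrative sketch (assumed caller): the VFS unmount path invokes
 * the inline wrapper
 *
 *	cleancache_invalidate_fs(sb);
 *
 * when the superblock is being torn down, after which the backend may
 * free everything it holds for the surrendered pool id.
 */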

static int __init init_cleancache(void)
{
#ifdef CONFIG_DEBUG_FS
        struct dentry *root = debugfs_create_dir("cleancache", NULL);
        if (root == NULL)
                return -ENXIO;
        debugfs_create_u64("succ_gets", S_IRUGO, root, &cleancache_succ_gets);
        debugfs_create_u64("failed_gets", S_IRUGO,
                           root, &cleancache_failed_gets);
        debugfs_create_u64("puts", S_IRUGO, root, &cleancache_puts);
        debugfs_create_u64("invalidates", S_IRUGO,
                           root, &cleancache_invalidates);
#endif
        return 0;
}
module_init(init_cleancache)