blob: 64c58eb26159324245a7581a67bfe5a5afd9f552 [file] [log] [blame]
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -05001/*
2 * V9FS cache definitions.
3 *
4 * Copyright (C) 2009 by Abhishek Kulkarni <adkulkar@umail.iu.edu>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to:
17 * Free Software Foundation
18 * 51 Franklin Street, Fifth Floor
19 * Boston, MA 02111-1301 USA
20 *
21 */
22
23#include <linux/jiffies.h>
24#include <linux/file.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090025#include <linux/slab.h>
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -050026#include <linux/stat.h>
27#include <linux/sched.h>
28#include <linux/fs.h>
29#include <net/9p/9p.h>
30
31#include "v9fs.h"
32#include "cache.h"
33
34#define CACHETAG_LEN 11
35
/*
 * FS-Cache netfs definition for 9p. Its primary_index (set up at netfs
 * registration, outside this file) is used below as the parent of
 * per-session cookies.
 */
struct fscache_netfs v9fs_cache_netfs = {
	.name = "9p",
	.version = 0,
};
40
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -050041/**
42 * v9fs_random_cachetag - Generate a random tag to be associated
43 * with a new cache session.
44 *
45 * The value of jiffies is used for a fairly randomly cache tag.
46 */
47
48static
49int v9fs_random_cachetag(struct v9fs_session_info *v9ses)
50{
51 v9ses->cachetag = kmalloc(CACHETAG_LEN, GFP_KERNEL);
52 if (!v9ses->cachetag)
53 return -ENOMEM;
54
55 return scnprintf(v9ses->cachetag, CACHETAG_LEN, "%lu", jiffies);
56}
57
58static uint16_t v9fs_cache_session_get_key(const void *cookie_netfs_data,
59 void *buffer, uint16_t bufmax)
60{
61 struct v9fs_session_info *v9ses;
62 uint16_t klen = 0;
63
64 v9ses = (struct v9fs_session_info *)cookie_netfs_data;
Joe Perches5d385152011-11-28 10:40:46 -080065 p9_debug(P9_DEBUG_FSC, "session %p buf %p size %u\n",
66 v9ses, buffer, bufmax);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -050067
68 if (v9ses->cachetag)
69 klen = strlen(v9ses->cachetag);
70
71 if (klen > bufmax)
72 return 0;
73
74 memcpy(buffer, v9ses->cachetag, klen);
Joe Perches5d385152011-11-28 10:40:46 -080075 p9_debug(P9_DEBUG_FSC, "cache session tag %s\n", v9ses->cachetag);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -050076 return klen;
77}
78
/*
 * Cookie definition for per-session index cookies; keyed by the
 * session cachetag via v9fs_cache_session_get_key().
 */
const struct fscache_cookie_def v9fs_cache_session_index_def = {
	.name = "9P.session",
	.type = FSCACHE_COOKIE_TYPE_INDEX,
	.get_key = v9fs_cache_session_get_key,
};
84
85void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses)
86{
87 /* If no cache session tag was specified, we generate a random one. */
88 if (!v9ses->cachetag)
89 v9fs_random_cachetag(v9ses);
90
91 v9ses->fscache = fscache_acquire_cookie(v9fs_cache_netfs.primary_index,
92 &v9fs_cache_session_index_def,
David Howells94d30ae2013-09-21 00:09:31 +010093 v9ses, true);
Joe Perches5d385152011-11-28 10:40:46 -080094 p9_debug(P9_DEBUG_FSC, "session %p get cookie %p\n",
95 v9ses, v9ses->fscache);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -050096}
97
98void v9fs_cache_session_put_cookie(struct v9fs_session_info *v9ses)
99{
Joe Perches5d385152011-11-28 10:40:46 -0800100 p9_debug(P9_DEBUG_FSC, "session %p put cookie %p\n",
101 v9ses, v9ses->fscache);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500102 fscache_relinquish_cookie(v9ses->fscache, 0);
103 v9ses->fscache = NULL;
104}
105
106
107static uint16_t v9fs_cache_inode_get_key(const void *cookie_netfs_data,
108 void *buffer, uint16_t bufmax)
109{
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530110 const struct v9fs_inode *v9inode = cookie_netfs_data;
Aneesh Kumar K.Vfd2421f2011-07-11 16:40:59 +0000111 memcpy(buffer, &v9inode->qid.path, sizeof(v9inode->qid.path));
Joe Perches5d385152011-11-28 10:40:46 -0800112 p9_debug(P9_DEBUG_FSC, "inode %p get key %llu\n",
113 &v9inode->vfs_inode, v9inode->qid.path);
Aneesh Kumar K.Vfd2421f2011-07-11 16:40:59 +0000114 return sizeof(v9inode->qid.path);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500115}
116
117static void v9fs_cache_inode_get_attr(const void *cookie_netfs_data,
118 uint64_t *size)
119{
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530120 const struct v9fs_inode *v9inode = cookie_netfs_data;
121 *size = i_size_read(&v9inode->vfs_inode);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500122
Joe Perches5d385152011-11-28 10:40:46 -0800123 p9_debug(P9_DEBUG_FSC, "inode %p get attr %llu\n",
124 &v9inode->vfs_inode, *size);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500125}
126
127static uint16_t v9fs_cache_inode_get_aux(const void *cookie_netfs_data,
128 void *buffer, uint16_t buflen)
129{
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530130 const struct v9fs_inode *v9inode = cookie_netfs_data;
Aneesh Kumar K.Vfd2421f2011-07-11 16:40:59 +0000131 memcpy(buffer, &v9inode->qid.version, sizeof(v9inode->qid.version));
Joe Perches5d385152011-11-28 10:40:46 -0800132 p9_debug(P9_DEBUG_FSC, "inode %p get aux %u\n",
133 &v9inode->vfs_inode, v9inode->qid.version);
Aneesh Kumar K.Vfd2421f2011-07-11 16:40:59 +0000134 return sizeof(v9inode->qid.version);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500135}
136
137static enum
138fscache_checkaux v9fs_cache_inode_check_aux(void *cookie_netfs_data,
139 const void *buffer,
140 uint16_t buflen)
141{
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530142 const struct v9fs_inode *v9inode = cookie_netfs_data;
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500143
Aneesh Kumar K.Vfd2421f2011-07-11 16:40:59 +0000144 if (buflen != sizeof(v9inode->qid.version))
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500145 return FSCACHE_CHECKAUX_OBSOLETE;
146
Aneesh Kumar K.Vfd2421f2011-07-11 16:40:59 +0000147 if (memcmp(buffer, &v9inode->qid.version,
148 sizeof(v9inode->qid.version)))
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500149 return FSCACHE_CHECKAUX_OBSOLETE;
150
151 return FSCACHE_CHECKAUX_OKAY;
152}
153
/*
 * Cookie definition for per-inode data cookies; keyed by qid path and
 * kept coherent via the qid version (get_aux/check_aux).
 */
const struct fscache_cookie_def v9fs_cache_inode_index_def = {
	.name = "9p.inode",
	.type = FSCACHE_COOKIE_TYPE_DATAFILE,
	.get_key = v9fs_cache_inode_get_key,
	.get_attr = v9fs_cache_inode_get_attr,
	.get_aux = v9fs_cache_inode_get_aux,
	.check_aux = v9fs_cache_inode_check_aux,
};
162
163void v9fs_cache_inode_get_cookie(struct inode *inode)
164{
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530165 struct v9fs_inode *v9inode;
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500166 struct v9fs_session_info *v9ses;
167
168 if (!S_ISREG(inode->i_mode))
169 return;
170
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530171 v9inode = V9FS_I(inode);
172 if (v9inode->fscache)
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500173 return;
174
175 v9ses = v9fs_inode2v9ses(inode);
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530176 v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500177 &v9fs_cache_inode_index_def,
David Howells94d30ae2013-09-21 00:09:31 +0100178 v9inode, true);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500179
Joe Perches5d385152011-11-28 10:40:46 -0800180 p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n",
181 inode, v9inode->fscache);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500182}
183
184void v9fs_cache_inode_put_cookie(struct inode *inode)
185{
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530186 struct v9fs_inode *v9inode = V9FS_I(inode);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500187
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530188 if (!v9inode->fscache)
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500189 return;
Joe Perches5d385152011-11-28 10:40:46 -0800190 p9_debug(P9_DEBUG_FSC, "inode %p put cookie %p\n",
191 inode, v9inode->fscache);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500192
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530193 fscache_relinquish_cookie(v9inode->fscache, 0);
194 v9inode->fscache = NULL;
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500195}
196
197void v9fs_cache_inode_flush_cookie(struct inode *inode)
198{
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530199 struct v9fs_inode *v9inode = V9FS_I(inode);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500200
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530201 if (!v9inode->fscache)
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500202 return;
Joe Perches5d385152011-11-28 10:40:46 -0800203 p9_debug(P9_DEBUG_FSC, "inode %p flush cookie %p\n",
204 inode, v9inode->fscache);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500205
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530206 fscache_relinquish_cookie(v9inode->fscache, 1);
207 v9inode->fscache = NULL;
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500208}
209
210void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp)
211{
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530212 struct v9fs_inode *v9inode = V9FS_I(inode);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500213
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530214 if (!v9inode->fscache)
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500215 return;
216
Sasha Levin8f5fed12016-01-07 17:49:51 -0500217 mutex_lock(&v9inode->fscache_lock);
Geyslan G. Bembd126e52013-09-28 20:32:13 -0300218
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500219 if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
220 v9fs_cache_inode_flush_cookie(inode);
221 else
222 v9fs_cache_inode_get_cookie(inode);
223
Sasha Levin8f5fed12016-01-07 17:49:51 -0500224 mutex_unlock(&v9inode->fscache_lock);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500225}
226
227void v9fs_cache_inode_reset_cookie(struct inode *inode)
228{
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530229 struct v9fs_inode *v9inode = V9FS_I(inode);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500230 struct v9fs_session_info *v9ses;
231 struct fscache_cookie *old;
232
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530233 if (!v9inode->fscache)
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500234 return;
235
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530236 old = v9inode->fscache;
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500237
Sasha Levin8f5fed12016-01-07 17:49:51 -0500238 mutex_lock(&v9inode->fscache_lock);
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530239 fscache_relinquish_cookie(v9inode->fscache, 1);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500240
241 v9ses = v9fs_inode2v9ses(inode);
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530242 v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500243 &v9fs_cache_inode_index_def,
David Howells94d30ae2013-09-21 00:09:31 +0100244 v9inode, true);
Joe Perches5d385152011-11-28 10:40:46 -0800245 p9_debug(P9_DEBUG_FSC, "inode %p revalidating cookie old %p new %p\n",
246 inode, old, v9inode->fscache);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500247
Sasha Levin8f5fed12016-01-07 17:49:51 -0500248 mutex_unlock(&v9inode->fscache_lock);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500249}
250
251int __v9fs_fscache_release_page(struct page *page, gfp_t gfp)
252{
253 struct inode *inode = page->mapping->host;
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530254 struct v9fs_inode *v9inode = V9FS_I(inode);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500255
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530256 BUG_ON(!v9inode->fscache);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500257
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530258 return fscache_maybe_release_page(v9inode->fscache, page, gfp);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500259}
260
261void __v9fs_fscache_invalidate_page(struct page *page)
262{
263 struct inode *inode = page->mapping->host;
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530264 struct v9fs_inode *v9inode = V9FS_I(inode);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500265
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530266 BUG_ON(!v9inode->fscache);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500267
268 if (PageFsCache(page)) {
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530269 fscache_wait_on_page_write(v9inode->fscache, page);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500270 BUG_ON(!PageLocked(page));
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530271 fscache_uncache_page(v9inode->fscache, page);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500272 }
273}
274
/*
 * Completion callback for fscache_read_or_alloc_page(s): mark the
 * page up to date only on success, then unlock it either way.
 */
static void v9fs_vfs_readpage_complete(struct page *page, void *data,
				       int error)
{
	if (error == 0)
		SetPageUptodate(page);
	unlock_page(page);
}
283
284/**
285 * __v9fs_readpage_from_fscache - read a page from cache
286 *
287 * Returns 0 if the pages are in cache and a BIO is submitted,
288 * 1 if the pages are not in cache and -error otherwise.
289 */
290
291int __v9fs_readpage_from_fscache(struct inode *inode, struct page *page)
292{
293 int ret;
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530294 const struct v9fs_inode *v9inode = V9FS_I(inode);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500295
Joe Perches5d385152011-11-28 10:40:46 -0800296 p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530297 if (!v9inode->fscache)
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500298 return -ENOBUFS;
299
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530300 ret = fscache_read_or_alloc_page(v9inode->fscache,
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500301 page,
302 v9fs_vfs_readpage_complete,
303 NULL,
304 GFP_KERNEL);
305 switch (ret) {
306 case -ENOBUFS:
307 case -ENODATA:
Joe Perches5d385152011-11-28 10:40:46 -0800308 p9_debug(P9_DEBUG_FSC, "page/inode not in cache %d\n", ret);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500309 return 1;
310 case 0:
Joe Perches5d385152011-11-28 10:40:46 -0800311 p9_debug(P9_DEBUG_FSC, "BIO submitted\n");
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500312 return ret;
313 default:
Joe Perches5d385152011-11-28 10:40:46 -0800314 p9_debug(P9_DEBUG_FSC, "ret %d\n", ret);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500315 return ret;
316 }
317}
318
319/**
320 * __v9fs_readpages_from_fscache - read multiple pages from cache
321 *
322 * Returns 0 if the pages are in cache and a BIO is submitted,
323 * 1 if the pages are not in cache and -error otherwise.
324 */
325
326int __v9fs_readpages_from_fscache(struct inode *inode,
327 struct address_space *mapping,
328 struct list_head *pages,
329 unsigned *nr_pages)
330{
331 int ret;
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530332 const struct v9fs_inode *v9inode = V9FS_I(inode);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500333
Joe Perches5d385152011-11-28 10:40:46 -0800334 p9_debug(P9_DEBUG_FSC, "inode %p pages %u\n", inode, *nr_pages);
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530335 if (!v9inode->fscache)
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500336 return -ENOBUFS;
337
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530338 ret = fscache_read_or_alloc_pages(v9inode->fscache,
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500339 mapping, pages, nr_pages,
340 v9fs_vfs_readpage_complete,
341 NULL,
342 mapping_gfp_mask(mapping));
343 switch (ret) {
344 case -ENOBUFS:
345 case -ENODATA:
Joe Perches5d385152011-11-28 10:40:46 -0800346 p9_debug(P9_DEBUG_FSC, "pages/inodes not in cache %d\n", ret);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500347 return 1;
348 case 0:
349 BUG_ON(!list_empty(pages));
350 BUG_ON(*nr_pages != 0);
Joe Perches5d385152011-11-28 10:40:46 -0800351 p9_debug(P9_DEBUG_FSC, "BIO submitted\n");
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500352 return ret;
353 default:
Joe Perches5d385152011-11-28 10:40:46 -0800354 p9_debug(P9_DEBUG_FSC, "ret %d\n", ret);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500355 return ret;
356 }
357}
358
359/**
360 * __v9fs_readpage_to_fscache - write a page to the cache
361 *
362 */
363
364void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page)
365{
366 int ret;
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530367 const struct v9fs_inode *v9inode = V9FS_I(inode);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500368
Joe Perches5d385152011-11-28 10:40:46 -0800369 p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530370 ret = fscache_write_page(v9inode->fscache, page, GFP_KERNEL);
Joe Perches5d385152011-11-28 10:40:46 -0800371 p9_debug(P9_DEBUG_FSC, "ret = %d\n", ret);
Abhishek Kulkarni60e78d22009-09-23 13:00:27 -0500372 if (ret != 0)
373 v9fs_uncache_page(inode, page);
374}
Aneesh Kumar K.V2efda792011-02-28 17:03:56 +0530375
376/*
377 * wait for a page to complete writing to the cache
378 */
379void __v9fs_fscache_wait_on_page_write(struct inode *inode, struct page *page)
380{
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530381 const struct v9fs_inode *v9inode = V9FS_I(inode);
Joe Perches5d385152011-11-28 10:40:46 -0800382 p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
Aneesh Kumar K.V2efda792011-02-28 17:03:56 +0530383 if (PageFsCache(page))
Aneesh Kumar K.Va78ce052011-02-28 17:04:02 +0530384 fscache_wait_on_page_write(v9inode->fscache, page);
Aneesh Kumar K.V2efda792011-02-28 17:03:56 +0530385}