blob: ea31bceac4f288f3e68ab6713c52bbfc7fd78fb3 [file] [log] [blame]
David Teiglandb3b94fa2006-01-16 16:50:04 +00001/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License v.2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/pagemap.h>
16#include <linux/mm.h>
17#include <asm/semaphore.h>
18
19#include "gfs2.h"
20#include "bmap.h"
21#include "inode.h"
22#include "page.h"
23#include "trans.h"
Steven Whitehouse257f9b42006-01-31 10:00:25 +000024#include "ops_address.h"
David Teiglandb3b94fa2006-01-16 16:50:04 +000025
26/**
27 * gfs2_pte_inval - Sync and invalidate all PTEs associated with a glock
28 * @gl: the glock
29 *
30 */
31
32void gfs2_pte_inval(struct gfs2_glock *gl)
33{
34 struct gfs2_inode *ip;
35 struct inode *inode;
36
37 ip = get_gl2ip(gl);
38 if (!ip || !S_ISREG(ip->i_di.di_mode))
39 return;
40
41 if (!test_bit(GIF_PAGED, &ip->i_flags))
42 return;
43
44 inode = gfs2_ip2v_lookup(ip);
45 if (inode) {
46 unmap_shared_mapping_range(inode->i_mapping, 0, 0);
47 iput(inode);
48
49 if (test_bit(GIF_SW_PAGED, &ip->i_flags))
50 set_bit(GLF_DIRTY, &gl->gl_flags);
51 }
52
53 clear_bit(GIF_SW_PAGED, &ip->i_flags);
54}
55
56/**
57 * gfs2_page_inval - Invalidate all pages associated with a glock
58 * @gl: the glock
59 *
60 */
61
62void gfs2_page_inval(struct gfs2_glock *gl)
63{
64 struct gfs2_inode *ip;
65 struct inode *inode;
66
67 ip = get_gl2ip(gl);
68 if (!ip || !S_ISREG(ip->i_di.di_mode))
69 return;
70
71 inode = gfs2_ip2v_lookup(ip);
72 if (inode) {
73 struct address_space *mapping = inode->i_mapping;
74
75 truncate_inode_pages(mapping, 0);
76 gfs2_assert_withdraw(ip->i_sbd, !mapping->nrpages);
77
78 iput(inode);
79 }
80
81 clear_bit(GIF_PAGED, &ip->i_flags);
82}
83
84/**
85 * gfs2_page_sync - Sync the data pages (not metadata) associated with a glock
86 * @gl: the glock
87 * @flags: DIO_START | DIO_WAIT
88 *
89 * Syncs data (not metadata) for a regular file.
90 * No-op for all other types.
91 */
92
93void gfs2_page_sync(struct gfs2_glock *gl, int flags)
94{
95 struct gfs2_inode *ip;
96 struct inode *inode;
97
98 ip = get_gl2ip(gl);
99 if (!ip || !S_ISREG(ip->i_di.di_mode))
100 return;
101
102 inode = gfs2_ip2v_lookup(ip);
103 if (inode) {
104 struct address_space *mapping = inode->i_mapping;
105 int error = 0;
106
107 if (flags & DIO_START)
108 filemap_fdatawrite(mapping);
109 if (!error && (flags & DIO_WAIT))
110 error = filemap_fdatawait(mapping);
111
112 /* Put back any errors cleared by filemap_fdatawait()
113 so they can be caught by someone who can pass them
114 up to user space. */
115
116 if (error == -ENOSPC)
117 set_bit(AS_ENOSPC, &mapping->flags);
118 else if (error)
119 set_bit(AS_EIO, &mapping->flags);
120
121 iput(inode);
122 }
123}
124
/**
 * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
 * @ip: the inode
 * @dibh: the dinode buffer
 * @block: the block number that was allocated
 * @private: any locked page held by the caller process
 *
 * Copies the inline (stuffed) file data out of the dinode block into
 * page 0 of the inode's page cache, maps that page's buffer to the
 * newly allocated @block, and dirties it so the data gets written out.
 *
 * Returns: errno
 */

int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
			uint64_t block, void *private)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct inode *inode = ip->i_vnode;
	struct page *page = (struct page *)private;
	struct buffer_head *bh;
	int release = 0;	/* did we take our own page ref/lock? */

	/* The stuffed data lives at file offset 0, so the caller's page
	   is only usable if it is index 0; otherwise grab (and lock)
	   page 0 ourselves and remember to release it. */
	if (!page || page->index) {
		page = grab_cache_page(inode->i_mapping, 0);
		if (!page)
			return -ENOMEM;
		release = 1;
	}

	/* Populate the page from the dinode: file data after the dinode
	   header, then zero the tail up to the end of the page. */
	if (!PageUptodate(page)) {
		void *kaddr = kmap(page);

		memcpy(kaddr,
		       dibh->b_data + sizeof(struct gfs2_dinode),
		       ip->i_di.di_size);
		memset(kaddr + ip->i_di.di_size,
		       0,
		       PAGE_CACHE_SIZE - ip->i_di.di_size);
		kunmap(page);

		SetPageUptodate(page);
	}

	/* One buffer per block; marked uptodate since the page is. */
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits,
				     (1 << BH_Uptodate));

	/* NOTE(review): only the first buffer of the page is mapped and
	   dirtied — assumes a stuffed inode fits in the first block of
	   the page. */
	bh = page_buffers(page);

	if (!buffer_mapped(bh))
		map_bh(bh, inode->i_sb, block);

	set_buffer_uptodate(bh);
	/* Ordered data mode: add the buffer to the transaction before
	   dirtying it so it is flushed before the commit. */
	if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED)
		gfs2_trans_add_databuf(sdp, bh);
	mark_buffer_dirty(bh);

	/* Drop the page lock/ref only if we acquired them here. */
	if (release) {
		unlock_page(page);
		page_cache_release(page);
	}

	return 0;
}
186
/**
 * gfs2_block_truncate_page - Deal with zeroing out data for truncate
 * @mapping: the address space of the inode being truncated
 *
 * Zeroes the tail of the block that straddles the new EOF so no stale
 * data remains visible past the end of the file.  The new size is read
 * from inode->i_size, which the caller must already have updated.
 *
 * This is partly borrowed from ext3.
 *
 * Returns: errno
 */
int gfs2_block_truncate_page(struct address_space *mapping)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = get_v2ip(inode);
	struct gfs2_sbd *sdp = ip->i_sbd;
	loff_t from = inode->i_size;	/* new EOF */
	unsigned long index = from >> PAGE_CACHE_SHIFT;	/* page holding EOF */
	unsigned offset = from & (PAGE_CACHE_SIZE-1);	/* EOF offset in page */
	unsigned blocksize, iblock, length, pos;
	struct buffer_head *bh;
	struct page *page;
	void *kaddr;
	int err;

	/* NOTE(review): allocation failure here silently returns 0, not
	   -ENOMEM — confirm this is intentional. */
	page = grab_cache_page(mapping, index);
	if (!page)
		return 0;

	blocksize = inode->i_sb->s_blocksize;
	/* Bytes to zero: from "offset" to the end of its block. */
	length = blocksize - (offset & (blocksize - 1));
	/* First block number covered by this page. */
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;

	/* Map the buffer to its disk block (read-only lookup). */
	if (!buffer_mapped(bh)) {
		gfs2_get_block(inode, iblock, bh, 0);
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh))
			goto unlock;
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	/* Not cached: read the block in synchronously before zeroing,
	   so we don't clobber the rest of its contents. */
	if (!buffer_uptodate(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	/* Ordered data mode: journal-protect the buffer before it is
	   modified below. */
	if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED/* || gfs2_is_jdata(ip)*/)
		gfs2_trans_add_databuf(sdp, bh);

	/* Zero from the new EOF to the end of its block. */
	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr + offset, 0, length);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);

unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}
261
Steven Whitehouse257f9b42006-01-31 10:00:25 +0000262void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
David Teiglandb3b94fa2006-01-16 16:50:04 +0000263 unsigned int from, unsigned int to)
264{
265 struct buffer_head *head = page_buffers(page);
266 unsigned int bsize = head->b_size;
267 struct buffer_head *bh;
268 unsigned int start, end;
269
270 for (bh = head, start = 0;
271 bh != head || !start;
272 bh = bh->b_this_page, start = end) {
273 end = start + bsize;
274 if (end <= from || start >= to)
275 continue;
Steven Whitehouse257f9b42006-01-31 10:00:25 +0000276 gfs2_trans_add_databuf(ip->i_sbd, bh);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000277 }
278}
279