/* file-nommu.c: no-MMU version of ramfs
 *
 * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/ramfs.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/mman.h>

#include <asm/uaccess.h>
#include "internal.h"

static int ramfs_nommu_setattr(struct dentry *, struct iattr *);

const struct address_space_operations ramfs_aops = {
	.readpage = simple_readpage,
	.write_begin = simple_write_begin,
	.write_end = simple_write_end,
	.set_page_dirty = __set_page_dirty_no_writeback,
};

const struct file_operations ramfs_file_operations = {
	.mmap = ramfs_nommu_mmap,
	.get_unmapped_area = ramfs_nommu_get_unmapped_area,
	.read = do_sync_read,
	.aio_read = generic_file_aio_read,
	.write = do_sync_write,
	.aio_write = generic_file_aio_write,
	.fsync = simple_sync_file,
	.splice_read = generic_file_splice_read,
	.splice_write = generic_file_splice_write,
	.llseek = generic_file_llseek,
};

const struct inode_operations ramfs_file_inode_operations = {
	.setattr = ramfs_nommu_setattr,
	.getattr = simple_getattr,
};

/*****************************************************************************/
/*
 * add a contiguous set of pages into a ramfs inode when it's truncated from
 * size 0 on the assumption that it's going to be used for an mmap of shared
 * memory
 */
int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
{
	struct pagevec lru_pvec;
	unsigned long npages, xpages, loop, limit;
	struct page *pages;
	unsigned order;
	void *data;
	int ret;

	/* make various checks */
	order = get_order(newsize);
	if (unlikely(order >= MAX_ORDER))
		goto too_big;

	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
	if (limit != RLIM_INFINITY && newsize > limit)
		goto fsize_exceeded;

	if (newsize > inode->i_sb->s_maxbytes)
		goto too_big;

	i_size_write(inode, newsize);

	/* allocate enough contiguous pages to be able to satisfy the
	 * request */
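	/* a single high-order allocation is used (and split up below) because
	 * without an MMU the pages backing a shared mapping must be
	 * physically contiguous - userspace will address them directly */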
	pages = alloc_pages(mapping_gfp_mask(inode->i_mapping), order);
	if (!pages)
		return -ENOMEM;

	/* split the high-order page into an array of single pages */
	xpages = 1UL << order;
	npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT;

	split_page(pages, order);

	/* trim off any pages we don't actually require */
	for (loop = npages; loop < xpages; loop++)
		__free_page(pages + loop);

	/* clear the memory we allocated */
	newsize = PAGE_SIZE * npages;
	data = page_address(pages);
	memset(data, 0, newsize);

	/* attach all the pages to the inode's address space */
	pagevec_init(&lru_pvec, 0);
	for (loop = 0; loop < npages; loop++) {
		struct page *page = pages + loop;

		ret = add_to_page_cache(page, inode->i_mapping, loop, GFP_KERNEL);
		if (ret < 0)
			goto add_error;

		if (!pagevec_add(&lru_pvec, page))
			__pagevec_lru_add_file(&lru_pvec);

		/* prevent the page from being discarded on memory pressure */
		SetPageDirty(page);

		unlock_page(page);
	}

	pagevec_lru_add_file(&lru_pvec);
	return 0;

 fsize_exceeded:
	send_sig(SIGXFSZ, current, 0);
 too_big:
	return -EFBIG;

 add_error:
	pagevec_lru_add_file(&lru_pvec);
	page_cache_release(pages + loop);
	for (loop++; loop < npages; loop++)
		__free_page(pages + loop);
	return ret;
}

/*****************************************************************************/
/*
 * check that file shrinkage doesn't leave any VMAs dangling in midair
 */
static int ramfs_nommu_check_mappings(struct inode *inode,
				      size_t newsize, size_t size)
{
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;

	/* search for VMAs that fall within the dead zone */
	vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
			      newsize >> PAGE_SHIFT,
			      (size + PAGE_SIZE - 1) >> PAGE_SHIFT
			      ) {
		/* found one - only interested if it's shared out of the page
		 * cache */
		if (vma->vm_flags & VM_SHARED)
			return -ETXTBSY; /* not quite true, but near enough */
	}

	return 0;
}

/*****************************************************************************/
/*
 * handle a change in the size of a ramfs file
 */
static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size)
{
	int ret;

	/* assume a truncate from zero size is going to be for the purposes of
	 * shared mmap */
	if (size == 0) {
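		/* ramfs_nommu_expand_for_mapping() takes a size_t, which may
		 * be only 32 bits wide here, so anything that doesn't fit in
		 * 32 bits is refused */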
		if (unlikely(newsize >> 32))
			return -EFBIG;

		return ramfs_nommu_expand_for_mapping(inode, newsize);
	}

	/* check that a decrease in size doesn't cut off any shared mappings */
	if (newsize < size) {
		ret = ramfs_nommu_check_mappings(inode, newsize, size);
		if (ret < 0)
			return ret;
	}

	ret = vmtruncate(inode, newsize);

	return ret;
}

/*****************************************************************************/
/*
 * handle a change of attributes
 * - we're specifically interested in a change of size
 */
static int ramfs_nommu_setattr(struct dentry *dentry, struct iattr *ia)
{
	struct inode *inode = dentry->d_inode;
	unsigned int old_ia_valid = ia->ia_valid;
	int ret = 0;

	/* POSIX UID/GID verification for setting inode attributes */
	ret = inode_change_ok(inode, ia);
	if (ret)
		return ret;

	/* by providing our own setattr() method, we skip this quotaism */
	if ((old_ia_valid & ATTR_UID && ia->ia_uid != inode->i_uid) ||
	    (old_ia_valid & ATTR_GID && ia->ia_gid != inode->i_gid))
		ret = DQUOT_TRANSFER(inode, ia) ? -EDQUOT : 0;

	/* pick out size-changing events */
	if (ia->ia_valid & ATTR_SIZE) {
		loff_t size = i_size_read(inode);
		if (ia->ia_size != size) {
			ret = ramfs_nommu_resize(inode, ia->ia_size, size);
			if (ret < 0 || ia->ia_valid == ATTR_SIZE)
				goto out;
		} else {
			/* we skipped the truncate but must still update
			 * timestamps
			 */
			ia->ia_valid |= ATTR_MTIME|ATTR_CTIME;
		}
	}

	ret = inode_setattr(inode, ia);
 out:
	ia->ia_valid = old_ia_valid;
	return ret;
}

/*****************************************************************************/
/*
 * try to determine where a shared mapping can be made
 * - we require that:
 *   - the pages to be mapped must exist
 *   - the pages be physically contiguous in sequence
 */
unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
					    unsigned long addr, unsigned long len,
					    unsigned long pgoff, unsigned long flags)
{
	unsigned long maxpages, lpages, nr, loop, ret;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct page **pages = NULL, **ptr, *page;
	loff_t isize;

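	/* only shared mappings need their backing pages located here; a
	 * private mapping gets its own copy of the data elsewhere */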
	if (!(flags & MAP_SHARED))
		return addr;

	/* the mapping mustn't extend beyond the EOF */
	lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	isize = i_size_read(inode);

	ret = -EINVAL;
	maxpages = (isize + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= maxpages)
		goto out;

	if (maxpages - pgoff < lpages)
		goto out;

	/* gang-find the pages */
	ret = -ENOMEM;
	pages = kzalloc(lpages * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto out_free;

	nr = find_get_pages(inode->i_mapping, pgoff, lpages, pages);
	if (nr != lpages)
		goto out_free_pages; /* leave if some pages were missing */

	/* check the pages for physical adjacency */
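	/* adjacent struct page pointers are taken to indicate adjacent page
	 * frames; that holds for pages carved out of the single contiguous
	 * allocation made by ramfs_nommu_expand_for_mapping(), and anything
	 * else will fail the check below */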
	ptr = pages;
	page = *ptr++;
	page++;
	for (loop = lpages; loop > 1; loop--)
		if (*ptr++ != page++)
			goto out_free_pages;

	/* okay - all conditions fulfilled */
	ret = (unsigned long) page_address(pages[0]);

out_free_pages:
	ptr = pages;
	for (loop = nr; loop > 0; loop--)
		put_page(*ptr++);
out_free:
	kfree(pages);
out:
	return ret;
}

/*****************************************************************************/
/*
 * set up a mapping for shared memory segments
 */
int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma)
{
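	/* for anything other than a shared mapping, returning -ENOSYS tells
	 * the no-MMU mmap core to fall back to making a private copy of the
	 * file's data instead of mapping it directly */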
	if (!(vma->vm_flags & VM_SHARED))
		return -ENOSYS;

	file_accessed(file);
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}