/*
 * linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/file.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * ext4 fs regular file handling primitives
 *
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#ifdef CONFIG_FS_DAX
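/*
 * DAX read path: perform the read through the iomap DAX machinery while
 * holding the inode lock shared, falling back to buffered I/O if the inode
 * turns out not to be DAX once we hold the lock.
 */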
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock_shared(inode);
	/*
	 * Recheck under inode lock - at this point we are sure it cannot
	 * change anymore
	 */
	if (!IS_DAX(inode)) {
		inode_unlock_shared(inode);
		/* Fallback to buffered IO in case we cannot support DAX */
		return generic_file_read_iter(iocb, to);
	}
	ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}
#endif

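/*
 * Top-level ->read_iter(): dispatch DAX inodes to the DAX read path,
 * otherwise use the generic buffered/direct read path.
 */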
static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	if (!iov_iter_count(to))
		return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
	if (IS_DAX(file_inode(iocb->ki_filp)))
		return ext4_dax_read_iter(iocb, to);
#endif
	return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
			(atomic_read(&inode->i_writecount) == 1) &&
			!EXT4_I(inode)->i_reserved_data_blocks)
	{
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

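/*
 * Wait until all pending conversions of unwritten extents on this inode
 * have completed (i.e. i_unwritten has dropped to zero).
 */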
static void ext4_unwritten_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete. Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block. If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;

	if (pos >= i_size_read(inode))
		return 0;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return 1;

	return 0;
}

/* Is IO overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
	struct ext4_map_blocks map;
	unsigned int blkbits = inode->i_blkbits;
	int err, blklen;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = pos >> blkbits;
	map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
	blklen = map.m_len;

	err = ext4_map_blocks(NULL, inode, &map, 0);
	/*
	 * 'err == blklen' means that all of the blocks have been preallocated,
	 * regardless of whether they have been initialized or not. To exclude
	 * unwritten extents, we need to check m_flags.
	 */
	return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}

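/*
 * Common pre-write checks: run generic_write_checks() and, for inodes that
 * are still block-mapped (no extents), clamp the write so it does not cross
 * s_bitmap_maxbytes.
 */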
static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		return ret;
	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
			return -EFBIG;
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}
	return iov_iter_count(from);
}

#ifdef CONFIG_FS_DAX
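/*
 * DAX write path: take the inode lock exclusively, run the usual write
 * checks, and hand the I/O to dax_iomap_rw(). For pure overwrites of
 * already-allocated, initialized blocks the exclusive lock is downgraded
 * to a shared one so such writes can proceed in parallel.
 */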
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;
	bool overwrite = false;

	inode_lock(inode);
	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;
	ret = file_remove_privs(iocb->ki_filp);
	if (ret)
		goto out;
	ret = file_update_time(iocb->ki_filp);
	if (ret)
		goto out;

	if (ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from))) {
		overwrite = true;
		downgrade_write(&inode->i_rwsem);
	}
	ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
out:
	if (!overwrite)
		inode_unlock(inode);
	else
		inode_unlock_shared(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif

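/*
 * Top-level ->write_iter(): route DAX inodes to ext4_dax_write_iter(),
 * serialize unaligned direct AIO, and note whether a direct I/O write is a
 * pure overwrite of already-mapped blocks (stashed in iocb->private for the
 * direct I/O path) before calling the generic write code.
 */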
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	int o_direct = iocb->ki_flags & IOCB_DIRECT;
	int unaligned_aio = 0;
	int overwrite = 0;
	ssize_t ret;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_write_iter(iocb, from);
#endif

	inode_lock(inode);
	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	/*
	 * Unaligned direct AIO must be serialized among each other as zeroing
	 * of partial blocks of two competing unaligned AIOs can result in data
	 * corruption.
	 */
	if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
	    !is_sync_kiocb(iocb) &&
	    ext4_unaligned_aio(inode, from, iocb->ki_pos)) {
		unaligned_aio = 1;
		ext4_unwritten_wait(inode);
	}

	iocb->private = &overwrite;
	/* Check whether we do a DIO overwrite or not */
	if (o_direct && ext4_should_dioread_nolock(inode) && !unaligned_aio &&
	    ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from)))
		overwrite = 1;

	ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;

out:
	inode_unlock(inode);
	return ret;
}

#ifdef CONFIG_FS_DAX
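/*
 * DAX page fault handler. For write faults we start a journal handle (block
 * allocation may be needed) and take i_mmap_sem to protect against truncate
 * before calling into the generic DAX fault code.
 */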
static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int result;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vma->vm_file);
	struct super_block *sb = inode->i_sb;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
						EXT4_DATA_TRANS_BLOCKS(sb));
	} else
		down_read(&EXT4_I(inode)->i_mmap_sem);

	if (IS_ERR(handle))
		result = VM_FAULT_SIGBUS;
	else
		result = dax_fault(vma, vmf, ext4_dax_get_block);

	if (write) {
		if (!IS_ERR(handle))
			ext4_journal_stop(handle);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else
		up_read(&EXT4_I(inode)->i_mmap_sem);

	return result;
}

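/*
 * PMD-sized DAX fault handler: same structure as ext4_dax_fault(), but the
 * journal handle is sized for mapping a whole PMD worth of blocks.
 */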
static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
						pmd_t *pmd, unsigned int flags)
{
	int result;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vma->vm_file);
	struct super_block *sb = inode->i_sb;
	bool write = flags & FAULT_FLAG_WRITE;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
				ext4_chunk_trans_blocks(inode,
							PMD_SIZE / PAGE_SIZE));
	} else
		down_read(&EXT4_I(inode)->i_mmap_sem);

	if (IS_ERR(handle))
		result = VM_FAULT_SIGBUS;
	else
		result = dax_pmd_fault(vma, addr, pmd, flags,
					 ext4_dax_get_block);

	if (write) {
		if (!IS_ERR(handle))
			ext4_journal_stop(handle);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else
		up_read(&EXT4_I(inode)->i_mmap_sem);

	return result;
}

/*
 * Handle write faults for VM_MIXEDMAP mappings. As in the ext4_dax_fault()
 * handler, we check for races against truncate. Because we cycle through
 * i_mmap_sem, any hole punching that began before we were called has
 * finished by now; if it covered part of the file we are working on, our
 * pte has been unmapped and the pte_same() check in wp_pfn_shared() fails,
 * so the fault is retried and things work out as desired.
 */
static int ext4_dax_pfn_mkwrite(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct super_block *sb = inode->i_sb;
	loff_t size;
	int ret;

	sb_start_pagefault(sb);
	file_update_time(vma->vm_file);
	down_read(&EXT4_I(inode)->i_mmap_sem);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		ret = VM_FAULT_SIGBUS;
	else
		ret = dax_pfn_mkwrite(vma, vmf);
	up_read(&EXT4_I(inode)->i_mmap_sem);
	sb_end_pagefault(sb);

	return ret;
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.pmd_fault	= ext4_dax_pmd_fault,
	.page_mkwrite	= ext4_dax_fault,
	.pfn_mkwrite	= ext4_dax_pfn_mkwrite,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= ext4_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= ext4_page_mkwrite,
};

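/*
 * ->mmap(): make sure the encryption key is available for encrypted inodes,
 * then install either the DAX or the regular vm_ops for the mapping.
 */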
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_mapping->host;

	if (ext4_encrypted_inode(inode)) {
		int err = fscrypt_get_encryption_info(inode);
		if (err)
			return 0;
		if (!fscrypt_has_encryption_key(inode))
			return -ENOKEY;
	}
	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}

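/*
 * ->open(): record the mount point in the superblock the first time the
 * filesystem is opened read-write, verify encryption keys and contexts, and
 * attach the jbd2 inode for files opened for writing.
 */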
static int ext4_file_open(struct inode * inode, struct file * filp)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct vfsmount *mnt = filp->f_path.mnt;
	struct dentry *dir;
	struct path path;
	char buf[64], *cp;
	int ret;

	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
		     !(sb->s_flags & MS_RDONLY))) {
		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
		/*
		 * Sample where the filesystem has been mounted and
		 * store it in the superblock for sysadmin convenience
		 * when trying to sort through large numbers of block
		 * devices or filesystem images.
		 */
		memset(buf, 0, sizeof(buf));
		path.mnt = mnt;
		path.dentry = mnt->mnt_root;
		cp = d_path(&path, buf, sizeof(buf));
		if (!IS_ERR(cp)) {
			handle_t *handle;
			int err;

			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
			if (IS_ERR(handle))
				return PTR_ERR(handle);
			BUFFER_TRACE(sbi->s_sbh, "get_write_access");
			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
			if (err) {
				ext4_journal_stop(handle);
				return err;
			}
			strlcpy(sbi->s_es->s_last_mounted, cp,
				sizeof(sbi->s_es->s_last_mounted));
			ext4_handle_dirty_super(handle, sb);
			ext4_journal_stop(handle);
		}
	}
	if (ext4_encrypted_inode(inode)) {
		ret = fscrypt_get_encryption_info(inode);
		if (ret)
			return -EACCES;
		if (!fscrypt_has_encryption_key(inode))
			return -ENOKEY;
	}

	dir = dget_parent(file_dentry(filp));
	if (ext4_encrypted_inode(d_inode(dir)) &&
	    !fscrypt_has_permitted_context(d_inode(dir), inode)) {
		ext4_warning(inode->i_sb,
			     "Inconsistent encryption contexts: %lu/%lu",
			     (unsigned long) d_inode(dir)->i_ino,
			     (unsigned long) inode->i_ino);
		dput(dir);
		return -EPERM;
	}
	dput(dir);
	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}
	return dquot_file_open(inode, filp);
}

/*
 * Here we use ext4_map_blocks() to get a block mapping for an extent-based
 * file rather than ext4_ext_walk_space() because we can introduce
 * SEEK_DATA/SEEK_HOLE for both block-mapped and extent-mapped files in the
 * same function. Once the extent status tree has been fully implemented, it
 * will track all extent status for a file and we can use it directly to
 * retrieve the offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we need to look up
 * the page cache to check whether there is any data between
 * [startoff, endoff]: if this range contains an unwritten extent, we treat
 * the extent as data or as a hole depending on whether the page cache has
 * data for it.
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
				     int whence,
				     ext4_lblk_t end_blk,
				     loff_t *offset)
{
	struct pagevec pvec;
	unsigned int blkbits;
	pgoff_t index;
	pgoff_t end;
	loff_t endoff;
	loff_t startoff;
	loff_t lastoff;
	int found = 0;

	blkbits = inode->i_sb->s_blocksize_bits;
	startoff = *offset;
	lastoff = startoff;
	endoff = (loff_t)end_blk << blkbits;

	index = startoff >> PAGE_SHIFT;
	end = endoff >> PAGE_SHIFT;

	pagevec_init(&pvec, 0);
	do {
		int i, num;
		unsigned long nr_pages;

		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  (pgoff_t)num);
		if (nr_pages == 0) {
			if (whence == SEEK_DATA)
				break;

			BUG_ON(whence != SEEK_HOLE);
			/*
			 * If this is the first time to go into the loop and
			 * offset is not beyond the end offset, it will be a
			 * hole at this offset
			 */
			if (lastoff == startoff || lastoff < endoff)
				found = 1;
			break;
		}

		/*
		 * If this is the first time to go into the loop and
		 * offset is smaller than the first page offset, it will be a
		 * hole at this offset.
		 */
		if (lastoff == startoff && whence == SEEK_HOLE &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = 1;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			/*
			 * If the current offset is not beyond the end of given
			 * range, it will be a hole.
			 */
			if (lastoff < endoff && whence == SEEK_HOLE &&
			    page->index > end) {
				found = 1;
				*offset = lastoff;
				goto out;
			}

			lock_page(page);

			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			if (page_has_buffers(page)) {
				lastoff = page_offset(page);
				bh = head = page_buffers(page);
				do {
					if (buffer_uptodate(bh) ||
					    buffer_unwritten(bh)) {
						if (whence == SEEK_DATA)
							found = 1;
					} else {
						if (whence == SEEK_HOLE)
							found = 1;
					}
					if (found) {
						*offset = max_t(loff_t,
							startoff, lastoff);
						unlock_page(page);
						goto out;
					}
					lastoff += bh->b_size;
					bh = bh->b_this_page;
				} while (bh != head);
			}

			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * Fewer pages were returned than we asked for, so the rest
		 * of the range must be a hole.
		 */
		if (nr_pages < num && whence == SEEK_HOLE) {
			found = 1;
			*offset = lastoff;
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t dataoff, isize;
	int blkbits;
	int ret;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize) {
		inode_unlock(inode);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	dataoff = offset;

	do {
		ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
		if (ret <= 0) {
			/* No extent found -> no data */
			if (ret == 0)
				ret = -ENXIO;
			inode_unlock(inode);
			return ret;
		}

		last = es.es_lblk;
		if (last != start)
			dataoff = (loff_t)last << blkbits;
		if (!ext4_es_is_unwritten(&es))
			break;

		/*
		 * If there is an unwritten extent at this offset, treat it
		 * as data or as a hole depending on whether the page cache
		 * has data for it.
		 */
		if (ext4_find_unwritten_pgoff(inode, SEEK_DATA,
					      es.es_lblk + es.es_len, &dataoff))
			break;
		last += es.es_len;
		dataoff = (loff_t)last << blkbits;
		cond_resched();
	} while (last <= end);

	inode_unlock(inode);

	if (dataoff > isize)
		return -ENXIO;

	return vfs_setpos(file, dataoff, maxsize);
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t holeoff, isize;
	int blkbits;
	int ret;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize) {
		inode_unlock(inode);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	holeoff = offset;

	do {
		ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
		if (ret < 0) {
			inode_unlock(inode);
			return ret;
		}
		/* Found a hole? */
		if (ret == 0 || es.es_lblk > last) {
			if (last != start)
				holeoff = (loff_t)last << blkbits;
			break;
		}
		/*
		 * If there is an unwritten extent at this offset, treat it
		 * as data or as a hole depending on whether the page cache
		 * has data for it.
		 */
		if (ext4_es_is_unwritten(&es) &&
		    ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
					      last + es.es_len, &holeoff))
			break;

		last += es.es_len;
		holeoff = (loff_t)last << blkbits;
		cond_resched();
	} while (last <= end);

	inode_unlock(inode);

	if (holeoff > isize)
		holeoff = isize;

	return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
		return ext4_seek_data(file, offset, maxbytes);
	case SEEK_HOLE:
		return ext4_seek_hole(file, offset, maxbytes);
	}

	return -EINVAL;
}

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= ext4_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_getattr,
	.listxattr	= ext4_listxattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};