/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
			(atomic_read(&inode->i_writecount) == 1) &&
			!EXT4_I(inode)->i_reserved_data_blocks)
	{
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

static void ext4_unwritten_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete. Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block. If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;

	if (pos >= i_size_read(inode))
		return 0;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return 1;

	return 0;
}
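
/*
 * Worked example (editorial note, assuming a 4k block size): blockmask is
 * then 0xfff, so an AIO write at pos 0x1200 below i_size has
 * (pos & blockmask) == 0x200 and is treated as unaligned (the iov_iter
 * alignment is OR-ed in the same way); such IO must wait behind pending
 * unwritten-extent conversion before it may proceed.
 */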

static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(iocb->ki_filp);
	struct blk_plug plug;
	int o_direct = iocb->ki_flags & IOCB_DIRECT;
	int unaligned_aio = 0;
	int overwrite = 0;
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	/*
	 * Unaligned direct AIO must be serialized against each other, as
	 * zeroing of partial blocks by two competing unaligned AIOs can
	 * result in data corruption.
	 */
	if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
	    !is_sync_kiocb(iocb) &&
	    ext4_unaligned_aio(inode, from, iocb->ki_pos)) {
		unaligned_aio = 1;
		ext4_unwritten_wait(inode);
	}
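
	/*
	 * Editorial note: ext4_unwritten_wait() waits for i_unwritten to
	 * reach zero, i.e. for every in-flight unwritten-extent conversion
	 * on this inode, not just conversions overlapping this IO. That is
	 * coarser than strictly required, but keeps the check simple.
	 */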

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes) {
			ret = -EFBIG;
			goto out;
		}
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}

	iocb->private = &overwrite;
	if (o_direct) {
		size_t length = iov_iter_count(from);
		loff_t pos = iocb->ki_pos;
		blk_start_plug(&plug);

		/* check whether we do a DIO overwrite or not */
		if (ext4_should_dioread_nolock(inode) && !unaligned_aio &&
		    !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
			struct ext4_map_blocks map;
			unsigned int blkbits = inode->i_blkbits;
			int err, len;

			map.m_lblk = pos >> blkbits;
			map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
				- map.m_lblk;
			len = map.m_len;

			err = ext4_map_blocks(NULL, inode, &map, 0);
			/*
			 * 'err == len' means that all of the blocks have
			 * been preallocated, whether or not they are
			 * initialized. To exclude unwritten extents, we
			 * also need to check m_flags: an extent-cache hit
			 * for an initialized extent returns EXT4_MAP_MAPPED,
			 * and a real lookup of a written extent sets it as
			 * well, while an unwritten extent is reported via
			 * EXT4_MAP_UNWRITTEN instead. So checking
			 * EXT4_MAP_MAPPED covers both cases.
			 */
			if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
				overwrite = 1;
		}
	}
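
	/*
	 * Editorial note: overwrite == 1 tells the direct IO path (which
	 * picks it up through iocb->private) that every block in the range
	 * is already allocated and initialized, so the write needs neither
	 * block allocation nor unwritten-extent conversion and can proceed
	 * with more relaxed locking.
	 */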

	ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0) {
		ssize_t err;

		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
		if (err < 0)
			ret = err;
	}
	if (o_direct)
		blk_finish_plug(&plug);

	return ret;

out:
	inode_unlock(inode);
	return ret;
}

#ifdef CONFIG_FS_DAX
static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int result;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vma->vm_file);
	struct super_block *sb = inode->i_sb;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
						EXT4_DATA_TRANS_BLOCKS(sb));
	} else
		down_read(&EXT4_I(inode)->i_mmap_sem);

	if (IS_ERR(handle))
		result = VM_FAULT_SIGBUS;
	else
		result = __dax_fault(vma, vmf, ext4_dax_mmap_get_block, NULL);

	if (write) {
		if (!IS_ERR(handle))
			ext4_journal_stop(handle);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else
		up_read(&EXT4_I(inode)->i_mmap_sem);

	return result;
}
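
/*
 * Editorial note on the pattern above: write faults nest
 * sb_start_pagefault -> i_mmap_sem (shared) -> journal handle, and the
 * handle is stopped before i_mmap_sem is dropped; read faults take only
 * i_mmap_sem. The PMD variant below follows the same ordering.
 */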

static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
						pmd_t *pmd, unsigned int flags)
{
	int result;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vma->vm_file);
	struct super_block *sb = inode->i_sb;
	bool write = flags & FAULT_FLAG_WRITE;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
				ext4_chunk_trans_blocks(inode,
							PMD_SIZE / PAGE_SIZE));
	} else
		down_read(&EXT4_I(inode)->i_mmap_sem);

	if (IS_ERR(handle))
		result = VM_FAULT_SIGBUS;
	else
		result = __dax_pmd_fault(vma, addr, pmd, flags,
				ext4_dax_mmap_get_block, NULL);

	if (write) {
		if (!IS_ERR(handle))
			ext4_journal_stop(handle);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else
		up_read(&EXT4_I(inode)->i_mmap_sem);

	return result;
}

static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int err;
	struct inode *inode = file_inode(vma->vm_file);

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);
	down_read(&EXT4_I(inode)->i_mmap_sem);
	err = __dax_mkwrite(vma, vmf, ext4_dax_mmap_get_block, NULL);
	up_read(&EXT4_I(inode)->i_mmap_sem);
	sb_end_pagefault(inode->i_sb);

	return err;
}

/*
 * Handle write faults for VM_MIXEDMAP mappings. As in the ext4_dax_mkwrite()
 * handler, we check for races against truncate. Note that since we cycle
 * through i_mmap_sem, we are sure that any hole punching that began before we
 * were called has finished by now, so if it covered part of the file we are
 * working on, our pte will have been unmapped and the pte_same() check in
 * wp_pfn_shared() will fail. The fault then gets retried and things work out
 * as desired.
 */
static int ext4_dax_pfn_mkwrite(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct super_block *sb = inode->i_sb;
	loff_t size;
	int ret;

	sb_start_pagefault(sb);
	file_update_time(vma->vm_file);
	down_read(&EXT4_I(inode)->i_mmap_sem);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		ret = VM_FAULT_SIGBUS;
	else
		ret = dax_pfn_mkwrite(vma, vmf);
	up_read(&EXT4_I(inode)->i_mmap_sem);
	sb_end_pagefault(sb);

	return ret;
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.pmd_fault	= ext4_dax_pmd_fault,
	.page_mkwrite	= ext4_dax_mkwrite,
	.pfn_mkwrite	= ext4_dax_pfn_mkwrite,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= ext4_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= ext4_page_mkwrite,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_mapping->host;

	if (ext4_encrypted_inode(inode)) {
		int err = ext4_get_encryption_info(inode);
		if (err)
			return 0;
		if (ext4_encryption_info(inode) == NULL)
			return -ENOKEY;
	}
	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}

static int ext4_file_open(struct inode * inode, struct file * filp)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct vfsmount *mnt = filp->f_path.mnt;
	struct inode *dir = filp->f_path.dentry->d_parent->d_inode;
	struct path path;
	char buf[64], *cp;
	int ret;

	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
		     !(sb->s_flags & MS_RDONLY))) {
		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
		/*
		 * Sample where the filesystem has been mounted and
		 * store it in the superblock for sysadmin convenience
		 * when trying to sort through large numbers of block
		 * devices or filesystem images.
		 */
		memset(buf, 0, sizeof(buf));
		path.mnt = mnt;
		path.dentry = mnt->mnt_root;
		cp = d_path(&path, buf, sizeof(buf));
		if (!IS_ERR(cp)) {
			handle_t *handle;
			int err;

			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
			if (IS_ERR(handle))
				return PTR_ERR(handle);
			BUFFER_TRACE(sbi->s_sbh, "get_write_access");
			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
			if (err) {
				ext4_journal_stop(handle);
				return err;
			}
			strlcpy(sbi->s_es->s_last_mounted, cp,
				sizeof(sbi->s_es->s_last_mounted));
			ext4_handle_dirty_super(handle, sb);
			ext4_journal_stop(handle);
		}
	}
	if (ext4_encrypted_inode(inode)) {
		ret = ext4_get_encryption_info(inode);
		if (ret)
			return -EACCES;
		if (ext4_encryption_info(inode) == NULL)
			return -ENOKEY;
	}
	if (ext4_encrypted_inode(dir) &&
	    !ext4_is_child_context_consistent_with_parent(dir, inode)) {
		ext4_warning(inode->i_sb,
			     "Inconsistent encryption contexts: %lu/%lu\n",
			     (unsigned long) dir->i_ino,
			     (unsigned long) inode->i_ino);
		return -EPERM;
	}
	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}
	return dquot_file_open(inode, filp);
}
/*
 * Here we use ext4_map_blocks() to get a block mapping for an extent-based
 * file rather than ext4_ext_walk_space(), because that lets us implement
 * SEEK_DATA/SEEK_HOLE for block-mapped and extent-mapped files in the same
 * function. Once the extent status tree is fully implemented, it will track
 * all extent status for a file and we can use it directly to retrieve the
 * offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we need to look up
 * the page cache to check whether there is data between [startoff, endoff],
 * because if this range contains an unwritten extent, we classify the
 * extent as data or a hole according to whether the page cache has data.
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
				     int whence,
				     struct ext4_map_blocks *map,
				     loff_t *offset)
{
	struct pagevec pvec;
	unsigned int blkbits;
	pgoff_t index;
	pgoff_t end;
	loff_t endoff;
	loff_t startoff;
	loff_t lastoff;
	int found = 0;

	blkbits = inode->i_sb->s_blocksize_bits;
	startoff = *offset;
	lastoff = startoff;
	endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits;

	index = startoff >> PAGE_CACHE_SHIFT;
	end = endoff >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);
	do {
		int i, num;
		unsigned long nr_pages;

		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  (pgoff_t)num);
		if (nr_pages == 0) {
			if (whence == SEEK_DATA)
				break;

			BUG_ON(whence != SEEK_HOLE);
			/*
			 * If this is the first pass through the loop and
			 * the offset is not beyond the end offset, there is
			 * a hole at this offset.
			 */
			if (lastoff == startoff || lastoff < endoff)
				found = 1;
			break;
		}

		/*
		 * If this is the first pass through the loop and the offset
		 * is smaller than the first page offset, there is a hole at
		 * this offset.
		 */
		if (lastoff == startoff && whence == SEEK_HOLE &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = 1;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			/*
			 * If the current offset is not beyond the end of the
			 * given range, it is a hole.
			 */
			if (lastoff < endoff && whence == SEEK_HOLE &&
			    page->index > end) {
				found = 1;
				*offset = lastoff;
				goto out;
			}

			lock_page(page);

			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			if (page_has_buffers(page)) {
				lastoff = page_offset(page);
				bh = head = page_buffers(page);
				do {
					if (buffer_uptodate(bh) ||
					    buffer_unwritten(bh)) {
						if (whence == SEEK_DATA)
							found = 1;
					} else {
						if (whence == SEEK_HOLE)
							found = 1;
					}
					if (found) {
						*offset = max_t(loff_t,
							startoff, lastoff);
						unlock_page(page);
						goto out;
					}
					lastoff += bh->b_size;
					bh = bh->b_this_page;
				} while (bh != head);
			}

			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * If the number of pages found is less than requested,
		 * there is a hole in the range.
		 */
		if (nr_pages < num && whence == SEEK_HOLE) {
			found = 1;
			*offset = lastoff;
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}
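
/*
 * Editorial note: ext4_find_unwritten_pgoff() returns 1 and updates
 * *offset when the page cache settles the question (data found for
 * SEEK_DATA, or a hole found for SEEK_HOLE); a return of 0 means nothing
 * was found in the range and the caller continues scanning by block
 * mapping alone.
 */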

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_map_blocks map;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t dataoff, isize;
	int blkbits;
	int ret = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize) {
		inode_unlock(inode);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	dataoff = offset;

	do {
		map.m_lblk = last;
		map.m_len = end - last + 1;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
			if (last != start)
				dataoff = (loff_t)last << blkbits;
			break;
		}

		/*
		 * If there is a delayed extent at this offset, treat it
		 * as data.
		 */
		ext4_es_find_delayed_extent_range(inode, last, last, &es);
		if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
			if (last != start)
				dataoff = (loff_t)last << blkbits;
			break;
		}

		/*
		 * If there is an unwritten extent at this offset, it counts
		 * as data or a hole according to whether the page cache has
		 * data for it.
		 */
		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
			int unwritten;
			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
							      &map, &dataoff);
			if (unwritten)
				break;
		}

		last++;
		dataoff = (loff_t)last << blkbits;
	} while (last <= end);

	inode_unlock(inode);

	if (dataoff > isize)
		return -ENXIO;

	return vfs_setpos(file, dataoff, maxsize);
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_map_blocks map;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t holeoff, isize;
	int blkbits;
	int ret = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize) {
		inode_unlock(inode);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	holeoff = offset;

	do {
		map.m_lblk = last;
		map.m_len = end - last + 1;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
			last += ret;
			holeoff = (loff_t)last << blkbits;
			continue;
		}

		/*
		 * If there is a delayed extent at this offset, skip over
		 * it; it counts as data.
		 */
		ext4_es_find_delayed_extent_range(inode, last, last, &es);
		if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
			last = es.es_lblk + es.es_len;
			holeoff = (loff_t)last << blkbits;
			continue;
		}

		/*
		 * If there is an unwritten extent at this offset, it counts
		 * as data or a hole according to whether the page cache has
		 * data for it.
		 */
		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
			int unwritten;
			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
							      &map, &holeoff);
			if (!unwritten) {
				last += ret;
				holeoff = (loff_t)last << blkbits;
				continue;
			}
		}

		/* found a hole */
		break;
	} while (last <= end);

	inode_unlock(inode);

	if (holeoff > isize)
		holeoff = isize;

	return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
		return ext4_seek_data(file, offset, maxbytes);
	case SEEK_HOLE:
		return ext4_seek_hole(file, offset, maxbytes);
	}

	return -EINVAL;
}
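
/*
 * Userspace view (editorial example): for a sparse file whose only data
 * lives in its fifth block, lseek(fd, 0, SEEK_DATA) returns the byte
 * offset of that block, and lseek() from there with SEEK_HOLE returns the
 * offset where the following hole begins (or i_size if data runs to EOF).
 */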

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_getattr,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ext4_listxattr,
	.removexattr	= generic_removexattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};