/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 *  linux/fs/minix/file.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#ifdef CONFIG_FS_DAX
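/*
 * DAX read path: under the shared inode lock, recheck that the inode is
 * still DAX (falling back to buffered reads otherwise) and serve the read
 * through dax_iomap_rw() with the ext4 iomap operations.
 */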
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock_shared(inode);
	/*
	 * Recheck under inode lock - at this point we are sure it cannot
	 * change anymore
	 */
	if (!IS_DAX(inode)) {
		inode_unlock_shared(inode);
		/* Fallback to buffered IO in case we cannot support DAX */
		return generic_file_read_iter(iocb, to);
	}
	ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}
#endif

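/*
 * Top-level ->read_iter(): fail with -EIO after a forced shutdown, skip
 * zero-length reads, and dispatch to the DAX read path or to
 * generic_file_read_iter().
 */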
static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	if (unlikely(ext4_forced_shutdown(EXT4_SB(file_inode(iocb->ki_filp)->i_sb))))
		return -EIO;

	if (!iov_iter_count(to))
		return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
	if (IS_DAX(file_inode(iocb->ki_filp)))
		return ext4_dax_read_iter(iocb, to);
#endif
	return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
			(atomic_read(&inode->i_writecount) == 1) &&
			!EXT4_I(inode)->i_reserved_data_blocks)
	{
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

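/*
 * Wait until all conversions of unwritten extents pending on this inode
 * have completed (i_unwritten has dropped to zero).
 */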
static void ext4_unwritten_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete. Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block. If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;

	if (pos >= i_size_read(inode))
		return 0;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return 1;

	return 0;
}

/* Is IO overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
	struct ext4_map_blocks map;
	unsigned int blkbits = inode->i_blkbits;
	int err, blklen;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = pos >> blkbits;
	map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
	blklen = map.m_len;

	err = ext4_map_blocks(NULL, inode, &map, 0);
	/*
	 * 'err == blklen' means that all of the blocks have been preallocated,
	 * regardless of whether they have been initialized or not. To exclude
	 * unwritten extents, we need to check m_flags.
	 */
	return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}

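/*
 * Common checks done before a write: run generic_write_checks() and, for
 * bitmap-format (non-extent) files, limit the write to s_bitmap_maxbytes.
 */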
static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		return ret;
	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
			return -EFBIG;
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}
	return iov_iter_count(from);
}

#ifdef CONFIG_FS_DAX
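/*
 * DAX write path: under the exclusive inode lock, run the write checks,
 * drop privileges, update timestamps and issue the write through
 * dax_iomap_rw() with the ext4 iomap operations.
 */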
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;
	ret = file_remove_privs(iocb->ki_filp);
	if (ret)
		goto out;
	ret = file_update_time(iocb->ki_filp);
	if (ret)
		goto out;

	ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
out:
	inode_unlock(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif

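/*
 * Top-level ->write_iter(): dispatches DAX writes, serializes unaligned
 * direct AIO, detects direct I/O overwrites of already mapped blocks, and
 * performs the write via __generic_file_write_iter() under the inode lock.
 */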
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	int o_direct = iocb->ki_flags & IOCB_DIRECT;
	int unaligned_aio = 0;
	int overwrite = 0;
	ssize_t ret;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return ext4_dax_write_iter(iocb, from);
#endif

	inode_lock(inode);
	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	/*
	 * Unaligned direct AIO requests must be serialized with respect to
	 * each other, as zeroing of partial blocks by two competing unaligned
	 * AIOs can result in data corruption.
	 */
	if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
	    !is_sync_kiocb(iocb) &&
	    ext4_unaligned_aio(inode, from, iocb->ki_pos)) {
		unaligned_aio = 1;
		ext4_unwritten_wait(inode);
	}

	iocb->private = &overwrite;
	/* Check whether we do a DIO overwrite or not */
	if (o_direct && ext4_should_dioread_nolock(inode) && !unaligned_aio &&
	    ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from)))
		overwrite = 1;

	ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;

out:
	inode_unlock(inode);
	return ret;
}

#ifdef CONFIG_FS_DAX
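/*
 * Fault handler for DAX mappings. Write faults are bracketed by
 * sb_start_pagefault()/sb_end_pagefault() and a timestamp update; the fault
 * itself is served by dax_iomap_fault() while holding i_mmap_sem for read.
 */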
static int ext4_dax_fault(struct vm_fault *vmf)
{
	int result;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct super_block *sb = inode->i_sb;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vmf->vma->vm_file);
	}
	down_read(&EXT4_I(inode)->i_mmap_sem);
	result = dax_iomap_fault(vmf, &ext4_iomap_ops);
	up_read(&EXT4_I(inode)->i_mmap_sem);
	if (write)
		sb_end_pagefault(sb);

	return result;
}

/*
 * Handle write fault for VM_MIXEDMAP mappings. As in the ext4_dax_fault()
 * handler, we check for races against truncate. Note that since we cycle
 * through i_mmap_sem, we are sure that any hole punching that began before we
 * were called is finished by now, and so if it included part of the file we
 * are working on, our pte will get unmapped and the check for pte_same() in
 * wp_pfn_shared() fails. Thus the fault gets retried and things work out as
 * desired.
 */
static int ext4_dax_pfn_mkwrite(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct super_block *sb = inode->i_sb;
	loff_t size;
	int ret;

	sb_start_pagefault(sb);
	file_update_time(vmf->vma->vm_file);
	down_read(&EXT4_I(inode)->i_mmap_sem);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		ret = VM_FAULT_SIGBUS;
	else
		ret = dax_pfn_mkwrite(vmf);
	up_read(&EXT4_I(inode)->i_mmap_sem);
	sb_end_pagefault(sb);

	return ret;
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.huge_fault	= ext4_dax_fault,
	.page_mkwrite	= ext4_dax_fault,
	.pfn_mkwrite	= ext4_dax_pfn_mkwrite,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= ext4_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= ext4_page_mkwrite,
};

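/*
 * ->mmap() handler: require the encryption key for encrypted inodes, then
 * install either the DAX or the regular vm_operations for the mapping.
 */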
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_mapping->host;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (ext4_encrypted_inode(inode)) {
		int err = fscrypt_get_encryption_info(inode);
		if (err)
			return 0;
		if (!fscrypt_has_encryption_key(inode))
			return -ENOKEY;
	}
	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}

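/*
 * ->open() handler: on the first open after a read-write mount, sample the
 * mount path into s_last_mounted in the superblock; verify encryption keys
 * and contexts for encrypted inodes; attach the jbd2 inode when the file is
 * opened for writing; finally call dquot_file_open().
 */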
static int ext4_file_open(struct inode * inode, struct file * filp)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct vfsmount *mnt = filp->f_path.mnt;
	struct dentry *dir;
	struct path path;
	char buf[64], *cp;
	int ret;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
		     !(sb->s_flags & MS_RDONLY))) {
		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
		/*
		 * Sample where the filesystem has been mounted and
		 * store it in the superblock for sysadmin convenience
		 * when trying to sort through large numbers of block
		 * devices or filesystem images.
		 */
		memset(buf, 0, sizeof(buf));
		path.mnt = mnt;
		path.dentry = mnt->mnt_root;
		cp = d_path(&path, buf, sizeof(buf));
		if (!IS_ERR(cp)) {
			handle_t *handle;
			int err;

			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
			if (IS_ERR(handle))
				return PTR_ERR(handle);
			BUFFER_TRACE(sbi->s_sbh, "get_write_access");
			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
			if (err) {
				ext4_journal_stop(handle);
				return err;
			}
			strlcpy(sbi->s_es->s_last_mounted, cp,
				sizeof(sbi->s_es->s_last_mounted));
			ext4_handle_dirty_super(handle, sb);
			ext4_journal_stop(handle);
		}
	}
	if (ext4_encrypted_inode(inode)) {
		ret = fscrypt_get_encryption_info(inode);
		if (ret)
			return -EACCES;
		if (!fscrypt_has_encryption_key(inode))
			return -ENOKEY;
	}

	dir = dget_parent(file_dentry(filp));
	if (ext4_encrypted_inode(d_inode(dir)) &&
			!fscrypt_has_permitted_context(d_inode(dir), inode)) {
		ext4_warning(inode->i_sb,
			     "Inconsistent encryption contexts: %lu/%lu",
			     (unsigned long) d_inode(dir)->i_ino,
			     (unsigned long) inode->i_ino);
		dput(dir);
		return -EPERM;
	}
	dput(dir);
	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}
	return dquot_file_open(inode, filp);
}

/*
 * Here we use ext4_map_blocks() to get a block mapping for an extent-based
 * file rather than ext4_ext_walk_space() because we can introduce
 * SEEK_DATA/SEEK_HOLE for block-mapped and extent-mapped files in the same
 * function. When the extent status tree has been fully implemented, it will
 * track all extent status for a file and we can directly use it to
 * retrieve the offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we need to look up
 * the page cache to check whether or not there is data between
 * [startoff, endoff] because, if this range contains an unwritten extent,
 * we treat this extent as data or as a hole according to whether the
 * page cache has data or not.
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
				     int whence,
				     ext4_lblk_t end_blk,
				     loff_t *offset)
{
	struct pagevec pvec;
	unsigned int blkbits;
	pgoff_t index;
	pgoff_t end;
	loff_t endoff;
	loff_t startoff;
	loff_t lastoff;
	int found = 0;

	blkbits = inode->i_sb->s_blocksize_bits;
	startoff = *offset;
	lastoff = startoff;
	endoff = (loff_t)end_blk << blkbits;

	index = startoff >> PAGE_SHIFT;
	end = endoff >> PAGE_SHIFT;

	pagevec_init(&pvec, 0);
	do {
		int i, num;
		unsigned long nr_pages;

		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  (pgoff_t)num);
		if (nr_pages == 0) {
			if (whence == SEEK_DATA)
				break;

			BUG_ON(whence != SEEK_HOLE);
			/*
			 * If this is the first time to go into the loop and
			 * offset is not beyond the end offset, it will be a
			 * hole at this offset
			 */
			if (lastoff == startoff || lastoff < endoff)
				found = 1;
			break;
		}

		/*
		 * If this is the first time to go into the loop and
		 * offset is smaller than the first page offset, it will be a
		 * hole at this offset.
		 */
		if (lastoff == startoff && whence == SEEK_HOLE &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = 1;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			/*
			 * If the current offset is not beyond the end of given
			 * range, it will be a hole.
			 */
			if (lastoff < endoff && whence == SEEK_HOLE &&
			    page->index > end) {
				found = 1;
				*offset = lastoff;
				goto out;
			}

			lock_page(page);

			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			if (page_has_buffers(page)) {
				lastoff = page_offset(page);
				bh = head = page_buffers(page);
				do {
					if (buffer_uptodate(bh) ||
					    buffer_unwritten(bh)) {
						if (whence == SEEK_DATA)
							found = 1;
					} else {
						if (whence == SEEK_HOLE)
							found = 1;
					}
					if (found) {
						*offset = max_t(loff_t,
							startoff, lastoff);
						unlock_page(page);
						goto out;
					}
					lastoff += bh->b_size;
					bh = bh->b_this_page;
				} while (bh != head);
			}

			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * Fewer pages were returned than requested, so there must be
		 * a hole after the last page in the range.
		 */
		if (nr_pages < num && whence == SEEK_HOLE) {
			found = 1;
			*offset = lastoff;
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t dataoff, isize;
	int blkbits;
	int ret;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize) {
		inode_unlock(inode);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	dataoff = offset;

	do {
		ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
		if (ret <= 0) {
			/* No extent found -> no data */
			if (ret == 0)
				ret = -ENXIO;
			inode_unlock(inode);
			return ret;
		}

		last = es.es_lblk;
		if (last != start)
			dataoff = (loff_t)last << blkbits;
		if (!ext4_es_is_unwritten(&es))
			break;

		/*
		 * If there is an unwritten extent at this offset, it is
		 * treated as data or as a hole according to whether the page
		 * cache has data for it or not.
		 */
		if (ext4_find_unwritten_pgoff(inode, SEEK_DATA,
					      es.es_lblk + es.es_len, &dataoff))
			break;
		last += es.es_len;
		dataoff = (loff_t)last << blkbits;
		cond_resched();
	} while (last <= end);

	inode_unlock(inode);

	if (dataoff > isize)
		return -ENXIO;

	return vfs_setpos(file, dataoff, maxsize);
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t holeoff, isize;
	int blkbits;
	int ret;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize) {
		inode_unlock(inode);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	holeoff = offset;

	do {
		ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
		if (ret < 0) {
			inode_unlock(inode);
			return ret;
		}
		/* Found a hole? */
		if (ret == 0 || es.es_lblk > last) {
			if (last != start)
				holeoff = (loff_t)last << blkbits;
			break;
		}
		/*
		 * If there is an unwritten extent at this offset, it is
		 * treated as data or as a hole according to whether the page
		 * cache has data for it or not.
		 */
		if (ext4_es_is_unwritten(&es) &&
		    ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
					      last + es.es_len, &holeoff))
			break;

		last += es.es_len;
		holeoff = (loff_t)last << blkbits;
		cond_resched();
	} while (last <= end);

	inode_unlock(inode);

	if (holeoff > isize)
		holeoff = isize;

	return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
		return ext4_seek_data(file, offset, maxbytes);
	case SEEK_HOLE:
		return ext4_seek_hole(file, offset, maxbytes);
	}

	return -EINVAL;
}

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= ext4_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_getattr,
	.listxattr	= ext4_listxattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};