/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
	    (atomic_read(&inode->i_writecount) == 1) &&
	    !EXT4_I(inode)->i_reserved_data_blocks) {
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

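/*
 * Block until all conversions of unwritten extents pending against this
 * inode have completed, i.e. until i_unwritten drops to zero.
 */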
static void ext4_unwritten_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;

	if (pos >= i_size_read(inode))
		return 0;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return 1;

	return 0;
}

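/*
 * Illustrative example: with a 4k block size, blockmask is 0xfff.  A
 * 4096-byte write at offset 8192 from a block-aligned buffer is aligned
 * (offset, length and buffer address are all block multiples), while a
 * 512-byte write at the same offset is unaligned, since it covers only
 * part of its final block.  Writes starting at or beyond i_size are
 * always treated as aligned here.
 */

/*
 * ext4_file_write_iter() serializes unaligned direct AIO against other
 * AIO on the inode, clamps writes on bitmap-mapped (non-extent) files to
 * s_bitmap_maxbytes, and, for direct IO, detects overwrites of
 * already-mapped, initialized blocks; the result is handed to the DIO
 * path through iocb->private.
 */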
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	int o_direct = iocb->ki_flags & IOCB_DIRECT;
	int unaligned_aio = 0;
	int overwrite = 0;
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	/*
	 * Unaligned direct AIO must be serialized among each other as zeroing
	 * of partial blocks of two competing unaligned AIOs can result in data
	 * corruption.
	 */
	if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
	    !is_sync_kiocb(iocb) &&
	    ext4_unaligned_aio(inode, from, iocb->ki_pos)) {
		unaligned_aio = 1;
		ext4_unwritten_wait(inode);
	}

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes) {
			ret = -EFBIG;
			goto out;
		}
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}

	iocb->private = &overwrite;
	if (o_direct) {
		size_t length = iov_iter_count(from);
		loff_t pos = iocb->ki_pos;

		/* check whether we do a DIO overwrite or not */
		if (ext4_should_dioread_nolock(inode) && !unaligned_aio &&
		    pos + length <= i_size_read(inode)) {
			struct ext4_map_blocks map;
			unsigned int blkbits = inode->i_blkbits;
			int err, len;

			map.m_lblk = pos >> blkbits;
			map.m_len = EXT4_MAX_BLOCKS(length, pos, blkbits);
			len = map.m_len;

			err = ext4_map_blocks(NULL, inode, &map, 0);
			/*
			 * 'err == len' means that all of the blocks have
			 * been preallocated, whether or not they are
			 * initialized.  To exclude unwritten extents, we
			 * also need to check m_flags.  Two conditions
			 * indicate an initialized extent: 1) if we hit the
			 * extent cache, the EXT4_MAP_MAPPED flag is
			 * returned; 2) if we do a real lookup, no flags
			 * are returned.  So we must check both conditions.
			 */
			if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
				overwrite = 1;
		}
	}

	ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;

out:
	inode_unlock(inode);
	return ret;
}

#ifdef CONFIG_FS_DAX
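/*
 * Page fault handler for DAX mappings.  Write faults are bracketed by
 * sb_start_pagefault()/sb_end_pagefault() and run under a journal handle,
 * since satisfying the fault may require block allocation.  i_mmap_sem is
 * held shared across the fault to serialize against truncate and hole
 * punching.
 */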
static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int result;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vma->vm_file);
	struct super_block *sb = inode->i_sb;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
						EXT4_DATA_TRANS_BLOCKS(sb));
	} else
		down_read(&EXT4_I(inode)->i_mmap_sem);

	if (IS_ERR(handle))
		result = VM_FAULT_SIGBUS;
	else
		result = dax_fault(vma, vmf, ext4_dax_get_block);

	if (write) {
		if (!IS_ERR(handle))
			ext4_journal_stop(handle);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else
		up_read(&EXT4_I(inode)->i_mmap_sem);

	return result;
}

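/*
 * PMD (huge page) fault handler for DAX mappings.  Same locking and
 * journalling pattern as ext4_dax_fault(), but the handle reserves enough
 * credits to map PMD_SIZE / PAGE_SIZE blocks.
 */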
static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
						pmd_t *pmd, unsigned int flags)
{
	int result;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vma->vm_file);
	struct super_block *sb = inode->i_sb;
	bool write = flags & FAULT_FLAG_WRITE;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
				ext4_chunk_trans_blocks(inode,
							PMD_SIZE / PAGE_SIZE));
	} else
		down_read(&EXT4_I(inode)->i_mmap_sem);

	if (IS_ERR(handle))
		result = VM_FAULT_SIGBUS;
	else
		result = dax_pmd_fault(vma, addr, pmd, flags,
				       ext4_dax_get_block);

	if (write) {
		if (!IS_ERR(handle))
			ext4_journal_stop(handle);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else
		up_read(&EXT4_I(inode)->i_mmap_sem);

	return result;
}

/*
 * Handle write faults for VM_MIXEDMAP mappings.  As in the ext4_dax_fault()
 * handler, we check for races against truncate.  Note that since we cycle
 * through i_mmap_sem, we are sure that any hole punching that began before
 * we were called is finished by now, so if it included part of the file we
 * are working on, our pte will get unmapped and the check for pte_same() in
 * wp_pfn_shared() fails.  Thus the fault gets retried and things work out
 * as desired.
 */
static int ext4_dax_pfn_mkwrite(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct super_block *sb = inode->i_sb;
	loff_t size;
	int ret;

	sb_start_pagefault(sb);
	file_update_time(vma->vm_file);
	down_read(&EXT4_I(inode)->i_mmap_sem);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		ret = VM_FAULT_SIGBUS;
	else
		ret = dax_pfn_mkwrite(vma, vmf);
	up_read(&EXT4_I(inode)->i_mmap_sem);
	sb_end_pagefault(sb);

	return ret;
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.pmd_fault	= ext4_dax_pmd_fault,
	.page_mkwrite	= ext4_dax_fault,
	.pfn_mkwrite	= ext4_dax_pfn_mkwrite,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= ext4_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= ext4_page_mkwrite,
};

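/*
 * For encrypted inodes, the encryption key must be available before the
 * mapping is set up.  DAX-capable inodes get ext4_dax_vm_ops and are
 * marked VM_MIXEDMAP (the mapping mixes pfn-backed and struct-page-backed
 * pages) and VM_HUGEPAGE so that PMD faults can be attempted.
 */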
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_mapping->host;

	if (ext4_encrypted_inode(inode)) {
		int err = fscrypt_get_encryption_info(inode);
		if (err)
			return 0;
		if (!fscrypt_has_encryption_key(inode))
			return -ENOKEY;
	}
	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}

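/*
 * On the first open after a read-write mount, sample the mount point into
 * the superblock's s_last_mounted field as a convenience for sysadmins
 * sorting through large numbers of filesystem images.  For encrypted
 * inodes, verify that the key is available and that the encryption
 * context is consistent with the parent directory before allowing the
 * open to proceed.
 */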
static int ext4_file_open(struct inode *inode, struct file *filp)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct vfsmount *mnt = filp->f_path.mnt;
	struct dentry *dir;
	struct path path;
	char buf[64], *cp;
	int ret;

	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
		     !(sb->s_flags & MS_RDONLY))) {
		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
		/*
		 * Sample where the filesystem has been mounted and
		 * store it in the superblock for sysadmin convenience
		 * when trying to sort through large numbers of block
		 * devices or filesystem images.
		 */
		memset(buf, 0, sizeof(buf));
		path.mnt = mnt;
		path.dentry = mnt->mnt_root;
		cp = d_path(&path, buf, sizeof(buf));
		if (!IS_ERR(cp)) {
			handle_t *handle;
			int err;

			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
			if (IS_ERR(handle))
				return PTR_ERR(handle);
			BUFFER_TRACE(sbi->s_sbh, "get_write_access");
			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
			if (err) {
				ext4_journal_stop(handle);
				return err;
			}
			strlcpy(sbi->s_es->s_last_mounted, cp,
				sizeof(sbi->s_es->s_last_mounted));
			ext4_handle_dirty_super(handle, sb);
			ext4_journal_stop(handle);
		}
	}
	if (ext4_encrypted_inode(inode)) {
		ret = fscrypt_get_encryption_info(inode);
		if (ret)
			return -EACCES;
		if (!fscrypt_has_encryption_key(inode))
			return -ENOKEY;
	}

	dir = dget_parent(file_dentry(filp));
	if (ext4_encrypted_inode(d_inode(dir)) &&
	    !fscrypt_has_permitted_context(d_inode(dir), inode)) {
		ext4_warning(inode->i_sb,
			     "Inconsistent encryption contexts: %lu/%lu",
			     (unsigned long) d_inode(dir)->i_ino,
			     (unsigned long) inode->i_ino);
		dput(dir);
		return -EPERM;
	}
	dput(dir);
	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}
	return dquot_file_open(inode, filp);
}

/*
 * Here we use ext4_map_blocks() to get a block mapping for an extent-based
 * file rather than ext4_ext_walk_space() because we can introduce
 * SEEK_DATA/SEEK_HOLE for block-mapped and extent-mapped files in the same
 * function.  When the extent status tree has been fully implemented, it
 * will track all extent status for a file and we can directly use it to
 * retrieve the offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we need to look up
 * the page cache to check whether or not there is data between
 * [startoff, endoff], because if this range contains an unwritten extent,
 * we classify the extent as data or as a hole according to whether the
 * page cache has data or not.
 */
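/*
 * Illustrative example: for a file whose first blocks are written,
 * followed by a fallocated (unwritten) extent with no data in the page
 * cache over it, SEEK_DATA at offset 0 returns 0, while SEEK_HOLE
 * returns the start of the unwritten extent, since its pages read back
 * as zeroes.
 */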
static int ext4_find_unwritten_pgoff(struct inode *inode,
				     int whence,
				     ext4_lblk_t end_blk,
				     loff_t *offset)
{
	struct pagevec pvec;
	unsigned int blkbits;
	pgoff_t index;
	pgoff_t end;
	loff_t endoff;
	loff_t startoff;
	loff_t lastoff;
	int found = 0;

	blkbits = inode->i_sb->s_blocksize_bits;
	startoff = *offset;
	lastoff = startoff;
	endoff = (loff_t)end_blk << blkbits;

	index = startoff >> PAGE_SHIFT;
	end = endoff >> PAGE_SHIFT;

	pagevec_init(&pvec, 0);
	do {
		int i, num;
		unsigned long nr_pages;

		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  (pgoff_t)num);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			/*
			 * If the current offset is smaller than the offset
			 * of the next page found, there is a hole at this
			 * offset.
			 */
			if (whence == SEEK_HOLE && lastoff < endoff &&
			    lastoff < page_offset(pvec.pages[i])) {
				found = 1;
				*offset = lastoff;
				goto out;
			}

			if (page->index > end)
				goto out;

			lock_page(page);

			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			lastoff = page_offset(page);
			bh = head = page_buffers(page);
			do {
				if (buffer_uptodate(bh) ||
				    buffer_unwritten(bh)) {
					if (whence == SEEK_DATA)
						found = 1;
				} else {
					if (whence == SEEK_HOLE)
						found = 1;
				}
				if (found) {
					*offset = max_t(loff_t,
							startoff, lastoff);
					unlock_page(page);
					goto out;
				}
				lastoff += bh->b_size;
				bh = bh->b_this_page;
			} while (bh != head);

			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/* Fewer pages than requested were found; we are done. */
		if (nr_pages < num)
			break;

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

	if (whence == SEEK_HOLE && lastoff < endoff) {
		found = 1;
		*offset = lastoff;
	}
out:
	pagevec_release(&pvec);
	return found;
}

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t dataoff, isize;
	int blkbits;
	int ret;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize) {
		inode_unlock(inode);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	dataoff = offset;

	do {
		ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
		if (ret <= 0) {
			/* No extent found -> no data */
			if (ret == 0)
				ret = -ENXIO;
			inode_unlock(inode);
			return ret;
		}

		last = es.es_lblk;
		if (last != start)
			dataoff = (loff_t)last << blkbits;
		if (!ext4_es_is_unwritten(&es))
			break;

		/*
		 * If there is an unwritten extent at this offset, it is
		 * treated as data or as a hole according to whether the
		 * page cache has data over it or not.
		 */
		if (ext4_find_unwritten_pgoff(inode, SEEK_DATA,
					      es.es_lblk + es.es_len, &dataoff))
			break;
		last += es.es_len;
		dataoff = (loff_t)last << blkbits;
		cond_resched();
	} while (last <= end);

	inode_unlock(inode);

	if (dataoff > isize)
		return -ENXIO;

	return vfs_setpos(file, dataoff, maxsize);
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t holeoff, isize;
	int blkbits;
	int ret;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize) {
		inode_unlock(inode);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	holeoff = offset;

	do {
		ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
		if (ret < 0) {
			inode_unlock(inode);
			return ret;
		}
		/* Found a hole? */
		if (ret == 0 || es.es_lblk > last) {
			if (last != start)
				holeoff = (loff_t)last << blkbits;
			break;
		}
		/*
		 * If there is an unwritten extent at this offset, it is
		 * treated as data or as a hole according to whether the
		 * page cache has data over it or not.
		 */
		if (ext4_es_is_unwritten(&es) &&
		    ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
					      last + es.es_len, &holeoff))
			break;

		last += es.es_len;
		holeoff = (loff_t)last << blkbits;
		cond_resched();
	} while (last <= end);

	inode_unlock(inode);

	if (holeoff > isize)
		holeoff = isize;

	return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
		return ext4_seek_data(file, offset, maxbytes);
	case SEEK_HOLE:
		return ext4_seek_hole(file, offset, maxbytes);
	}

	return -EINVAL;
}

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.unlocked_ioctl	= ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_getattr,
	.listxattr	= ext4_listxattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};