/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 *  linux/fs/minix/file.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * ext4 fs regular file handling primitives
 *
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
			(atomic_read(&inode->i_writecount) == 1) &&
			!EXT4_I(inode)->i_reserved_data_blocks)
	{
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
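	/* Free any cached htree directory-walk state attached to this file. */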
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

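/*
 * Block until all pending unwritten-extent conversions on this inode
 * have completed, i.e. until i_unwritten drops to zero.
 */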
static void ext4_unwritten_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete. Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block. If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;

	if (pos >= ALIGN(i_size_read(inode), sb->s_blocksize))
		return 0;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return 1;

	return 0;
}

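/*
 * Write path entry point (write(2), pwritev(2), aio writes). Takes the
 * inode lock, enforces the smaller size limit for bitmap-mapped files,
 * serializes unaligned direct AIO, and detects direct-IO overwrites of
 * already-allocated blocks before calling __generic_file_write_iter().
 */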
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	int o_direct = iocb->ki_flags & IOCB_DIRECT;
	int unaligned_aio = 0;
	int overwrite = 0;
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	/*
	 * Unaligned direct AIO requests must be serialized with each other,
	 * as the zeroing of partial blocks by two competing unaligned AIOs
	 * can result in data corruption.
	 */
	if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
	    !is_sync_kiocb(iocb) &&
	    ext4_unaligned_aio(inode, from, iocb->ki_pos)) {
		unaligned_aio = 1;
		ext4_unwritten_wait(inode);
	}

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes) {
			ret = -EFBIG;
			goto out;
		}
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}

	iocb->private = &overwrite;
	if (o_direct) {
		size_t length = iov_iter_count(from);
		loff_t pos = iocb->ki_pos;

		/* check whether we do a DIO overwrite or not */
		if (ext4_should_dioread_nolock(inode) && !unaligned_aio &&
		    pos + length <= i_size_read(inode)) {
			struct ext4_map_blocks map;
			unsigned int blkbits = inode->i_blkbits;
			int err, len;

			map.m_lblk = pos >> blkbits;
			map.m_len = EXT4_MAX_BLOCKS(length, pos, blkbits);
			len = map.m_len;

			err = ext4_map_blocks(NULL, inode, &map, 0);
			/*
			 * 'err == len' means that all of the blocks have
			 * been preallocated, whether or not they are
			 * initialized. To exclude unwritten extents we
			 * also need to check m_flags. There are two
			 * conditions that indicate an initialized extent:
			 * 1) if we hit the extent cache, the
			 * EXT4_MAP_MAPPED flag is returned; 2) if we do a
			 * real lookup, no flags are returned. So we check
			 * both conditions.
			 */
			if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
				overwrite = 1;
		}
	}

	ret = __generic_file_write_iter(iocb, from);
	/*
	 * Unaligned direct AIO must be the only IO in flight. Otherwise
	 * overlapping aligned IO after an unaligned one might result in
	 * data corruption.
	 */
	if (ret == -EIOCBQUEUED && unaligned_aio)
		ext4_unwritten_wait(inode);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;

out:
	inode_unlock(inode);
	return ret;
}

#ifdef CONFIG_FS_DAX
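/*
 * Fault handler for DAX mappings. Write faults are wrapped in a journal
 * handle since dax_fault() may need to allocate blocks; both read and
 * write faults hold i_mmap_sem to protect against truncate.
 */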
static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int result;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vma->vm_file);
	struct super_block *sb = inode->i_sb;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
						EXT4_DATA_TRANS_BLOCKS(sb));
	} else
		down_read(&EXT4_I(inode)->i_mmap_sem);

	if (IS_ERR(handle))
		result = VM_FAULT_SIGBUS;
	else
		result = dax_fault(vma, vmf, ext4_dax_get_block);

	if (write) {
		if (!IS_ERR(handle))
			ext4_journal_stop(handle);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else
		up_read(&EXT4_I(inode)->i_mmap_sem);

	return result;
}

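/*
 * PMD (huge page) fault handler for DAX mappings; like ext4_dax_fault()
 * but the journal transaction is sized for a PMD's worth of blocks.
 */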
static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
						pmd_t *pmd, unsigned int flags)
{
	int result;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vma->vm_file);
	struct super_block *sb = inode->i_sb;
	bool write = flags & FAULT_FLAG_WRITE;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
				ext4_chunk_trans_blocks(inode,
							PMD_SIZE / PAGE_SIZE));
	} else
		down_read(&EXT4_I(inode)->i_mmap_sem);

	if (IS_ERR(handle))
		result = VM_FAULT_SIGBUS;
	else
		result = dax_pmd_fault(vma, addr, pmd, flags,
					ext4_dax_get_block);

	if (write) {
		if (!IS_ERR(handle))
			ext4_journal_stop(handle);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else
		up_read(&EXT4_I(inode)->i_mmap_sem);

	return result;
}

/*
 * Handle write fault for VM_MIXEDMAP mappings. Similarly to the
 * ext4_dax_fault() handler, we check for races against truncate. Note that
 * since we cycle through i_mmap_sem, we are sure that any hole punching that
 * began before we were called has finished by now, and so if it included part
 * of the file we are working on, our pte will get unmapped and the check for
 * pte_same() in wp_pfn_shared() fails. Thus the fault gets retried and things
 * work out as desired.
 */
static int ext4_dax_pfn_mkwrite(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct super_block *sb = inode->i_sb;
	loff_t size;
	int ret;

	sb_start_pagefault(sb);
	file_update_time(vma->vm_file);
	down_read(&EXT4_I(inode)->i_mmap_sem);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		ret = VM_FAULT_SIGBUS;
	else
		ret = dax_pfn_mkwrite(vma, vmf);
	up_read(&EXT4_I(inode)->i_mmap_sem);
	sb_end_pagefault(sb);

	return ret;
}

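/*
 * Note: .page_mkwrite reuses ext4_dax_fault(), so a write to a
 * previously read-faulted DAX page goes through the same allocation and
 * journalling path as an initial write fault.
 */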
static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.pmd_fault	= ext4_dax_pmd_fault,
	.page_mkwrite	= ext4_dax_fault,
	.pfn_mkwrite	= ext4_dax_pfn_mkwrite,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= ext4_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= ext4_page_mkwrite,
};

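/*
 * At mmap time, pick the fault handlers: DAX inodes get the DAX
 * handlers and are marked VM_MIXEDMAP | VM_HUGEPAGE; all other inodes
 * use the regular page-cache based handlers.
 */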
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}

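/*
 * Called at every open. On the first writable open after mount, the
 * mount point is sampled into the superblock's s_last_mounted; the
 * fscrypt, jbd2 and quota per-open hooks run afterwards.
 */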
static int ext4_file_open(struct inode *inode, struct file *filp)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct vfsmount *mnt = filp->f_path.mnt;
	struct path path;
	char buf[64], *cp;
	int ret;

	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
		     !(sb->s_flags & MS_RDONLY))) {
		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
		/*
		 * Sample where the filesystem has been mounted and
		 * store it in the superblock for sysadmin convenience
		 * when trying to sort through large numbers of block
		 * devices or filesystem images.
		 */
		memset(buf, 0, sizeof(buf));
		path.mnt = mnt;
		path.dentry = mnt->mnt_root;
		cp = d_path(&path, buf, sizeof(buf));
		if (!IS_ERR(cp)) {
			handle_t *handle;
			int err;

			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
			if (IS_ERR(handle))
				return PTR_ERR(handle);
			BUFFER_TRACE(sbi->s_sbh, "get_write_access");
			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
			if (err) {
				ext4_journal_stop(handle);
				return err;
			}
			strlcpy(sbi->s_es->s_last_mounted, cp,
				sizeof(sbi->s_es->s_last_mounted));
			ext4_handle_dirty_super(handle, sb);
			ext4_journal_stop(handle);
		}
	}

	ret = fscrypt_file_open(inode, filp);
	if (ret)
		return ret;

	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}
	return dquot_file_open(inode, filp);
}

/*
 * Here we use ext4_map_blocks() to get a block mapping for an extent-based
 * file rather than ext4_ext_walk_space() because we can implement
 * SEEK_DATA/SEEK_HOLE for block-mapped and extent-mapped files in the same
 * function. Once the extent status tree has been fully implemented, it will
 * track all extent status for a file and we can use it directly to
 * retrieve the offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we need to look up
 * the page cache to check whether there is any data in the range
 * [startoff, endoff], because if this range contains an unwritten extent,
 * we treat the extent as data or as a hole according to whether the
 * page cache has data for it.
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
				     int whence,
				     ext4_lblk_t end_blk,
				     loff_t *offset)
{
	struct pagevec pvec;
	unsigned int blkbits;
	pgoff_t index;
	pgoff_t end;
	loff_t endoff;
	loff_t startoff;
	loff_t lastoff;
	int found = 0;

	blkbits = inode->i_sb->s_blocksize_bits;
	startoff = *offset;
	lastoff = startoff;
	endoff = (loff_t)end_blk << blkbits;

	index = startoff >> PAGE_SHIFT;
	end = endoff >> PAGE_SHIFT;

	pagevec_init(&pvec, 0);
	do {
		int i, num;
		unsigned long nr_pages;

		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  (pgoff_t)num);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			/*
			 * If the current offset is smaller than the page
			 * offset, there is a hole at this offset.
			 */
			if (whence == SEEK_HOLE && lastoff < endoff &&
			    lastoff < page_offset(pvec.pages[i])) {
				found = 1;
				*offset = lastoff;
				goto out;
			}

			if (page->index > end)
				goto out;

			lock_page(page);

			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			if (page_has_buffers(page)) {
				lastoff = page_offset(page);
				bh = head = page_buffers(page);
				do {
					if (lastoff + bh->b_size <= startoff)
						goto next;
					if (buffer_uptodate(bh) ||
					    buffer_unwritten(bh)) {
						if (whence == SEEK_DATA)
							found = 1;
					} else {
						if (whence == SEEK_HOLE)
							found = 1;
					}
					if (found) {
						*offset = max_t(loff_t,
							startoff, lastoff);
						unlock_page(page);
						goto out;
					}
next:
					lastoff += bh->b_size;
					bh = bh->b_this_page;
				} while (bh != head);
			}

			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/* If we got fewer pages than we asked for, we are done. */
		if (nr_pages < num)
			break;

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

	if (whence == SEEK_HOLE && lastoff < endoff) {
		found = 1;
		*offset = lastoff;
	}
out:
	pagevec_release(&pvec);
	return found;
}

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t dataoff, isize;
	int blkbits;
	int ret;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset < 0 || offset >= isize) {
		inode_unlock(inode);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	dataoff = offset;

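	/*
	 * Walk extents from 'start'. A written extent is data; an unwritten
	 * extent is decided by looking at the page cache below.
	 */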
	do {
		ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
		if (ret <= 0) {
			/* No extent found -> no data */
			if (ret == 0)
				ret = -ENXIO;
			inode_unlock(inode);
			return ret;
		}

		last = es.es_lblk;
		if (last != start)
			dataoff = (loff_t)last << blkbits;
		if (!ext4_es_is_unwritten(&es))
			break;

		/*
		 * If there is an unwritten extent at this offset, it is
		 * treated as data or as a hole according to whether the
		 * page cache has data for it.
		 */
		if (ext4_find_unwritten_pgoff(inode, SEEK_DATA,
					      es.es_lblk + es.es_len, &dataoff))
			break;
		last += es.es_len;
		dataoff = (loff_t)last << blkbits;
		cond_resched();
	} while (last <= end);

	inode_unlock(inode);

	if (dataoff > isize)
		return -ENXIO;

	return vfs_setpos(file, dataoff, maxsize);
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t holeoff, isize;
	int blkbits;
	int ret;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset < 0 || offset >= isize) {
		inode_unlock(inode);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	holeoff = offset;

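	/*
	 * Walk extents from 'start'. A gap in the extent tree is a hole; an
	 * unwritten extent is decided by looking at the page cache below.
	 */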
	do {
		ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
		if (ret < 0) {
			inode_unlock(inode);
			return ret;
		}
		/* Found a hole? */
		if (ret == 0 || es.es_lblk > last) {
			if (last != start)
				holeoff = (loff_t)last << blkbits;
			break;
		}
		/*
		 * If there is an unwritten extent at this offset, it is
		 * treated as data or as a hole according to whether the
		 * page cache has data for it.
		 */
		if (ext4_es_is_unwritten(&es) &&
		    ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
					      last + es.es_len, &holeoff))
			break;

		last += es.es_len;
		holeoff = (loff_t)last << blkbits;
		cond_resched();
	} while (last <= end);

	inode_unlock(inode);

	if (holeoff > isize)
		holeoff = isize;

	return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
		return ext4_seek_data(file, offset, maxbytes);
	case SEEK_HOLE:
		return ext4_seek_hole(file, offset, maxbytes);
	}

	return -EINVAL;
}
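
/*
 * Illustrative userspace sketch (not part of the kernel source): one way
 * the SEEK_DATA/SEEK_HOLE support above is typically exercised, assuming
 * an open descriptor 'fd' on a sparse file; error handling is trimmed.
 *
 *	off_t off = 0, data, hole;
 *	while ((data = lseek(fd, off, SEEK_DATA)) != (off_t)-1) {
 *		hole = lseek(fd, data, SEEK_HOLE);
 *		printf("data: [%lld, %lld)\n",
 *		       (long long)data, (long long)hole);
 *		off = hole;
 *	}
 *	// lseek() fails with ENXIO once no data remains past 'off'.
 */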

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_getattr,
	.listxattr	= ext4_listxattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};