// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *      (jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/mman.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#ifdef CONFIG_FS_DAX
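/*
 * DAX reads take the shared inode lock so that S_DAX cannot change under
 * us, then go through dax_iomap_rw(); an inode that turns out not to be
 * DAX falls back to the generic buffered read path.
 */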
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        if (iocb->ki_flags & IOCB_NOWAIT) {
                if (!inode_trylock_shared(inode))
                        return -EAGAIN;
        } else {
                inode_lock_shared(inode);
        }
        /*
         * Recheck under the inode lock - at this point we are sure it
         * cannot change anymore.
         */
        if (!IS_DAX(inode)) {
                inode_unlock_shared(inode);
                /* Fall back to buffered IO in case we cannot support DAX */
                return generic_file_read_iter(iocb, to);
        }
        ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
        inode_unlock_shared(inode);

        file_accessed(iocb->ki_filp);
        return ret;
}
#endif

static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        if (unlikely(ext4_forced_shutdown(EXT4_SB(file_inode(iocb->ki_filp)->i_sb))))
                return -EIO;

        if (!iov_iter_count(to))
                return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
        if (IS_DAX(file_inode(iocb->ki_filp)))
                return ext4_dax_read_iter(iocb, to);
#endif
        return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
        if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
                ext4_alloc_da_blocks(inode);
                ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
        }
        /* if we are the last writer on the inode, drop the block reservation */
        if ((filp->f_mode & FMODE_WRITE) &&
                        (atomic_read(&inode->i_writecount) == 1) &&
                        !EXT4_I(inode)->i_reserved_data_blocks) {
                down_write(&EXT4_I(inode)->i_data_sem);
                ext4_discard_preallocations(inode);
                up_write(&EXT4_I(inode)->i_data_sem);
        }
        if (is_dx(inode) && filp->private_data)
                ext4_htree_free_dir_info(filp->private_data);

        return 0;
}

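/*
 * Wait until all in-flight conversions of unwritten extents on this inode
 * have drained, i.e. until i_unwritten reaches zero.
 */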
static void ext4_unwritten_wait(struct inode *inode)
{
        wait_queue_head_t *wq = ext4_ioend_wq(inode);

        wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
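/*
 * Example (assuming a 4096-byte block size and a block-aligned user
 * buffer): a 512-byte AIO write at offset 4096 is unaligned, since its
 * length does not cover a full block and it therefore shares a partial
 * block with any neighbouring IO; a 4096-byte write at offset 8192 is
 * aligned. Writes starting at or beyond the block-aligned EOF have no
 * existing partial block to share and are treated as aligned.
 */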
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
        struct super_block *sb = inode->i_sb;
        int blockmask = sb->s_blocksize - 1;

        if (pos >= ALIGN(i_size_read(inode), sb->s_blocksize))
                return 0;

        if ((pos | iov_iter_alignment(from)) & blockmask)
                return 1;

        return 0;
}

/* Is IO overwriting allocated and initialized blocks? */
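/*
 * The check below is a read-only block-map lookup: ext4_map_blocks() is
 * called with a NULL handle and no allocation flags, so it only reports
 * what is already on disk. The write counts as an overwrite only if the
 * whole range lies within i_size and every block in it is both allocated
 * and initialized (EXT4_MAP_MAPPED).
 */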
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
        struct ext4_map_blocks map;
        unsigned int blkbits = inode->i_blkbits;
        int err, blklen;

        if (pos + len > i_size_read(inode))
                return false;

        map.m_lblk = pos >> blkbits;
        map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
        blklen = map.m_len;

        err = ext4_map_blocks(NULL, inode, &map, 0);
        /*
         * 'err == blklen' means that all of the blocks have been
         * preallocated, regardless of whether they have been initialized
         * or not. To exclude unwritten extents, we need to check m_flags.
         */
        return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}

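/*
 * Common checks shared by the DAX and regular write paths: run the generic
 * VFS write checks, refuse writes to immutable inodes, and clamp the
 * request to s_bitmap_maxbytes for block-mapped (non-extent) files.
 */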
static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        ret = generic_write_checks(iocb, from);
        if (ret <= 0)
                return ret;

        if (unlikely(IS_IMMUTABLE(inode)))
                return -EPERM;

        /*
         * If we have encountered a bitmap-format file, the size limit
         * is smaller than s_maxbytes, which is for extent-mapped files.
         */
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
                struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

                if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
                        return -EFBIG;
                iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
        }
        return iov_iter_count(from);
}

#ifdef CONFIG_FS_DAX
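/*
 * DAX writes hold the exclusive inode lock for the whole operation (taken
 * with a trylock for IOCB_NOWAIT callers), strip privileges, update the
 * timestamps, and then write through dax_iomap_rw(); generic_write_sync()
 * takes care of O_SYNC/O_DSYNC semantics afterwards.
 */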
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        if (iocb->ki_flags & IOCB_NOWAIT) {
                if (!inode_trylock(inode))
                        return -EAGAIN;
        } else {
                inode_lock(inode);
        }
        ret = ext4_write_checks(iocb, from);
        if (ret <= 0)
                goto out;
        ret = file_remove_privs(iocb->ki_filp);
        if (ret)
                goto out;
        ret = file_update_time(iocb->ki_filp);
        if (ret)
                goto out;

        ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
out:
        inode_unlock(inode);
        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
        return ret;
}
#endif

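/*
 * The main write entry point: DAX inodes are diverted to
 * ext4_dax_write_iter(), IOCB_NOWAIT is honoured only for direct IO, and
 * unaligned direct AIO is serialized against other IO both before and
 * after the write. An aligned direct overwrite of initialized blocks may
 * be flagged in iocb->private so the DIO path can use lighter locking.
 */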
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        int o_direct = iocb->ki_flags & IOCB_DIRECT;
        int unaligned_aio = 0;
        int overwrite = 0;
        ssize_t ret;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;

#ifdef CONFIG_FS_DAX
        if (IS_DAX(inode))
                return ext4_dax_write_iter(iocb, from);
#endif
        if (!o_direct && (iocb->ki_flags & IOCB_NOWAIT))
                return -EOPNOTSUPP;

        if (!inode_trylock(inode)) {
                if (iocb->ki_flags & IOCB_NOWAIT)
                        return -EAGAIN;
                inode_lock(inode);
        }

        ret = ext4_write_checks(iocb, from);
        if (ret <= 0)
                goto out;

        /*
         * Unaligned direct AIOs must be serialized with respect to each
         * other, as the zeroing of partial blocks by two competing
         * unaligned AIOs can result in data corruption.
         */
        if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
            !is_sync_kiocb(iocb) &&
            ext4_unaligned_aio(inode, from, iocb->ki_pos)) {
                unaligned_aio = 1;
                ext4_unwritten_wait(inode);
        }

        iocb->private = &overwrite;
        /* Check whether we do a DIO overwrite or not */
        if (o_direct && !unaligned_aio) {
                if (ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from))) {
                        if (ext4_should_dioread_nolock(inode))
                                overwrite = 1;
                } else if (iocb->ki_flags & IOCB_NOWAIT) {
                        ret = -EAGAIN;
                        goto out;
                }
        }

        ret = __generic_file_write_iter(iocb, from);
        /*
         * An unaligned direct AIO must be the only IO in flight; otherwise,
         * an overlapping aligned IO issued after the unaligned one could
         * result in data corruption.
         */
        if (ret == -EIOCBQUEUED && unaligned_aio)
                ext4_unwritten_wait(inode);
        inode_unlock(inode);

        if (ret > 0)
                ret = generic_write_sync(iocb, ret);

        return ret;

out:
        inode_unlock(inode);
        return ret;
}

#ifdef CONFIG_FS_DAX
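/*
 * DAX page-fault handler. Faults that may really modify the file (writes
 * to shared mappings) are wrapped in a journal handle so that block
 * allocation is logged, and are retried on ENOSPC; COW faults deliberately
 * skip the journal so read-only mounts keep working. Synchronous (MAP_SYNC)
 * faults are finished with dax_finish_sync_fault(), which makes the
 * allocation durable before installing a writeable entry.
 */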
static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf,
                enum page_entry_size pe_size)
{
        int error = 0;
        vm_fault_t result;
        int retries = 0;
        handle_t *handle = NULL;
        struct inode *inode = file_inode(vmf->vma->vm_file);
        struct super_block *sb = inode->i_sb;

        /*
         * We have to distinguish real writes from writes which will result in a
         * COW page; COW writes should *not* poke the journal (the file will not
         * be changed). Doing so would cause unintended failures when mounted
         * read-only.
         *
         * We check for VM_SHARED rather than vmf->cow_page since the latter is
         * unset for pe_size != PE_SIZE_PTE (i.e. only in do_cow_fault); for
         * other sizes, dax_iomap_fault will handle splitting / fallback so that
         * we eventually come back with a COW page.
         */
        bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
                (vmf->vma->vm_flags & VM_SHARED);
        pfn_t pfn;

        if (write) {
                sb_start_pagefault(sb);
                file_update_time(vmf->vma->vm_file);
                down_read(&EXT4_I(inode)->i_mmap_sem);
retry:
                handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
                                               EXT4_DATA_TRANS_BLOCKS(sb));
                if (IS_ERR(handle)) {
                        up_read(&EXT4_I(inode)->i_mmap_sem);
                        sb_end_pagefault(sb);
                        return VM_FAULT_SIGBUS;
                }
        } else {
                down_read(&EXT4_I(inode)->i_mmap_sem);
        }
        result = dax_iomap_fault(vmf, pe_size, &pfn, &error, &ext4_iomap_ops);
        if (write) {
                ext4_journal_stop(handle);

                if ((result & VM_FAULT_ERROR) && error == -ENOSPC &&
                    ext4_should_retry_alloc(sb, &retries))
                        goto retry;
                /* Handling synchronous page fault? */
                if (result & VM_FAULT_NEEDDSYNC)
                        result = dax_finish_sync_fault(vmf, pe_size, pfn);
                up_read(&EXT4_I(inode)->i_mmap_sem);
                sb_end_pagefault(sb);
        } else {
                up_read(&EXT4_I(inode)->i_mmap_sem);
        }

        return result;
}

static vm_fault_t ext4_dax_fault(struct vm_fault *vmf)
{
        return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
        .fault          = ext4_dax_fault,
        .huge_fault     = ext4_dax_huge_fault,
        .page_mkwrite   = ext4_dax_fault,
        .pfn_mkwrite    = ext4_dax_fault,
};
#else
#define ext4_dax_vm_ops ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
        .fault          = ext4_filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = ext4_page_mkwrite,
};

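/*
 * mmap() picks the vm_operations for the mapping: DAX inodes get
 * ext4_dax_vm_ops (with VM_HUGEPAGE set so huge mappings can be tried),
 * while everything else gets the page-cache based ext4_file_vm_ops.
 */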
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file->f_mapping->host;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;

        /*
         * We don't support synchronous mappings for non-DAX files. At least
         * until someone comes up with a sensible use case.
         */
        if (!IS_DAX(file_inode(file)) && (vma->vm_flags & VM_SYNC))
                return -EOPNOTSUPP;

        file_accessed(file);
        if (IS_DAX(file_inode(file))) {
                vma->vm_ops = &ext4_dax_vm_ops;
                vma->vm_flags |= VM_HUGEPAGE;
        } else {
                vma->vm_ops = &ext4_file_vm_ops;
        }
        return 0;
}

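/*
 * Record the path this filesystem was first mounted on in the superblock's
 * s_last_mounted field. This runs at most once per mount (guarded by
 * EXT4_MF_MNTDIR_SAMPLED) and is skipped entirely on read-only mounts.
 */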
static int ext4_sample_last_mounted(struct super_block *sb,
                                    struct vfsmount *mnt)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct path path;
        char buf[64], *cp;
        handle_t *handle;
        int err;

        if (likely(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED))
                return 0;

        if (sb_rdonly(sb) || !sb_start_intwrite_trylock(sb))
                return 0;

        sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
        /*
         * Sample where the filesystem has been mounted and
         * store it in the superblock for sysadmin convenience
         * when trying to sort through large numbers of block
         * devices or filesystem images.
         */
        memset(buf, 0, sizeof(buf));
        path.mnt = mnt;
        path.dentry = mnt->mnt_root;
        cp = d_path(&path, buf, sizeof(buf));
        err = 0;
        if (IS_ERR(cp))
                goto out;

        handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
        err = PTR_ERR(handle);
        if (IS_ERR(handle))
                goto out;
        BUFFER_TRACE(sbi->s_sbh, "get_write_access");
        err = ext4_journal_get_write_access(handle, sbi->s_sbh);
        if (err)
                goto out_journal;
        strlcpy(sbi->s_es->s_last_mounted, cp,
                sizeof(sbi->s_es->s_last_mounted));
        ext4_handle_dirty_super(handle, sb);
out_journal:
        ext4_journal_stop(handle);
out:
        sb_end_intwrite(sb);
        return err;
}

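/*
 * Open hook for regular files: sample the mount point, run the fscrypt
 * open checks, attach the jbd2 inode for writers, advertise FMODE_NOWAIT
 * support, and finish with the quota open hook.
 */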
static int ext4_file_open(struct inode *inode, struct file *filp)
{
        int ret;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;

        ret = ext4_sample_last_mounted(inode->i_sb, filp->f_path.mnt);
        if (ret)
                return ret;

        ret = fscrypt_file_open(inode, filp);
        if (ret)
                return ret;

        /*
         * Set up the jbd2_inode if we are opening the inode for
         * writing and the journal is present
         */
        if (filp->f_mode & FMODE_WRITE) {
                ret = ext4_inode_attach_jinode(inode);
                if (ret < 0)
                        return ret;
        }

        filp->f_mode |= FMODE_NOWAIT;
        return dquot_file_open(inode, filp);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes value
 * for each. SEEK_HOLE and SEEK_DATA are instead resolved through the iomap
 * seek helpers under the shared inode lock.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        loff_t maxbytes;

        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
        else
                maxbytes = inode->i_sb->s_maxbytes;

        switch (whence) {
        default:
                return generic_file_llseek_size(file, offset, whence,
                                                maxbytes, i_size_read(inode));
        case SEEK_HOLE:
                inode_lock_shared(inode);
                offset = iomap_seek_hole(inode, offset, &ext4_iomap_ops);
                inode_unlock_shared(inode);
                break;
        case SEEK_DATA:
                inode_lock_shared(inode);
                offset = iomap_seek_data(inode, offset, &ext4_iomap_ops);
                inode_unlock_shared(inode);
                break;
        }

        if (offset < 0)
                return offset;
        return vfs_setpos(file, offset, maxbytes);
}

const struct file_operations ext4_file_operations = {
        .llseek         = ext4_llseek,
        .read_iter      = ext4_file_read_iter,
        .write_iter     = ext4_file_write_iter,
        .unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ext4_compat_ioctl,
#endif
        .mmap           = ext4_file_mmap,
        .mmap_supported_flags = MAP_SYNC,
        .open           = ext4_file_open,
        .release        = ext4_release_file,
        .fsync          = ext4_sync_file,
        .get_unmapped_area = thp_get_unmapped_area,
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .fallocate      = ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
        .setattr        = ext4_setattr,
        .getattr        = ext4_file_getattr,
        .listxattr      = ext4_listxattr,
        .get_acl        = ext4_get_acl,
        .set_acl        = ext4_set_acl,
        .fiemap         = ext4_fiemap,
};