blob: 702f239cd6db534b20276bce62ae1d2176bdb664 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * mm/fadvise.c
3 *
4 * Copyright (C) 2002, Linus Torvalds
5 *
Francois Camie1f8e872008-10-15 22:01:59 -07006 * 11Jan2003 Andrew Morton
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 * Initial version.
8 */
9
10#include <linux/kernel.h>
11#include <linux/file.h>
12#include <linux/fs.h>
13#include <linux/mm.h>
14#include <linux/pagemap.h>
15#include <linux/backing-dev.h>
16#include <linux/pagevec.h>
17#include <linux/fadvise.h>
Andrew Mortonebcf28e2006-03-24 03:18:04 -080018#include <linux/writeback.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070019#include <linux/syscalls.h>
Mel Gorman67d46b22013-02-22 16:35:59 -080020#include <linux/swap.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070021
22#include <asm/unistd.h>
23
24/*
25 * POSIX_FADV_WILLNEED could set PG_Referenced, and POSIX_FADV_NOREUSE could
26 * deactivate the pages and clear PG_Referenced.
27 */
Al Viro4a0fd5b2013-01-21 15:16:58 -050028SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
Linus Torvalds1da177e2005-04-16 15:20:36 -070029{
Al Viro2903ff02012-08-28 12:52:22 -040030 struct fd f = fdget(fd);
Matthew Wilcoxe748dcd2015-02-16 15:59:12 -080031 struct inode *inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -070032 struct address_space *mapping;
33 struct backing_dev_info *bdi;
Andrew Mortonebcf28e2006-03-24 03:18:04 -080034 loff_t endbyte; /* inclusive */
Linus Torvalds1da177e2005-04-16 15:20:36 -070035 pgoff_t start_index;
36 pgoff_t end_index;
37 unsigned long nrpages;
38 int ret = 0;
39
Al Viro2903ff02012-08-28 12:52:22 -040040 if (!f.file)
Linus Torvalds1da177e2005-04-16 15:20:36 -070041 return -EBADF;
42
Matthew Wilcoxe748dcd2015-02-16 15:59:12 -080043 inode = file_inode(f.file);
44 if (S_ISFIFO(inode->i_mode)) {
Valentine Barshak87ba81d2006-01-08 01:03:44 -080045 ret = -ESPIPE;
46 goto out;
47 }
48
Al Viro2903ff02012-08-28 12:52:22 -040049 mapping = f.file->f_mapping;
Linus Torvalds1da177e2005-04-16 15:20:36 -070050 if (!mapping || len < 0) {
51 ret = -EINVAL;
52 goto out;
53 }
54
Shakeel Butt3a77d212017-09-08 16:13:05 -070055 bdi = inode_to_bdi(mapping->host);
56
57 if (IS_DAX(inode) || (bdi == &noop_backing_dev_info)) {
Masatake YAMATOb5beb1c2008-02-04 22:29:31 -080058 switch (advice) {
59 case POSIX_FADV_NORMAL:
60 case POSIX_FADV_RANDOM:
61 case POSIX_FADV_SEQUENTIAL:
62 case POSIX_FADV_WILLNEED:
63 case POSIX_FADV_NOREUSE:
64 case POSIX_FADV_DONTNEED:
65 /* no bad return value, but ignore advice */
66 break;
67 default:
68 ret = -EINVAL;
69 }
Carsten Ottefe77ba62005-06-23 22:05:29 -070070 goto out;
Masatake YAMATOb5beb1c2008-02-04 22:29:31 -080071 }
Carsten Ottefe77ba62005-06-23 22:05:29 -070072
Linus Torvalds1da177e2005-04-16 15:20:36 -070073 /* Careful about overflows. Len == 0 means "as much as possible" */
74 endbyte = offset + len;
75 if (!len || endbyte < len)
76 endbyte = -1;
Andrew Mortonebcf28e2006-03-24 03:18:04 -080077 else
78 endbyte--; /* inclusive */
Linus Torvalds1da177e2005-04-16 15:20:36 -070079
Linus Torvalds1da177e2005-04-16 15:20:36 -070080 switch (advice) {
81 case POSIX_FADV_NORMAL:
Al Viro2903ff02012-08-28 12:52:22 -040082 f.file->f_ra.ra_pages = bdi->ra_pages;
83 spin_lock(&f.file->f_lock);
84 f.file->f_mode &= ~FMODE_RANDOM;
85 spin_unlock(&f.file->f_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070086 break;
87 case POSIX_FADV_RANDOM:
Al Viro2903ff02012-08-28 12:52:22 -040088 spin_lock(&f.file->f_lock);
89 f.file->f_mode |= FMODE_RANDOM;
90 spin_unlock(&f.file->f_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070091 break;
92 case POSIX_FADV_SEQUENTIAL:
Al Viro2903ff02012-08-28 12:52:22 -040093 f.file->f_ra.ra_pages = bdi->ra_pages * 2;
94 spin_lock(&f.file->f_lock);
95 f.file->f_mode &= ~FMODE_RANDOM;
96 spin_unlock(&f.file->f_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070097 break;
98 case POSIX_FADV_WILLNEED:
Linus Torvalds1da177e2005-04-16 15:20:36 -070099 /* First and last PARTIAL page! */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300100 start_index = offset >> PAGE_SHIFT;
101 end_index = endbyte >> PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700102
103 /* Careful about overflow on the "+1" */
104 nrpages = end_index - start_index + 1;
105 if (!nrpages)
106 nrpages = ~0UL;
KOSAKI Motohiro3d3727c2012-07-31 16:42:50 -0700107
108 /*
109 * Ignore return value because fadvise() shall return
110 * success even if filesystem can't retrieve a hint,
111 */
Al Viro2903ff02012-08-28 12:52:22 -0400112 force_page_cache_readahead(mapping, f.file, start_index,
KOSAKI Motohiro3d3727c2012-07-31 16:42:50 -0700113 nrpages);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700114 break;
Andrew Morton60c371b2006-08-05 12:14:25 -0700115 case POSIX_FADV_NOREUSE:
116 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700117 case POSIX_FADV_DONTNEED:
Tejun Heo703c2702015-05-22 17:13:44 -0400118 if (!inode_write_congested(mapping->host))
Shawn Bohrerad8a1b52012-01-10 15:07:35 -0800119 __filemap_fdatawrite_range(mapping, offset, endbyte,
120 WB_SYNC_NONE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700121
Mel Gorman441c2282014-12-12 16:56:33 -0800122 /*
123 * First and last FULL page! Partial pages are deliberately
124 * preserved on the expectation that it is better to preserve
125 * needed memory than to discard unneeded memory.
126 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300127 start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT;
128 end_index = (endbyte >> PAGE_SHIFT);
Oleg Drokin18aba412016-06-08 15:33:59 -0700129 if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK) {
130 /* First page is tricky as 0 - 1 = -1, but pgoff_t
131 * is unsigned, so the end_index >= start_index
132 * check below would be true and we'll discard the whole
133 * file cache which is not what was asked.
134 */
135 if (end_index == 0)
136 break;
137
138 end_index--;
139 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700140
Mel Gorman67d46b22013-02-22 16:35:59 -0800141 if (end_index >= start_index) {
Johannes Weiner4dd72b42016-12-19 16:23:03 -0800142 unsigned long count;
143
144 /*
145 * It's common to FADV_DONTNEED right after
146 * the read or write that instantiates the
147 * pages, in which case there will be some
148 * sitting on the local LRU cache. Try to
149 * avoid the expensive remote drain and the
150 * second cache tree walk below by flushing
151 * them out right away.
152 */
153 lru_add_drain();
154
155 count = invalidate_mapping_pages(mapping,
Mel Gorman67d46b22013-02-22 16:35:59 -0800156 start_index, end_index);
157
158 /*
159 * If fewer pages were invalidated than expected then
160 * it is possible that some of the pages were on
161 * a per-cpu pagevec for a remote CPU. Drain all
162 * pagevecs and try again.
163 */
164 if (count < (end_index - start_index + 1)) {
165 lru_add_drain_all();
166 invalidate_mapping_pages(mapping, start_index,
Andrew Mortonebcf28e2006-03-24 03:18:04 -0800167 end_index);
Mel Gorman67d46b22013-02-22 16:35:59 -0800168 }
169 }
Andrew Mortonebcf28e2006-03-24 03:18:04 -0800170 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700171 default:
172 ret = -EINVAL;
173 }
174out:
Al Viro2903ff02012-08-28 12:52:22 -0400175 fdput(f);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700176 return ret;
177}
178
179#ifdef __ARCH_WANT_SYS_FADVISE64
180
/*
 * sys_fadvise64() - compat entry point provided when the architecture
 * defines __ARCH_WANT_SYS_FADVISE64; it forwards to sys_fadvise64_64(),
 * which performs all validation and the actual work. Note @len is size_t
 * here and is widened to loff_t by the call.
 */
SYSCALL_DEFINE4(fadvise64, int, fd, loff_t, offset, size_t, len, int, advice)
{
	return sys_fadvise64_64(fd, offset, len, advice);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700185
186#endif