// SPDX-License-Identifier: GPL-2.0
/*
 * Implement the manual drop-all-pagecache function
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/sysctl.h>
#include <linux/gfp.h>
#include "internal.h"

/* A global variable is a bit ugly, but it keeps the code simple */
int sysctl_drop_caches;
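
/*
 * Invalidate the clean pagecache of every inode on a superblock.
 * Called once per mounted filesystem via iterate_supers().  Note that
 * invalidate_mapping_pages() only tosses pages it can free immediately:
 * dirty, locked, writeback and mapped pages are left alone.
 */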
static void drop_pagecache_sb(struct super_block *sb, void *unused)
{
	struct inode *inode, *toput_inode = NULL;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		/*
		 * We must skip inodes in an unusual state. We may also
		 * skip inodes without pages, but we deliberately won't
		 * in case we need to reschedule to avoid softlockups.
		 */
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (inode->i_mapping->nrpages == 0 && !need_resched())) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&sb->s_inode_list_lock);

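		/*
		 * With the inode pinned by __iget() both locks can be
		 * dropped here: invalidating the mapping can take a
		 * long time and must not happen under spinlocks.
		 */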
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
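		/*
		 * iput() of the previous inode is deferred to this
		 * unlocked region because the final iput() may sleep.
		 * Holding a reference to the current inode keeps it on
		 * the s_inodes list across the relock below.
		 */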
		iput(toput_inode);
		toput_inode = inode;

		cond_resched();
		spin_lock(&sb->s_inode_list_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
	iput(toput_inode);
}
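
/*
 * Handler for writes to /proc/sys/vm/drop_caches.  The value written
 * is a bitmask: "echo 1" drops the clean pagecache, "echo 2" drops
 * reclaimable slab objects (e.g. dentries and inodes), and "echo 3"
 * does both.  Additionally setting bit 2 (e.g. "echo 4") suppresses
 * the informational log message for subsequent writes.
 */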
int drop_caches_sysctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (ret)
		return ret;
	if (write) {
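		/* Sticky "shut up" flag, set once bit 2 is written. */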
		static int stfu;

		if (sysctl_drop_caches & 1) {
			iterate_supers(drop_pagecache_sb, NULL);
			count_vm_event(DROP_PAGECACHE);
		}
		if (sysctl_drop_caches & 2) {
			drop_slab();
			count_vm_event(DROP_SLAB);
		}
		if (!stfu) {
			pr_info("%s (%d): drop_caches: %d\n",
				current->comm, task_pid_nr(current),
				sysctl_drop_caches);
		}
		stfu |= sysctl_drop_caches & 4;
	}
	return 0;
}