blob: 280460fef06647e9fedc9464e927c99d966ebadd [file] [log] [blame]
Andrew Morton9d0243b2006-01-08 01:00:39 -08001/*
2 * Implement the manual drop-all-pagecache function
3 */
4
5#include <linux/kernel.h>
6#include <linux/mm.h>
7#include <linux/fs.h>
8#include <linux/writeback.h>
9#include <linux/sysctl.h>
10#include <linux/gfp.h>
Dave Chinner55fa6092011-03-22 22:23:40 +110011#include "internal.h"
Andrew Morton9d0243b2006-01-08 01:00:39 -080012
/* A global variable is a bit ugly, but it keeps the code simple */
int sysctl_drop_caches;	/* bitmask last written via the drop_caches sysctl; see drop_caches_sysctl_handler() */

/*
 * Invalidate the resident pagecache of every inode on @sb.
 *
 * Used as an iterate_supers() callback (hence the @unused argument)
 * when bit 0 of the drop_caches sysctl is written.
 */
static void drop_pagecache_sb(struct super_block *sb, void *unused)
{
	struct inode *inode, *toput_inode = NULL;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		/*
		 * We must skip inodes in unusual state. We may also skip
		 * inodes without pages but we deliberately won't in case
		 * we need to reschedule to avoid softlockups.
		 */
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (inode->i_mapping->nrpages == 0 && !need_resched())) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		/* Pin the inode so it stays valid while both locks are dropped. */
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&sb->s_inode_list_lock);

		cond_resched();
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
		/*
		 * Only now drop the reference on the previous inode: holding
		 * it until here kept that list entry (our resume point on
		 * i_sb_list) from going away while the list lock was released.
		 */
		iput(toput_inode);
		toput_inode = inode;

		spin_lock(&sb->s_inode_list_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
	iput(toput_inode);	/* release the last pinned inode, if any */
}
47
Joe Perches1f7e0612014-06-06 14:38:05 -070048int drop_caches_sysctl_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -070049 void __user *buffer, size_t *length, loff_t *ppos)
Andrew Morton9d0243b2006-01-08 01:00:39 -080050{
Petr Holasekcb16e952011-03-23 16:43:09 -070051 int ret;
52
53 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
54 if (ret)
55 return ret;
Andrew Morton9d0243b2006-01-08 01:00:39 -080056 if (write) {
Dave Hansen5509a5d2014-04-03 14:48:19 -070057 static int stfu;
58
59 if (sysctl_drop_caches & 1) {
Al Viro01a05b32010-03-23 06:06:58 -040060 iterate_supers(drop_pagecache_sb, NULL);
Dave Hansen5509a5d2014-04-03 14:48:19 -070061 count_vm_event(DROP_PAGECACHE);
62 }
63 if (sysctl_drop_caches & 2) {
Andrew Morton9d0243b2006-01-08 01:00:39 -080064 drop_slab();
Dave Hansen5509a5d2014-04-03 14:48:19 -070065 count_vm_event(DROP_SLAB);
66 }
67 if (!stfu) {
68 pr_info("%s (%d): drop_caches: %d\n",
69 current->comm, task_pid_nr(current),
70 sysctl_drop_caches);
71 }
72 stfu |= sysctl_drop_caches & 4;
Andrew Morton9d0243b2006-01-08 01:00:39 -080073 }
74 return 0;
75}