/*
 * Implement the manual drop-all-pagecache function
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/sysctl.h>
#include <linux/gfp.h>
#include "internal.h"

/* A global variable is a bit ugly, but it keeps the code simple */
int sysctl_drop_caches;
/*
 * Invalidate the resident, clean pagecache of every inode on @sb.
 *
 * Runs as an iterate_supers() callback from the drop_caches sysctl
 * handler; @unused only exists to match that callback signature.
 *
 * NOTE(review): the inode_sb_list_lock is dropped and re-taken around
 * invalidate_mapping_pages() — presumably because invalidation can
 * sleep and must not run under a spinlock; confirm against the lock's
 * documentation before relying on this.
 */
static void drop_pagecache_sb(struct super_block *sb, void *unused)
{
	struct inode *inode, *toput_inode = NULL;

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		/*
		 * Skip inodes that are being set up or torn down, and
		 * inodes with no pagecache pages to invalidate.
		 */
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (inode->i_mapping->nrpages == 0)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		/* Pin the inode before dropping the locks it lives under. */
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_sb_list_lock);
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
		/*
		 * Drop the reference on the *previous* inode only now,
		 * outside the list lock; the current inode stays pinned
		 * so our list cursor remains valid when we re-lock.
		 */
		iput(toput_inode);
		toput_inode = inode;
		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_sb_list_lock);
	/* Release the final deferred reference, if any. */
	iput(toput_inode);
}
39
Adrian Bunk07d45da2008-04-29 00:58:57 -070040static void drop_slab(void)
Andrew Morton9d0243b2006-01-08 01:00:39 -080041{
42 int nr_objects;
Ying Hana09ed5e2011-05-24 17:12:26 -070043 struct shrink_control shrink = {
44 .gfp_mask = GFP_KERNEL,
Ying Hana09ed5e2011-05-24 17:12:26 -070045 };
Andrew Morton9d0243b2006-01-08 01:00:39 -080046
47 do {
Ying Han1495f232011-05-24 17:12:27 -070048 nr_objects = shrink_slab(&shrink, 1000, 1000);
Andrew Morton9d0243b2006-01-08 01:00:39 -080049 } while (nr_objects > 10);
50}
51
52int drop_caches_sysctl_handler(ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -070053 void __user *buffer, size_t *length, loff_t *ppos)
Andrew Morton9d0243b2006-01-08 01:00:39 -080054{
Petr Holasekcb16e952011-03-23 16:43:09 -070055 int ret;
56
57 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
58 if (ret)
59 return ret;
Andrew Morton9d0243b2006-01-08 01:00:39 -080060 if (write) {
61 if (sysctl_drop_caches & 1)
Al Viro01a05b32010-03-23 06:06:58 -040062 iterate_supers(drop_pagecache_sb, NULL);
Andrew Morton9d0243b2006-01-08 01:00:39 -080063 if (sysctl_drop_caches & 2)
64 drop_slab();
65 }
66 return 0;
67}