blob: 2bc2c87f35e7a251628da1390f288b0cf8f2e721 [file] [log] [blame]
Andrew Morton9d0243b2006-01-08 01:00:39 -08001/*
2 * Implement the manual drop-all-pagecache function
3 */
4
5#include <linux/kernel.h>
6#include <linux/mm.h>
7#include <linux/fs.h>
8#include <linux/writeback.h>
9#include <linux/sysctl.h>
10#include <linux/gfp.h>
Dave Chinner55fa6092011-03-22 22:23:40 +110011#include "internal.h"
Andrew Morton9d0243b2006-01-08 01:00:39 -080012
/*
 * A global variable is a bit ugly, but it keeps the code simple.
 *
 * Bitmask consumed by drop_caches_sysctl_handler() below:
 *   bit 0 (1) - drop the page cache
 *   bit 1 (2) - drop slab caches
 *   bit 2 (4) - suppress the informational log message
 */
int sysctl_drop_caches;
/*
 * Invalidate the clean pagecache of every inode on @sb.
 * Called for each super_block via iterate_supers(); @unused is ignored.
 */
static void drop_pagecache_sb(struct super_block *sb, void *unused)
{
	struct inode *inode, *toput_inode = NULL;

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		/* Skip inodes being torn down / not yet set up, or with no
		 * cached pages to invalidate. */
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (inode->i_mapping->nrpages == 0)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		/* Pin the inode so it stays valid (and keeps our list
		 * position stable) while we drop the list lock below. */
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_sb_list_lock);
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
		/* Release the previous iteration's inode only now, outside
		 * the locks — iput() may sleep/do final cleanup. */
		iput(toput_inode);
		toput_inode = inode;
		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_sb_list_lock);
	/* Drop the reference pinned by the final loop iteration, if any. */
	iput(toput_inode);
}
39
Adrian Bunk07d45da2008-04-29 00:58:57 -070040static void drop_slab(void)
Andrew Morton9d0243b2006-01-08 01:00:39 -080041{
42 int nr_objects;
43
44 do {
Johannes Weiner6b4f7792014-12-12 16:56:13 -080045 int nid;
46
47 nr_objects = 0;
48 for_each_online_node(nid)
49 nr_objects += shrink_node_slabs(GFP_KERNEL, nid,
50 1000, 1000);
Andrew Morton9d0243b2006-01-08 01:00:39 -080051 } while (nr_objects > 10);
52}
53
Joe Perches1f7e0612014-06-06 14:38:05 -070054int drop_caches_sysctl_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -070055 void __user *buffer, size_t *length, loff_t *ppos)
Andrew Morton9d0243b2006-01-08 01:00:39 -080056{
Petr Holasekcb16e952011-03-23 16:43:09 -070057 int ret;
58
59 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
60 if (ret)
61 return ret;
Andrew Morton9d0243b2006-01-08 01:00:39 -080062 if (write) {
Dave Hansen5509a5d2014-04-03 14:48:19 -070063 static int stfu;
64
65 if (sysctl_drop_caches & 1) {
Al Viro01a05b32010-03-23 06:06:58 -040066 iterate_supers(drop_pagecache_sb, NULL);
Dave Hansen5509a5d2014-04-03 14:48:19 -070067 count_vm_event(DROP_PAGECACHE);
68 }
69 if (sysctl_drop_caches & 2) {
Andrew Morton9d0243b2006-01-08 01:00:39 -080070 drop_slab();
Dave Hansen5509a5d2014-04-03 14:48:19 -070071 count_vm_event(DROP_SLAB);
72 }
73 if (!stfu) {
74 pr_info("%s (%d): drop_caches: %d\n",
75 current->comm, task_pid_nr(current),
76 sysctl_drop_caches);
77 }
78 stfu |= sysctl_drop_caches & 4;
Andrew Morton9d0243b2006-01-08 01:00:39 -080079 }
80 return 0;
81}