/*
 * Implement the manual drop-all-pagecache function
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/sysctl.h>
#include <linux/gfp.h>
#include "internal.h"

/* A global variable is a bit ugly, but it keeps the code simple */
int sysctl_drop_caches;

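/*
 * drop_pagecache_sb - invalidate clean page cache pages on one superblock.
 *
 * Walks sb->s_inodes under inode_sb_list_lock, skips inodes that are being
 * freed, not yet set up, or have no cached pages, and calls
 * invalidate_mapping_pages() on the rest.  Each inode is pinned with
 * __iget() so the list lock can be dropped around the invalidation; the
 * previous inode is released with iput() only after the current one is
 * pinned, so the traversal always holds a reference to its list anchor.
 * Dirty, locked and mapped pages are left alone, so only already-clean
 * pages are reclaimed.
 */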
static void drop_pagecache_sb(struct super_block *sb, void *unused)
{
        struct inode *inode, *toput_inode = NULL;

        spin_lock(&inode_sb_list_lock);
        list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
                spin_lock(&inode->i_lock);
                if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
                    (inode->i_mapping->nrpages == 0)) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }
                __iget(inode);
                spin_unlock(&inode->i_lock);
                spin_unlock(&inode_sb_list_lock);
                invalidate_mapping_pages(inode->i_mapping, 0, -1);
                iput(toput_inode);
                toput_inode = inode;
                spin_lock(&inode_sb_list_lock);
        }
        spin_unlock(&inode_sb_list_lock);
        iput(toput_inode);
}

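/*
 * drop_slab - ask the registered shrinkers to release reclaimable objects.
 *
 * Sets up a shrink_control spanning all NUMA nodes and repeatedly calls
 * shrink_slab() with a fixed 1000/1000 scanned-to-LRU pressure ratio until
 * a pass frees almost nothing (10 objects or fewer).  Only caches that
 * register a shrinker (dentries, inodes and the like) are affected.
 */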
static void drop_slab(void)
{
        int nr_objects;
        struct shrink_control shrink = {
                .gfp_mask = GFP_KERNEL,
        };

        nodes_setall(shrink.nodes_to_scan);
        do {
                nr_objects = shrink_slab(&shrink, 1000, 1000);
        } while (nr_objects > 10);
}

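/*
 * drop_caches_sysctl_handler - back end for /proc/sys/vm/drop_caches.
 *
 * On a write, bit 0 of the written value drops the page cache of every
 * mounted superblock and bit 1 drops the reclaimable slab caches, e.g.:
 *
 *      echo 1 > /proc/sys/vm/drop_caches       # page cache only
 *      echo 2 > /proc/sys/vm/drop_caches       # slab objects only
 *      echo 3 > /proc/sys/vm/drop_caches       # both
 *
 * Dirty data is not written back first, so only clean objects are freed;
 * running sync(1) beforehand maximises the effect.
 */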
int drop_caches_sysctl_handler(ctl_table *table, int write,
        void __user *buffer, size_t *length, loff_t *ppos)
{
        int ret;

        ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
        if (ret)
                return ret;
        if (write) {
                if (sysctl_drop_caches & 1)
                        iterate_supers(drop_pagecache_sb, NULL);
                if (sysctl_drop_caches & 2)
                        drop_slab();
        }
        return 0;
}