/*
 * include/linux/writeback.h
 */
#ifndef WRITEBACK_H
#define WRITEBACK_H

#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/fs.h>

DECLARE_PER_CPU(int, dirty_throttle_leaks);

/*
 * The 1/4 region under the global dirty thresh is for smooth dirty throttling:
 *
 *	(thresh - thresh/DIRTY_FULL_SCOPE, thresh)
 *
 * Further beyond, all dirtier tasks will enter a loop waiting (possibly for a
 * long time) for the dirty pages to drop, unless they have written enough
 * pages.
 *
 * The global dirty threshold is normally equal to the global dirty limit,
 * except when the system suddenly allocates a lot of anonymous memory and
 * knocks down the global dirty threshold quickly, in which case the global
 * dirty limit will follow down slowly to prevent livelocking all dirtier tasks.
 */
#define DIRTY_SCOPE		8
#define DIRTY_FULL_SCOPE	(DIRTY_SCOPE / 2)
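
/*
 * Illustrative arithmetic (not part of the original header): with
 * DIRTY_SCOPE = 8, DIRTY_FULL_SCOPE is 4, so for a hypothetical global
 * dirty thresh of 200MB the smooth-throttling region above works out to
 * (200MB - 200MB/4, 200MB) = (150MB, 200MB), i.e. the top 1/4 below thresh.
 */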

struct backing_dev_info;

/*
 * fs/fs-writeback.c
 */
enum writeback_sync_modes {
	WB_SYNC_NONE,	/* Don't wait on anything */
	WB_SYNC_ALL,	/* Wait on every mapping */
};

/*
 * why some writeback work was initiated
 */
enum wb_reason {
	WB_REASON_BACKGROUND,
	WB_REASON_TRY_TO_FREE_PAGES,
	WB_REASON_SYNC,
	WB_REASON_PERIODIC,
	WB_REASON_LAPTOP_TIMER,
	WB_REASON_FREE_MORE_MEM,
	WB_REASON_FS_FREE_SPACE,
	/*
	 * There is no bdi forker thread any more and works are done by
	 * an emergency worker; however, this reason is visible to userland
	 * via tracepoints and we keep exposing exactly the same
	 * information there, hence the mismatched name.
	 */
	WB_REASON_FORKER_THREAD,

	WB_REASON_MAX,
};

/*
 * A control structure which tells the writeback code what to do.  These are
 * always on the stack, and hence need no locking.  They are always initialised
 * in a manner such that unspecified fields are set to zero.
 */
struct writeback_control {
	long nr_to_write;		/* Write this many pages, and decrement
					   this for each page written */
	long pages_skipped;		/* Pages which were not written */

	/*
	 * For a_ops->writepages(): if start or end are non-zero then this is
	 * a hint that the filesystem need only write out the pages inside that
	 * byterange.  The byte at `end' is included in the writeout request.
	 */
	loff_t range_start;
	loff_t range_end;

	enum writeback_sync_modes sync_mode;

	unsigned for_kupdate:1;		/* A kupdate writeback */
	unsigned for_background:1;	/* A background writeback */
	unsigned tagged_writepages:1;	/* tag-and-write to avoid livelock */
	unsigned for_reclaim:1;		/* Invoked from the page allocator */
	unsigned range_cyclic:1;	/* range_start is cyclic */
	unsigned for_sync:1;		/* sync(2) WB_SYNC_ALL writeback */
};
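
/*
 * Illustrative sketch (not part of the original header), assuming a caller
 * that wants to push out up to nr_pages from a mapping without waiting for
 * the I/O to complete:
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_NONE,
 *		.nr_to_write	= nr_pages,
 *		.range_start	= 0,
 *		.range_end	= LLONG_MAX,
 *	};
 *
 *	do_writepages(mapping, &wbc);
 *
 * Fields not named in the initialiser (for_kupdate, for_reclaim, ...) are
 * left zero, matching the "unspecified fields are set to zero" rule above.
 */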

/*
 * fs/fs-writeback.c
 */
struct bdi_writeback;
int inode_wait(void *);
void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
			    enum wb_reason reason);
int try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
int try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
				  enum wb_reason reason);
void sync_inodes_sb(struct super_block *);
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
void inode_wait_for_writeback(struct inode *inode);

/* writeback.h requires fs.h; it, too, is not included from here. */
static inline void wait_on_inode(struct inode *inode)
{
	might_sleep();
	wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE);
}

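/*
 * Illustrative note (not part of the original header): inode-lookup paths
 * such as iget_locked() and ilookup() call wait_on_inode() on a hash hit,
 * so they only return the inode once the thread that set I_NEW has finished
 * initialising it and cleared the bit.
 */
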
/*
 * mm/page-writeback.c
 */
#ifdef CONFIG_BLOCK
void laptop_io_completion(struct backing_dev_info *info);
void laptop_sync_completion(void);
void laptop_mode_sync(struct work_struct *work);
void laptop_mode_timer_fn(unsigned long data);
#else
static inline void laptop_sync_completion(void) { }
#endif
void throttle_vm_writeout(gfp_t gfp_mask);
bool zone_dirty_ok(struct zone *zone);

extern unsigned long global_dirty_limit;

/* These are exported to sysctl. */
extern int dirty_background_ratio;
extern unsigned long dirty_background_bytes;
extern int vm_dirty_ratio;
extern unsigned long vm_dirty_bytes;
extern unsigned int dirty_writeback_interval;
extern unsigned int dirty_expire_interval;
extern int vm_highmem_is_dirtyable;
extern int block_dump;
extern int laptop_mode;

extern int dirty_background_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);
extern int dirty_background_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);
extern int dirty_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);
extern int dirty_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);

struct ctl_table;
int dirty_writeback_centisecs_handler(struct ctl_table *, int,
				      void __user *, size_t *, loff_t *);

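/*
 * Illustrative note (not part of the original header): the knobs above are
 * surfaced under /proc/sys/vm/ (e.g. dirty_background_ratio, dirty_ratio,
 * dirty_writeback_centisecs, dirty_expire_centisecs), with the *_handler
 * functions acting as their sysctl write handlers.
 */
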
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
unsigned long bdi_dirty_limit(struct backing_dev_info *bdi,
			      unsigned long dirty);

void __bdi_update_bandwidth(struct backing_dev_info *bdi,
			    unsigned long thresh,
			    unsigned long bg_thresh,
			    unsigned long dirty,
			    unsigned long bdi_thresh,
			    unsigned long bdi_dirty,
			    unsigned long start_time);

void page_writeback_init(void);
void balance_dirty_pages_ratelimited(struct address_space *mapping);

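/*
 * Illustrative note (not part of the original header): code that dirties
 * page-cache pages, such as the generic buffered-write path, is expected to
 * call balance_dirty_pages_ratelimited(mapping) afterwards so that the
 * dirtying task gets throttled as the dirty thresholds above are approached.
 */
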
typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
				void *data);

int generic_writepages(struct address_space *mapping,
		       struct writeback_control *wbc);
void tag_pages_for_writeback(struct address_space *mapping,
			     pgoff_t start, pgoff_t end);
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data);
int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
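
/*
 * Illustrative sketch (not part of the original header): a filesystem's
 * ->writepages() implementation typically wraps write_cache_pages() around
 * its own writepage_t callback.  The names my_writepage() and my_writepages()
 * are hypothetical:
 *
 *	static int my_writepage(struct page *page,
 *				struct writeback_control *wbc, void *data)
 *	{
 *		... write out @page, honouring wbc->sync_mode ...
 *		return 0;
 *	}
 *
 *	static int my_writepages(struct address_space *mapping,
 *				 struct writeback_control *wbc)
 *	{
 *		return write_cache_pages(mapping, wbc, my_writepage, NULL);
 *	}
 */
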
void set_page_dirty_balance(struct page *page, int page_mkwrite);
void writeback_set_ratelimit(void);

void account_page_redirty(struct page *page);

#endif /* WRITEBACK_H */