#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>

void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
EXPORT_SYMBOL(default_unplug_io_fn);

struct backing_dev_info default_backing_dev_info = {
        .ra_pages       = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
        .state          = 0,
        .capabilities   = BDI_CAP_MAP_COPY,
        .unplug_io_fn   = default_unplug_io_fn,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

static struct class *bdi_class;
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);
LIST_HEAD(bdi_pending_list);

static struct task_struct *sync_supers_tsk;
static struct timer_list sync_supers_timer;

static int bdi_sync_supers(void *);
static void sync_supers_timer_fn(unsigned long);
static void arm_supers_timer(void);

static void bdi_add_default_flusher_task(struct backing_dev_info *bdi);

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
        bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

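/*
 * Show per-bdi writeback statistics in debugfs: the amount of memory
 * under writeback and reclaimable dirty memory on this bdi, plus the
 * per-bdi, global dirty and background dirty thresholds, all in kB.
 */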
static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
        struct backing_dev_info *bdi = m->private;
        unsigned long background_thresh;
        unsigned long dirty_thresh;
        unsigned long bdi_thresh;

        get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi);

#define K(x) ((x) << (PAGE_SHIFT - 10))
        seq_printf(m,
                   "BdiWriteback:     %8lu kB\n"
                   "BdiReclaimable:   %8lu kB\n"
                   "BdiDirtyThresh:   %8lu kB\n"
                   "DirtyThresh:      %8lu kB\n"
                   "BackgroundThresh: %8lu kB\n",
                   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
                   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
                   K(bdi_thresh),
                   K(dirty_thresh),
                   K(background_thresh));
#undef K

        return 0;
}

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
        .open           = bdi_debug_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
        bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
        bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
                                               bdi, &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
        debugfs_remove(bdi->debug_stats);
        debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
                                      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

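/*
 * sysfs store handler for read_ahead_kb: parse the value written by the
 * user and update the bdi's readahead window, converting kilobytes to
 * pages.
 */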
static ssize_t read_ahead_kb_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        char *end;
        unsigned long read_ahead_kb;
        ssize_t ret = -EINVAL;

        read_ahead_kb = simple_strtoul(buf, &end, 10);
        if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
                bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
                ret = count;
        }
        return ret;
}

#define K(pages) ((pages) << (PAGE_SHIFT - 10))

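/*
 * Generate a sysfs show handler named <name>_show() that prints the
 * given expression, evaluated against the bdi behind the device.
 */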
#define BDI_SHOW(name, expr)                                            \
static ssize_t name##_show(struct device *dev,                          \
                           struct device_attribute *attr, char *page)   \
{                                                                       \
        struct backing_dev_info *bdi = dev_get_drvdata(dev);            \
                                                                        \
        return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);  \
}

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))

static ssize_t min_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        char *end;
        unsigned int ratio;
        ssize_t ret = -EINVAL;

        ratio = simple_strtoul(buf, &end, 10);
        if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
                ret = bdi_set_min_ratio(bdi, ratio);
                if (!ret)
                        ret = count;
        }
        return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        char *end;
        unsigned int ratio;
        ssize_t ret = -EINVAL;

        ratio = simple_strtoul(buf, &end, 10);
        if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
                ret = bdi_set_max_ratio(bdi, ratio);
                if (!ret)
                        ret = count;
        }
        return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)

static struct device_attribute bdi_dev_attrs[] = {
        __ATTR_RW(read_ahead_kb),
        __ATTR_RW(min_ratio),
        __ATTR_RW(max_ratio),
        __ATTR_NULL,
};

static __init int bdi_class_init(void)
{
        bdi_class = class_create(THIS_MODULE, "bdi");
        bdi_class->dev_attrs = bdi_dev_attrs;
        bdi_debug_init();
        return 0;
}
postcore_initcall(bdi_class_init);

static int __init default_bdi_init(void)
{
        int err;

        sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers");
        BUG_ON(IS_ERR(sync_supers_tsk));

        init_timer(&sync_supers_timer);
        setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
        arm_supers_timer();

        err = bdi_init(&default_backing_dev_info);
        if (!err)
                bdi_register(&default_backing_dev_info, NULL, "default");

        return err;
}
subsys_initcall(default_bdi_init);

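/*
 * Initialize a writeback context: clear it, tie it to its bdi and set up
 * the dirty/io/more_io inode lists it will work from.
 */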
static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
{
        memset(wb, 0, sizeof(*wb));

        wb->bdi = bdi;
        wb->last_old_flush = jiffies;
        INIT_LIST_HEAD(&wb->b_dirty);
        INIT_LIST_HEAD(&wb->b_io);
        INIT_LIST_HEAD(&wb->b_more_io);
}

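/*
 * Common setup for a writeback thread: link the wb context into the
 * bdi's list of writeback contexts, mark the task as a freezable
 * flusher and run it at normal priority.
 */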
static void bdi_task_init(struct backing_dev_info *bdi,
                          struct bdi_writeback *wb)
{
        struct task_struct *tsk = current;

        spin_lock(&bdi->wb_lock);
        list_add_tail_rcu(&wb->list, &bdi->wb_list);
        spin_unlock(&bdi->wb_lock);

        tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
        set_freezable();

        /*
         * Our parent may run at a different priority, just set us to normal
         */
        set_user_nice(tsk, 0);
}

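/*
 * Main function of a per-bdi flusher thread, started by the forker
 * thread once the bdi has dirty data: put the bdi on the active list,
 * run the writeback loop, and clean up when the thread exits.
 */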
static int bdi_start_fn(void *ptr)
{
        struct bdi_writeback *wb = ptr;
        struct backing_dev_info *bdi = wb->bdi;
        int ret;

        /*
         * Add us to the active bdi_list
         */
        spin_lock(&bdi_lock);
        list_add(&bdi->bdi_list, &bdi_list);
        spin_unlock(&bdi_lock);

        bdi_task_init(bdi, wb);

        /*
         * Clear pending bit and wakeup anybody waiting to tear us down
         */
        clear_bit(BDI_pending, &bdi->state);
        smp_mb__after_clear_bit();
        wake_up_bit(&bdi->state, BDI_pending);

        ret = bdi_writeback_task(wb);

        /*
         * Remove us from the list
         */
        spin_lock(&bdi->wb_lock);
        list_del_rcu(&wb->list);
        spin_unlock(&bdi->wb_lock);

        /*
         * Flush any work that raced with us exiting. No new work
         * will be added, since this bdi isn't discoverable anymore.
         */
        if (!list_empty(&bdi->work_list))
                wb_do_writeback(wb, 1);

        wb->task = NULL;
        return ret;
}

int bdi_has_dirty_io(struct backing_dev_info *bdi)
{
        return wb_has_dirty_io(&bdi->wb);
}

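/*
 * Write back a batch of dirty inodes from this bdi. Used by the forker
 * thread to make progress (and free memory) when it cannot create a
 * dedicated flusher thread.
 */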
static void bdi_flush_io(struct backing_dev_info *bdi)
{
        struct writeback_control wbc = {
                .bdi                    = bdi,
                .sync_mode              = WB_SYNC_NONE,
                .older_than_this        = NULL,
                .range_cyclic           = 1,
                .nr_to_write            = 1024,
        };

        writeback_inodes_wbc(&wbc);
}

/*
 * kupdated() used to do this. We cannot do it from the bdi_forker_task()
 * or we risk deadlocking on ->s_umount. The longer term solution would be
 * to implement sync_supers_bdi() or similar and simply do it from the
 * bdi writeback tasks individually.
 */
static int bdi_sync_supers(void *unused)
{
        set_user_nice(current, 0);

        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();

                /*
                 * Do this periodically, like kupdated() did before.
                 */
                sync_supers();
        }

        return 0;
}

static void arm_supers_timer(void)
{
        unsigned long next;

        next = msecs_to_jiffies(dirty_writeback_interval * 10) + jiffies;
        mod_timer(&sync_supers_timer, round_jiffies_up(next));
}

static void sync_supers_timer_fn(unsigned long unused)
{
        wake_up_process(sync_supers_tsk);
        arm_supers_timer();
}

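/*
 * Main function of the default bdi's forker thread: periodically scan
 * bdi_list for devices with dirty data but no flusher thread, move them
 * to bdi_pending_list and create a "flush-<dev>" thread for each.
 */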
static int bdi_forker_task(void *ptr)
{
        struct bdi_writeback *me = ptr;

        bdi_task_init(me->bdi, me);

        for (;;) {
                struct backing_dev_info *bdi, *tmp;
                struct bdi_writeback *wb;

                /*
                 * Temporary measure, we want to make sure we don't see
                 * dirty data on the default backing_dev_info
                 */
                if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list))
                        wb_do_writeback(me, 0);

                spin_lock(&bdi_lock);

                /*
                 * Check if any existing bdi's have dirty data without
                 * a thread registered. If so, set that up.
                 */
                list_for_each_entry_safe(bdi, tmp, &bdi_list, bdi_list) {
                        if (bdi->wb.task)
                                continue;
                        if (list_empty(&bdi->work_list) &&
                            !bdi_has_dirty_io(bdi))
                                continue;

                        bdi_add_default_flusher_task(bdi);
                }

                set_current_state(TASK_INTERRUPTIBLE);

                if (list_empty(&bdi_pending_list)) {
                        unsigned long wait;

                        spin_unlock(&bdi_lock);
                        wait = msecs_to_jiffies(dirty_writeback_interval * 10);
                        schedule_timeout(wait);
                        try_to_freeze();
                        continue;
                }

                __set_current_state(TASK_RUNNING);

                /*
                 * This is our real job - check for pending entries in
                 * bdi_pending_list, and create the tasks that got added
                 */
                bdi = list_entry(bdi_pending_list.next, struct backing_dev_info,
                                 bdi_list);
                list_del_init(&bdi->bdi_list);
                spin_unlock(&bdi_lock);

                wb = &bdi->wb;
                wb->task = kthread_run(bdi_start_fn, wb, "flush-%s",
                                       dev_name(bdi->dev));
                /*
                 * If task creation fails, then re-add the bdi to
                 * the pending list and force writeout of the bdi
                 * from this forker thread. That will free some memory
                 * and we can try again.
                 */
                if (IS_ERR(wb->task)) {
                        wb->task = NULL;

                        /*
                         * Add this 'bdi' to the back, so we get
                         * a chance to flush other bdi's to free
                         * memory.
                         */
                        spin_lock(&bdi_lock);
                        list_add_tail(&bdi->bdi_list, &bdi_pending_list);
                        spin_unlock(&bdi_lock);

                        bdi_flush_io(bdi);
                }
        }

        return 0;
}

/*
 * Add the default flusher task that gets created for any bdi
 * that has dirty data pending writeout
 */
static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
{
        if (!bdi_cap_writeback_dirty(bdi))
                return;

        /*
         * Check with the helper whether to proceed adding a task. We will
         * only abort if two or more simultaneous calls to
         * bdi_add_default_flusher_task() occurred; further additions will
         * block waiting for previous additions to finish.
         */
        if (!test_and_set_bit(BDI_pending, &bdi->state)) {
                list_move_tail(&bdi->bdi_list, &bdi_pending_list);

                /*
                 * We are now on the pending list, wake up bdi_forker_task()
                 * to finish the job and add us back to the active bdi_list
                 */
                wake_up_process(default_backing_dev_info.wb.task);
        }
}

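/*
 * Register a bdi device with the bdi class, add it to the global
 * bdi_list and, for the default bdi, start the forker thread that
 * creates per-bdi flusher threads on demand.
 */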
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                const char *fmt, ...)
{
        va_list args;
        int ret = 0;
        struct device *dev;

        if (bdi->dev)   /* The driver needs to use separate queues per device */
                goto exit;

        va_start(args, fmt);
        dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
        va_end(args);
        if (IS_ERR(dev)) {
                ret = PTR_ERR(dev);
                goto exit;
        }

        spin_lock(&bdi_lock);
        list_add_tail(&bdi->bdi_list, &bdi_list);
        spin_unlock(&bdi_lock);

        bdi->dev = dev;

        /*
         * Just start the forker thread for our default backing_dev_info,
         * and add other bdi's to the list. They will get a thread created
         * on-demand when they need it.
         */
        if (bdi_cap_flush_forker(bdi)) {
                struct bdi_writeback *wb = &bdi->wb;

                wb->task = kthread_run(bdi_forker_task, wb, "bdi-%s",
                                       dev_name(dev));
                if (IS_ERR(wb->task)) {
                        wb->task = NULL;
                        ret = -ENOMEM;

                        spin_lock(&bdi_lock);
                        list_del(&bdi->bdi_list);
                        spin_unlock(&bdi_lock);
                        goto exit;
                }
        }

        bdi_debug_register(bdi, dev_name(dev));
exit:
        return ret;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
        return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);

/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void bdi_wb_shutdown(struct backing_dev_info *bdi)
{
        struct bdi_writeback *wb;

        if (!bdi_cap_writeback_dirty(bdi))
                return;

        /*
         * If setup is pending, wait for that to complete first
         */
        wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
                        TASK_UNINTERRUPTIBLE);

        /*
         * Make sure nobody finds us on the bdi_list anymore
         */
        spin_lock(&bdi_lock);
        list_del(&bdi->bdi_list);
        spin_unlock(&bdi_lock);

        /*
         * Finally, kill the kernel threads. We don't need to be RCU
         * safe anymore, since the bdi is gone from visibility.
         */
        list_for_each_entry(wb, &bdi->wb_list, list)
                kthread_stop(wb->task);
}

void bdi_unregister(struct backing_dev_info *bdi)
{
        if (bdi->dev) {
                if (!bdi_cap_flush_forker(bdi))
                        bdi_wb_shutdown(bdi);
                bdi_debug_unregister(bdi);
                device_unregister(bdi->dev);
                bdi->dev = NULL;
        }
}
EXPORT_SYMBOL(bdi_unregister);

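/*
 * Initialize a backing_dev_info: ratios, locks, lists, the embedded
 * default writeback context and the per-cpu statistics counters.
 */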
int bdi_init(struct backing_dev_info *bdi)
{
        int i, err;

        bdi->dev = NULL;

        bdi->min_ratio = 0;
        bdi->max_ratio = 100;
        bdi->max_prop_frac = PROP_FRAC_BASE;
        spin_lock_init(&bdi->wb_lock);
        INIT_LIST_HEAD(&bdi->bdi_list);
        INIT_LIST_HEAD(&bdi->wb_list);
        INIT_LIST_HEAD(&bdi->work_list);

        bdi_wb_init(&bdi->wb, bdi);

        /*
         * Just one thread support for now, hard code mask and count
         */
        bdi->wb_mask = 1;
        bdi->wb_cnt = 1;

        for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
                err = percpu_counter_init(&bdi->bdi_stat[i], 0);
                if (err)
                        goto err;
        }

        bdi->dirty_exceeded = 0;
        err = prop_local_init_percpu(&bdi->completions);

        if (err) {
err:
                while (i--)
                        percpu_counter_destroy(&bdi->bdi_stat[i]);
        }

        return err;
}
EXPORT_SYMBOL(bdi_init);

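/*
 * Tear down a backing_dev_info: unregister the device and release the
 * per-cpu counters. The bdi must no longer have dirty IO pending.
 */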
void bdi_destroy(struct backing_dev_info *bdi)
{
        int i;

        WARN_ON(bdi_has_dirty_io(bdi));

        bdi_unregister(bdi);

        for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
                percpu_counter_destroy(&bdi->bdi_stat[i]);

        prop_local_destroy_percpu(&bdi->completions);
}
EXPORT_SYMBOL(bdi_destroy);

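/*
 * Congestion handling: devices mark themselves congested when their
 * request queues fill up and clear the state once requests complete,
 * waking anybody sleeping in congestion_wait(). One wait queue per
 * sync/async direction.
 */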
static wait_queue_head_t congestion_wqh[2] = {
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
        };

void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
        enum bdi_state bit;
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        bit = sync ? BDI_sync_congested : BDI_async_congested;
        clear_bit(bit, &bdi->state);
        smp_mb__after_clear_bit();
        if (waitqueue_active(wqh))
                wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);

void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
        enum bdi_state bit;

        bit = sync ? BDI_sync_congested : BDI_async_congested;
        set_bit(bit, &bdi->state);
}
EXPORT_SYMBOL(set_bdi_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion. If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
        long ret;
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = io_schedule_timeout(timeout);
        finish_wait(wqh, &wait);
        return ret;
}
EXPORT_SYMBOL(congestion_wait);