#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/module.h>

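/*
 * Initialise the per-CPU writeback statistics counters and the completions
 * proportion estimator for this backing_dev_info.  On failure, any counters
 * that were already set up are torn down again before returning the error.
 */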
int bdi_init(struct backing_dev_info *bdi)
{
        int i, j;
        int err;

        for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
                err = percpu_counter_init_irq(&bdi->bdi_stat[i], 0);
                if (err)
                        goto err;
        }

        bdi->dirty_exceeded = 0;
        err = prop_local_init_percpu(&bdi->completions);

        if (err) {
err:
                for (j = 0; j < i; j++)
                        percpu_counter_destroy(&bdi->bdi_stat[j]);
        }

        return err;
}
EXPORT_SYMBOL(bdi_init);

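/*
 * Tear down everything that bdi_init() set up: the per-CPU stat counters
 * and the completions proportion estimator.
 */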
void bdi_destroy(struct backing_dev_info *bdi)
{
        int i;

        for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
                percpu_counter_destroy(&bdi->bdi_stat[i]);

        prop_local_destroy_percpu(&bdi->completions);
}
EXPORT_SYMBOL(bdi_destroy);

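/*
 * One wait queue per data direction (READ == 0, WRITE == 1).  Tasks in
 * congestion_wait() sleep here and are woken by clear_bdi_congested().
 */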
static wait_queue_head_t congestion_wqh[2] = {
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
        };

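/*
 * Clear the congested bit for @bdi in the given direction and wake anyone
 * sleeping in congestion_wait().  The barrier after clear_bit() orders the
 * flag update against the waitqueue_active() check.
 */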
void clear_bdi_congested(struct backing_dev_info *bdi, int rw)
{
        enum bdi_state bit;
        wait_queue_head_t *wqh = &congestion_wqh[rw];

        bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
        clear_bit(bit, &bdi->state);
        smp_mb__after_clear_bit();
        if (waitqueue_active(wqh))
                wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);

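/*
 * Mark @bdi congested in the given direction.  The flag is cleared again,
 * and waiters are woken, by clear_bdi_congested().
 */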
void set_bdi_congested(struct backing_dev_info *bdi, int rw)
{
        enum bdi_state bit;

        bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
        set_bit(bit, &bdi->state);
}
EXPORT_SYMBOL(set_bdi_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @rw: READ or WRITE
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to
 * exit congestion in the direction given by @rw.  If nothing clears
 * congestion in that time, the call returns once @timeout expires.
 */
long congestion_wait(int rw, long timeout)
{
        long ret;
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = &congestion_wqh[rw];

        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = io_schedule_timeout(timeout);
        finish_wait(wqh, &wait);
        return ret;
}
EXPORT_SYMBOL(congestion_wait);
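
/*
 * Typical caller pattern (illustrative sketch, not part of this file): a
 * writeback or reclaim path backs off while the device stays congested, e.g.
 *
 *	if (bdi_write_congested(bdi))
 *		congestion_wait(WRITE, HZ / 10);
 *
 * bdi_write_congested() is the helper from <linux/backing-dev.h>; the HZ/10
 * backoff interval is only an example value.
 */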