/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"
#include "blk.h"

/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over a 100ms slice, after which the slice is renewed */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */

static struct blkcg_policy blkcg_policy_throtl;

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;

struct throtl_service_queue {
	struct throtl_service_queue *parent_sq;	/* the parent service_queue */

	/*
	 * Bios queued directly to this service_queue or dispatched from
	 * children throtl_grp's.
	 */
	struct bio_list		bio_lists[2];	/* queued bios [READ/WRITE] */
	unsigned int		nr_queued[2];	/* number of queued bios */

	/*
	 * RB tree of active children throtl_grp's, which are sorted by
	 * their ->disptime.
	 */
	struct rb_root		pending_tree;	/* RB tree of active tgs */
	struct rb_node		*first_pending;	/* first node in the tree */
	unsigned int		nr_pending;	/* # queued in the tree */
	unsigned long		first_pending_disptime;	/* disptime of the first tg */
};

enum tg_state_flags {
	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio_lists[] became non-empty */
};

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

/* Per-cpu group stats */
struct tg_stats_cpu {
	/* total bytes transferred */
	struct blkg_rwstat		service_bytes;
	/* total IOs serviced, post merge */
	struct blkg_rwstat		serviced;
};

struct throtl_grp {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* active throtl group service_queue member */
	struct rb_node rb_node;

	/* throtl_data this group belongs to */
	struct throtl_data *td;

	/* this group's service queue */
	struct throtl_service_queue service_queue;

	/*
	 * Dispatch time in jiffies. This is the estimated time when the
	 * group will unthrottle and be ready to dispatch more bios. It is
	 * used as the key to sort active groups in the service tree.
	 */
	unsigned long disptime;

	unsigned int flags;

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IOPS limits */
	unsigned int iops[2];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bio's dispatched in current slice */
	unsigned int io_disp[2];

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	/* Per cpu stats pointer */
	struct tg_stats_cpu __percpu *stats_cpu;

	/* List of tgs waiting for per cpu stats memory to be allocated */
	struct list_head stats_alloc_node;
};

struct throtl_data
{
	/* service tree for active throtl groups */
	struct throtl_service_queue service_queue;

	struct request_queue *queue;

	/* total number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* total number of undestroyed groups */
	unsigned int nr_undestroyed_grps;

	/* Work for dispatching throttled bios */
	struct delayed_work dispatch_work;
};

/* list and work item to allocate percpu group stats */
static DEFINE_SPINLOCK(tg_stats_alloc_lock);
static LIST_HEAD(tg_stats_alloc_list);

static void tg_stats_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);

static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
}

static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
	return pd_to_blkg(&tg->pd);
}

static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
{
	return blkg_to_tg(td->queue->root_blkg);
}

/**
 * sq_to_tg - return the throtl_grp the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * Return the throtl_grp @sq belongs to. If @sq is the top-level one
 * embedded in throtl_data, %NULL is returned.
 */
static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
{
	if (sq && sq->parent_sq)
		return container_of(sq, struct throtl_grp, service_queue);
	else
		return NULL;
}

/**
 * sq_to_td - return throtl_data the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * A service_queue can be embedded in either a throtl_grp or throtl_data.
 * Determine the associated throtl_data accordingly and return it.
 */
static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
{
	struct throtl_grp *tg = sq_to_tg(sq);

	if (tg)
		return tg->td;
	else
		return container_of(sq, struct throtl_data, service_queue);
}

/**
 * throtl_log - log debug message via blktrace
 * @sq: the service_queue being reported
 * @fmt: printf format string
 * @args: printf args
 *
 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 * throtl_grp; otherwise, just "throtl".
 *
 * TODO: this should be made a function and name formatting should happen
 * after testing whether blktrace is enabled.
 */
#define throtl_log(sq, fmt, args...)	do {				\
	struct throtl_grp *__tg = sq_to_tg((sq));			\
	struct throtl_data *__td = sq_to_td((sq));			\
									\
	(void)__td;							\
	if ((__tg)) {							\
		char __pbuf[128];					\
									\
		blkg_path(tg_to_blkg(__tg), __pbuf, sizeof(__pbuf));	\
		blk_add_trace_msg(__td->queue, "throtl %s " fmt, __pbuf, ##args); \
	} else {							\
		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
	}								\
} while (0)

/*
 * Worker for allocating per cpu stat for tgs. This is scheduled on the
 * system_wq once there are some groups on the alloc_list waiting for
 * allocation.
 */
static void tg_stats_alloc_fn(struct work_struct *work)
{
	static struct tg_stats_cpu *stats_cpu;	/* this fn is non-reentrant */
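	/*
	 * Note (descriptive): this static local acts as a one-slot cache -
	 * the swap() below hands the preallocated percpu area to one tg per
	 * pass, and an allocation left unclaimed stays here for the next run.
	 */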
	struct delayed_work *dwork = to_delayed_work(work);
	bool empty = false;

alloc_stats:
	if (!stats_cpu) {
		stats_cpu = alloc_percpu(struct tg_stats_cpu);
		if (!stats_cpu) {
			/* allocation failed, try again after some time */
			schedule_delayed_work(dwork, msecs_to_jiffies(10));
			return;
		}
	}

	spin_lock_irq(&tg_stats_alloc_lock);

	if (!list_empty(&tg_stats_alloc_list)) {
		struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list,
							 struct throtl_grp,
							 stats_alloc_node);
		swap(tg->stats_cpu, stats_cpu);
		list_del_init(&tg->stats_alloc_node);
	}

	empty = list_empty(&tg_stats_alloc_list);
	spin_unlock_irq(&tg_stats_alloc_lock);
	if (!empty)
		goto alloc_stats;
}

/* init a service_queue, assumes the caller zeroed it */
static void throtl_service_queue_init(struct throtl_service_queue *sq,
				      struct throtl_service_queue *parent_sq)
{
	bio_list_init(&sq->bio_lists[0]);
	bio_list_init(&sq->bio_lists[1]);
	sq->pending_tree = RB_ROOT;
	sq->parent_sq = parent_sq;
}

static void throtl_pd_init(struct blkcg_gq *blkg)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	struct throtl_data *td = blkg->q->td;
	unsigned long flags;

	throtl_service_queue_init(&tg->service_queue, &td->service_queue);
	RB_CLEAR_NODE(&tg->rb_node);
	tg->td = td;

	tg->bps[READ] = -1;
	tg->bps[WRITE] = -1;
	tg->iops[READ] = -1;
	tg->iops[WRITE] = -1;

	/*
	 * Ugh... We need to perform per-cpu allocation for tg->stats_cpu
	 * but percpu allocator can't be called from IO path. Queue tg on
	 * tg_stats_alloc_list and allocate from work item.
	 */
	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
	list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
	schedule_delayed_work(&tg_stats_alloc_work, 0);
	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
}

static void throtl_pd_exit(struct blkcg_gq *blkg)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	unsigned long flags;

	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
	list_del_init(&tg->stats_alloc_node);
	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);

	free_percpu(tg->stats_cpu);
}

static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	int cpu;

	if (tg->stats_cpu == NULL)
		return;

	for_each_possible_cpu(cpu) {
		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);

		blkg_rwstat_reset(&sc->service_bytes);
		blkg_rwstat_reset(&sc->serviced);
	}
}

static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
					   struct blkcg *blkcg)
{
	/*
	 * This is the common case when there are no blkcgs. Avoid lookup
	 * in this case
	 */
	if (blkcg == &blkcg_root)
		return td_root_tg(td);

	return blkg_to_tg(blkg_lookup(blkcg, td->queue));
}

static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
						  struct blkcg *blkcg)
{
	struct request_queue *q = td->queue;
	struct throtl_grp *tg = NULL;

	/*
	 * This is the common case when there are no blkcgs. Avoid lookup
	 * in this case
	 */
	if (blkcg == &blkcg_root) {
		tg = td_root_tg(td);
	} else {
		struct blkcg_gq *blkg;

		blkg = blkg_lookup_create(blkcg, q);

		/* if %NULL and @q is alive, fall back to root_tg */
		if (!IS_ERR(blkg))
			tg = blkg_to_tg(blkg);
		else if (!blk_queue_dying(q))
			tg = td_root_tg(td);
	}

	return tg;
}

static struct throtl_grp *
throtl_rb_first(struct throtl_service_queue *parent_sq)
{
	/* Service tree is empty */
	if (!parent_sq->nr_pending)
		return NULL;

	if (!parent_sq->first_pending)
		parent_sq->first_pending = rb_first(&parent_sq->pending_tree);

	if (parent_sq->first_pending)
		return rb_entry_tg(parent_sq->first_pending);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n,
			    struct throtl_service_queue *parent_sq)
{
	if (parent_sq->first_pending == n)
		parent_sq->first_pending = NULL;
	rb_erase_init(n, &parent_sq->pending_tree);
	--parent_sq->nr_pending;
}

static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(parent_sq);
	if (!tg)
		return;

	parent_sq->first_pending_disptime = tg->disptime;
}

static void tg_service_queue_add(struct throtl_grp *tg)
{
	struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
	struct rb_node **node = &parent_sq->pending_tree.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;
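	/*
	 * Note (descriptive): "left" stays 1 only if we never descend to the
	 * right below, i.e. the new node ends up leftmost and can then be
	 * cached as parent_sq->first_pending.
	 */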

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		parent_sq->first_pending = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
}

static void __throtl_enqueue_tg(struct throtl_grp *tg)
{
	tg_service_queue_add(tg);
	tg->flags |= THROTL_TG_PENDING;
	tg->service_queue.parent_sq->nr_pending++;
}

static void throtl_enqueue_tg(struct throtl_grp *tg)
{
	if (!(tg->flags & THROTL_TG_PENDING))
		__throtl_enqueue_tg(tg);
}

static void __throtl_dequeue_tg(struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
	tg->flags &= ~THROTL_TG_PENDING;
}

static void throtl_dequeue_tg(struct throtl_grp *tg)
{
	if (tg->flags & THROTL_TG_PENDING)
		__throtl_dequeue_tg(tg);
}

/* Call with queue lock held */
static void throtl_schedule_delayed_work(struct throtl_data *td,
					 unsigned long delay)
{
	struct delayed_work *dwork = &td->dispatch_work;
	struct throtl_service_queue *sq = &td->service_queue;

	mod_delayed_work(kthrotld_workqueue, dwork, delay);
	throtl_log(sq, "schedule work. delay=%lu jiffies=%lu", delay, jiffies);
}

static void throtl_schedule_next_dispatch(struct throtl_data *td)
{
	struct throtl_service_queue *sq = &td->service_queue;

	/* any pending children left? */
	if (!sq->nr_pending)
		return;

	update_min_dispatch_time(sq);

	if (time_before_eq(sq->first_pending_disptime, jiffies))
		throtl_schedule_delayed_work(td, 0);
	else
		throtl_schedule_delayed_work(td, sq->first_pending_disptime - jiffies);
}

static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
					unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
				       unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
	throtl_log(&tg->service_queue,
		   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return 0;

	return 1;
}

/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps is unlimited (-1), the time slice doesn't get renewed.
	 * Don't try to trim the slice if it is already used up; a new
	 * slice will start when appropriate.
	 */
	if (throtl_slice_used(tg, rw))
		return;

	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially the cgroup limit was very low, resulting in a high
	 * slice_end, but later the limit was bumped up and the bio was
	 * dispatched sooner; then we need to reduce slice_end. A bogus high
	 * slice_end is bad because it does not allow a new slice to start.
	 */

	throtl_set_slice_end(tg, rw, jiffies + throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg->bps[rw] * throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;
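	/*
	 * Example (illustrative): with bps = 1 MiB/s, iops = 100 and two
	 * expired 100ms slices (nr_slices == 2), bytes_trim = 1048576 * 2 / 10
	 * ~= 209715 and io_trim = 100 * 2 / 10 = 20 - exactly the budget
	 * those two slices would have granted.
	 */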

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * throtl_slice;

	throtl_log(&tg->service_queue,
		   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
		   tg->slice_start[rw], tg->slice_end[rw], jiffies);
}

static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
				  unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value: the minimum iops is
	 * 1, so at most the elapsed jiffies should be the equivalent of one
	 * second, since we allow dispatch after one second and by then the
	 * slice should have been trimmed.
	 */

	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;
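	/*
	 * Example (illustrative): with iops = 100 and jiffy_elapsed_rnd
	 * rounded up to one 100ms slice, io_allowed = 100 * HZ/10 / HZ = 10.
	 */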

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
	else
		jiffy_wait = 1;

	if (wait)
		*wait = jiffy_wait;
	return 0;
}

static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
				 unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	tmp = tg->bps[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;
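	/*
	 * Example (illustrative): with bps = 1 MiB/s and jiffy_elapsed_rnd
	 * rounded up to one 100ms slice, bytes_allowed = 1048576 / 10
	 * ~= 104857.
	 */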

	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return 0;
}

static bool tg_no_rule_group(struct throtl_grp *tg, bool rw)
{
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
		return 1;
	return 0;
}

/*
 * Returns whether one can dispatch a bio or not. Also returns the approx
 * number of jiffies to wait before this bio is within the IO rate and can
 * be dispatched.
 */
static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
			    unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently the whole state machine of the group depends on the
	 * first bio queued in the group's bio list, so one should not call
	 * this function with a different bio if there are other bios
	 * queued.
	 */
	BUG_ON(tg->service_queue.nr_queued[rw] &&
	       bio != bio_list_peek(&tg->service_queue.bio_lists[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/*
	 * If the previous slice expired, start a new one; otherwise
	 * renew/extend the existing slice to make sure it is at least
	 * throtl_slice interval long since now.
	 */
	if (throtl_slice_used(tg, rw))
		throtl_start_new_slice(tg, rw);
	else {
		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
			throtl_extend_slice(tg, rw, jiffies + throtl_slice);
	}

	if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
	    tg_with_in_iops_limit(tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return 1;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(tg, rw, jiffies + max_wait);

	return 0;
}

static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
					 int rw)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);
	struct tg_stats_cpu *stats_cpu;
	unsigned long flags;

	/* If per cpu stats are not allocated yet, don't do any accounting. */
	if (tg->stats_cpu == NULL)
		return;

	/*
	 * Disabling interrupts provides mutual exclusion between two
	 * writes on the same cpu. It probably is not needed for 64-bit.
	 * Not optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(tg->stats_cpu);

	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);

	local_irq_restore(flags);
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_size;
	tg->io_disp[rw]++;

	/*
	 * REQ_THROTTLED is used to prevent the same bio from being
	 * throttled more than once, as a throttled bio will go through
	 * blk-throtl a second time when it eventually gets issued. Set it
	 * when a bio is being charged to a tg.
	 *
	 * Dispatch stats aren't recursive and each @bio should only be
	 * accounted by the @tg it was originally associated with. Let's
	 * update the stats when setting REQ_THROTTLED for the first time
	 * which is guaranteed to be for the @bio's original tg.
	 */
	if (!(bio->bi_rw & REQ_THROTTLED)) {
		bio->bi_rw |= REQ_THROTTLED;
		throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size,
					     bio->bi_rw);
	}
}

static void throtl_add_bio_tg(struct bio *bio, struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	bool rw = bio_data_dir(bio);

	/*
	 * If @tg doesn't currently have any bios queued in the same
	 * direction, queueing @bio can change when @tg should be
	 * dispatched. Mark that @tg was empty. This is automatically
	 * cleared on the next tg_update_disptime().
	 */
	if (!sq->nr_queued[rw])
		tg->flags |= THROTL_TG_WAS_EMPTY;

	bio_list_add(&sq->bio_lists[rw], bio);
	/* Take a bio reference on tg */
	blkg_get(tg_to_blkg(tg));
	sq->nr_queued[rw]++;
	tg->td->nr_queued[rw]++;
	throtl_enqueue_tg(tg);
}

static void tg_update_disptime(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	if ((bio = bio_list_peek(&sq->bio_lists[READ])))
		tg_may_dispatch(tg, bio, &read_wait);

	if ((bio = bio_list_peek(&sq->bio_lists[WRITE])))
		tg_may_dispatch(tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(tg);

	/* see throtl_add_bio_tg() */
	tg->flags &= ~THROTL_TG_WAS_EMPTY;
}

static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct bio *bio;

	bio = bio_list_pop(&sq->bio_lists[rw]);
	sq->nr_queued[rw]--;
	/* Drop bio reference on blkg */
	blkg_put(tg_to_blkg(tg));

	BUG_ON(tg->td->nr_queued[rw] <= 0);
	tg->td->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);
	bio_list_add(&sq->parent_sq->bio_lists[rw], bio);

	throtl_trim_slice(tg, rw);
}

static int throtl_dispatch_tg(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum * 3 / 4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */
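	/* (with the default quantum of 8: at most 6 reads and 2 writes) */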

	while ((bio = bio_list_peek(&sq->bio_lists[READ])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = bio_list_peek(&sq->bio_lists[WRITE])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}

static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
{
	unsigned int nr_disp = 0;

	while (1) {
		struct throtl_grp *tg = throtl_rb_first(parent_sq);
		struct throtl_service_queue *sq;

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(tg);

		nr_disp += throtl_dispatch_tg(tg);

		sq = &tg->service_queue;
		if (sq->nr_queued[0] || sq->nr_queued[1])
			tg_update_disptime(tg);

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}

Tejun Heo | cb76199 | 2013-05-14 13:52:31 -0700 | [diff] [blame] | 904 | /* work function to dispatch throttled bios */ |
| 905 | void blk_throtl_dispatch_work_fn(struct work_struct *work) |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 906 | { |
Tejun Heo | cb76199 | 2013-05-14 13:52:31 -0700 | [diff] [blame] | 907 | struct throtl_data *td = container_of(to_delayed_work(work), |
| 908 | struct throtl_data, dispatch_work); |
Tejun Heo | 651930b | 2013-05-14 13:52:35 -0700 | [diff] [blame] | 909 | struct throtl_service_queue *sq = &td->service_queue; |
Tejun Heo | cb76199 | 2013-05-14 13:52:31 -0700 | [diff] [blame] | 910 | struct request_queue *q = td->queue; |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 911 | unsigned int nr_disp = 0; |
| 912 | struct bio_list bio_list_on_stack; |
| 913 | struct bio *bio; |
Vivek Goyal | 69d60eb | 2011-03-09 08:27:37 +0100 | [diff] [blame] | 914 | struct blk_plug plug; |
Tejun Heo | 651930b | 2013-05-14 13:52:35 -0700 | [diff] [blame] | 915 | int rw; |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 916 | |
| 917 | spin_lock_irq(q->queue_lock); |
| 918 | |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 919 | bio_list_init(&bio_list_on_stack); |
| 920 | |
Tejun Heo | fda6f27 | 2013-05-14 13:52:36 -0700 | [diff] [blame] | 921 | throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u", |
Tejun Heo | 6a52560 | 2013-05-14 13:52:32 -0700 | [diff] [blame] | 922 | td->nr_queued[READ] + td->nr_queued[WRITE], |
| 923 | td->nr_queued[READ], td->nr_queued[WRITE]); |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 924 | |
Tejun Heo | 651930b | 2013-05-14 13:52:35 -0700 | [diff] [blame] | 925 | nr_disp = throtl_select_dispatch(sq); |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 926 | |
Tejun Heo | 651930b | 2013-05-14 13:52:35 -0700 | [diff] [blame] | 927 | if (nr_disp) { |
| 928 | for (rw = READ; rw <= WRITE; rw++) { |
| 929 | bio_list_merge(&bio_list_on_stack, &sq->bio_lists[rw]); |
| 930 | bio_list_init(&sq->bio_lists[rw]); |
| 931 | } |
Tejun Heo | fda6f27 | 2013-05-14 13:52:36 -0700 | [diff] [blame] | 932 | throtl_log(sq, "bios disp=%u", nr_disp); |
Tejun Heo | 651930b | 2013-05-14 13:52:35 -0700 | [diff] [blame] | 933 | } |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 934 | |
| 935 | throtl_schedule_next_dispatch(td); |
Tejun Heo | 6a52560 | 2013-05-14 13:52:32 -0700 | [diff] [blame] | 936 | |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 937 | spin_unlock_irq(q->queue_lock); |
| 938 | |
| 939 | /* |
| 940 | * If we dispatched some requests, issue them inside a plug/unplug
| 941 | * pair so they reach the driver as a batch and start immediately.
| 942 | */ |
| 943 | if (nr_disp) { |
Vivek Goyal | 69d60eb | 2011-03-09 08:27:37 +0100 | [diff] [blame] | 944 | blk_start_plug(&plug); |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 945 | while ((bio = bio_list_pop(&bio_list_on_stack)))
| 946 | generic_make_request(bio); |
Vivek Goyal | 69d60eb | 2011-03-09 08:27:37 +0100 | [diff] [blame] | 947 | blk_finish_plug(&plug); |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 948 | } |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 949 | } |
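
The work function above uses a common locking pattern: splice all queued bios onto an on-stack list while holding queue_lock, then issue them after dropping the lock (bracketed by blk_start_plug()/blk_finish_plug() so the driver sees a batch). A hedged userspace sketch of the splice-then-issue-unlocked pattern, with a pthread mutex standing in for queue_lock and a toy list for struct bio_list:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* hypothetical singly linked list standing in for struct bio_list */
struct node { struct node *next; int id; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; /* "queue_lock" */
static struct node *queued;                              /* protected by lock */

static void dispatch_work(void)
{
	struct node *list_on_stack, *n;

	pthread_mutex_lock(&lock);
	list_on_stack = queued;	/* splice everything onto the stack */
	queued = NULL;
	pthread_mutex_unlock(&lock);

	/* issue outside the lock; in the kernel this loop is wrapped
	 * in blk_start_plug()/blk_finish_plug() to batch the requests */
	while ((n = list_on_stack)) {
		list_on_stack = n->next;
		printf("issue bio %d\n", n->id);
		free(n);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->id = i;
		n->next = queued;
		queued = n;
	}
	dispatch_work();
	return 0;
}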
| 950 | |
Tejun Heo | f95a04a | 2012-04-16 13:57:26 -0700 | [diff] [blame] | 951 | static u64 tg_prfill_cpu_rwstat(struct seq_file *sf, |
| 952 | struct blkg_policy_data *pd, int off) |
Tejun Heo | 41b38b6 | 2012-04-01 14:38:44 -0700 | [diff] [blame] | 953 | { |
Tejun Heo | f95a04a | 2012-04-16 13:57:26 -0700 | [diff] [blame] | 954 | struct throtl_grp *tg = pd_to_tg(pd); |
Tejun Heo | 41b38b6 | 2012-04-01 14:38:44 -0700 | [diff] [blame] | 955 | struct blkg_rwstat rwstat = { }, tmp; |
| 956 | int i, cpu; |
| 957 | |
| 958 | for_each_possible_cpu(cpu) { |
Tejun Heo | 8a3d261 | 2012-04-01 14:38:44 -0700 | [diff] [blame] | 959 | struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu); |
Tejun Heo | 41b38b6 | 2012-04-01 14:38:44 -0700 | [diff] [blame] | 960 | |
| 961 | tmp = blkg_rwstat_read((void *)sc + off); |
| 962 | for (i = 0; i < BLKG_RWSTAT_NR; i++) |
| 963 | rwstat.cnt[i] += tmp.cnt[i]; |
| 964 | } |
| 965 | |
Tejun Heo | f95a04a | 2012-04-16 13:57:26 -0700 | [diff] [blame] | 966 | return __blkg_prfill_rwstat(sf, pd, &rwstat); |
Tejun Heo | 41b38b6 | 2012-04-01 14:38:44 -0700 | [diff] [blame] | 967 | } |
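
tg_prfill_cpu_rwstat() shows the usual per-cpu counter scheme: writers bump their own CPU's slot with no shared-cacheline contention, and a reader sums every slot on demand. A minimal sketch under the assumption of a fixed CPU count, with plain counters in place of blkg_rwstat:

#include <stdio.h>

#define NR_CPUS   4
#define RWSTAT_NR 2   /* READ, WRITE */

/* hypothetical per-cpu stats, one slot per CPU */
static unsigned long long stats[NR_CPUS][RWSTAT_NR];

/* writer side: each CPU bumps only its own slot */
static void charge(int cpu, int rw, unsigned long long bytes)
{
	stats[cpu][rw] += bytes;
}

/* reader side: sum every CPU's slot, as the prfill callback does */
static unsigned long long read_total(int rw)
{
	unsigned long long sum = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		sum += stats[cpu][rw];
	return sum;
}

int main(void)
{
	charge(0, 0, 4096);
	charge(2, 0, 8192);
	printf("READ bytes: %llu\n", read_total(0));
	return 0;
}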
| 968 | |
Tejun Heo | 8a3d261 | 2012-04-01 14:38:44 -0700 | [diff] [blame] | 969 | static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft, |
| 970 | struct seq_file *sf) |
Tejun Heo | 41b38b6 | 2012-04-01 14:38:44 -0700 | [diff] [blame] | 971 | { |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 972 | struct blkcg *blkcg = cgroup_to_blkcg(cgrp); |
Tejun Heo | 41b38b6 | 2012-04-01 14:38:44 -0700 | [diff] [blame] | 973 | |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 974 | blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl, |
Tejun Heo | 5bc4afb1 | 2012-04-01 14:38:45 -0700 | [diff] [blame] | 975 | cft->private, true); |
Tejun Heo | 41b38b6 | 2012-04-01 14:38:44 -0700 | [diff] [blame] | 976 | return 0; |
| 977 | } |
| 978 | |
Tejun Heo | f95a04a | 2012-04-16 13:57:26 -0700 | [diff] [blame] | 979 | static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd, |
| 980 | int off) |
Tejun Heo | 60c2bc2 | 2012-04-01 14:38:43 -0700 | [diff] [blame] | 981 | { |
Tejun Heo | f95a04a | 2012-04-16 13:57:26 -0700 | [diff] [blame] | 982 | struct throtl_grp *tg = pd_to_tg(pd); |
| 983 | u64 v = *(u64 *)((void *)tg + off); |
Tejun Heo | 60c2bc2 | 2012-04-01 14:38:43 -0700 | [diff] [blame] | 984 | |
Tejun Heo | af133ce | 2012-04-01 14:38:44 -0700 | [diff] [blame] | 985 | if (v == -1) |
Tejun Heo | 60c2bc2 | 2012-04-01 14:38:43 -0700 | [diff] [blame] | 986 | return 0; |
Tejun Heo | f95a04a | 2012-04-16 13:57:26 -0700 | [diff] [blame] | 987 | return __blkg_prfill_u64(sf, pd, v); |
Tejun Heo | 60c2bc2 | 2012-04-01 14:38:43 -0700 | [diff] [blame] | 988 | } |
| 989 | |
Tejun Heo | f95a04a | 2012-04-16 13:57:26 -0700 | [diff] [blame] | 990 | static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd, |
| 991 | int off) |
Tejun Heo | 60c2bc2 | 2012-04-01 14:38:43 -0700 | [diff] [blame] | 992 | { |
Tejun Heo | f95a04a | 2012-04-16 13:57:26 -0700 | [diff] [blame] | 993 | struct throtl_grp *tg = pd_to_tg(pd); |
| 994 | unsigned int v = *(unsigned int *)((void *)tg + off); |
Tejun Heo | af133ce | 2012-04-01 14:38:44 -0700 | [diff] [blame] | 995 | |
| 996 | if (v == -1) |
| 997 | return 0; |
Tejun Heo | f95a04a | 2012-04-16 13:57:26 -0700 | [diff] [blame] | 998 | return __blkg_prfill_u64(sf, pd, v); |
Tejun Heo | af133ce | 2012-04-01 14:38:44 -0700 | [diff] [blame] | 999 | } |
| 1000 | |
| 1001 | static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft, |
| 1002 | struct seq_file *sf) |
| 1003 | { |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1004 | blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_u64, |
| 1005 | &blkcg_policy_throtl, cft->private, false); |
Tejun Heo | 60c2bc2 | 2012-04-01 14:38:43 -0700 | [diff] [blame] | 1006 | return 0; |
| 1007 | } |
| 1008 | |
Tejun Heo | af133ce | 2012-04-01 14:38:44 -0700 | [diff] [blame] | 1009 | static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft, |
| 1010 | struct seq_file *sf) |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 1011 | { |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1012 | blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_uint, |
| 1013 | &blkcg_policy_throtl, cft->private, false); |
Tejun Heo | af133ce | 2012-04-01 14:38:44 -0700 | [diff] [blame] | 1014 | return 0; |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 1015 | } |
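
The four helpers above are generic over which field they touch: each cftype stores an offsetof() into struct throtl_grp in ->private, and the helper dereferences (void *)tg + off. A small standalone illustration of that offset-based field selection; struct cfg is a hypothetical stand-in for the throtl_grp config fields:

#include <stdio.h>
#include <stddef.h>

/* hypothetical stand-in for struct throtl_grp's config fields */
struct cfg {
	unsigned long long bps[2];   /* [READ], [WRITE] */
	unsigned int iops[2];
};

/* generic printer: 'off' selects which u64 field to show */
static void print_u64_field(const struct cfg *c, size_t off)
{
	unsigned long long v = *(const unsigned long long *)((const char *)c + off);

	printf("%llu\n", v);
}

int main(void)
{
	struct cfg c = { .bps = { 1048576, 2097152 }, .iops = { 100, 200 } };

	/* same trick as .private = offsetof(struct throtl_grp, bps[READ]) */
	print_u64_field(&c, offsetof(struct cfg, bps[0]));
	print_u64_field(&c, offsetof(struct cfg, bps[1]));
	return 0;
}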
| 1016 | |
Tejun Heo | af133ce | 2012-04-01 14:38:44 -0700 | [diff] [blame] | 1017 | static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf, |
| 1018 | bool is_u64) |
Tejun Heo | 60c2bc2 | 2012-04-01 14:38:43 -0700 | [diff] [blame] | 1019 | { |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1020 | struct blkcg *blkcg = cgroup_to_blkcg(cgrp); |
Tejun Heo | 60c2bc2 | 2012-04-01 14:38:43 -0700 | [diff] [blame] | 1021 | struct blkg_conf_ctx ctx; |
Tejun Heo | af133ce | 2012-04-01 14:38:44 -0700 | [diff] [blame] | 1022 | struct throtl_grp *tg; |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1023 | struct throtl_data *td; |
Tejun Heo | 60c2bc2 | 2012-04-01 14:38:43 -0700 | [diff] [blame] | 1024 | int ret; |
| 1025 | |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1026 | ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx); |
Tejun Heo | 60c2bc2 | 2012-04-01 14:38:43 -0700 | [diff] [blame] | 1027 | if (ret) |
| 1028 | return ret; |
| 1029 | |
Tejun Heo | af133ce | 2012-04-01 14:38:44 -0700 | [diff] [blame] | 1030 | tg = blkg_to_tg(ctx.blkg); |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1031 | td = ctx.blkg->q->td; |
Tejun Heo | af133ce | 2012-04-01 14:38:44 -0700 | [diff] [blame] | 1032 | |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1033 | if (!ctx.v) |
| 1034 | ctx.v = -1; |
Tejun Heo | af133ce | 2012-04-01 14:38:44 -0700 | [diff] [blame] | 1035 | |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1036 | if (is_u64) |
| 1037 | *(u64 *)((void *)tg + cft->private) = ctx.v; |
| 1038 | else |
| 1039 | *(unsigned int *)((void *)tg + cft->private) = ctx.v; |
Tejun Heo | af133ce | 2012-04-01 14:38:44 -0700 | [diff] [blame] | 1040 | |
Tejun Heo | fda6f27 | 2013-05-14 13:52:36 -0700 | [diff] [blame] | 1041 | throtl_log(&tg->service_queue, |
| 1042 | "limit change rbps=%llu wbps=%llu riops=%u wiops=%u", |
| 1043 | tg->bps[READ], tg->bps[WRITE], |
| 1044 | tg->iops[READ], tg->iops[WRITE]); |
Tejun Heo | 632b449 | 2013-05-14 13:52:31 -0700 | [diff] [blame] | 1045 | |
| 1046 | /* |
| 1047 | * We're already holding queue_lock and know @tg is valid. Let's |
| 1048 | * apply the new config directly. |
| 1049 | * |
| 1050 | * Restart the slices for both READ and WRITE. A group's limits
| 1051 | * might be dropped suddenly and we don't want recently dispatched
| 1052 | * IO to be accounted against the new, lower rate.
| 1053 | */ |
Tejun Heo | 0f3457f | 2013-05-14 13:52:32 -0700 | [diff] [blame] | 1054 | throtl_start_new_slice(tg, READ);
| 1055 | throtl_start_new_slice(tg, WRITE);
Tejun Heo | 632b449 | 2013-05-14 13:52:31 -0700 | [diff] [blame] | 1056 | |
Tejun Heo | 5b2c16a | 2013-05-14 13:52:32 -0700 | [diff] [blame] | 1057 | if (tg->flags & THROTL_TG_PENDING) { |
Tejun Heo | 77216b0 | 2013-05-14 13:52:36 -0700 | [diff] [blame] | 1058 | tg_update_disptime(tg); |
Tejun Heo | 632b449 | 2013-05-14 13:52:31 -0700 | [diff] [blame] | 1059 | throtl_schedule_next_dispatch(td); |
| 1060 | } |
Tejun Heo | 60c2bc2 | 2012-04-01 14:38:43 -0700 | [diff] [blame] | 1061 | |
| 1062 | blkg_conf_finish(&ctx); |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1063 | return 0; |
Tejun Heo | 60c2bc2 | 2012-04-01 14:38:43 -0700 | [diff] [blame] | 1064 | } |
| 1065 | |
Tejun Heo | af133ce | 2012-04-01 14:38:44 -0700 | [diff] [blame] | 1066 | static int tg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft, |
| 1067 | const char *buf) |
Tejun Heo | 60c2bc2 | 2012-04-01 14:38:43 -0700 | [diff] [blame] | 1068 | { |
Tejun Heo | af133ce | 2012-04-01 14:38:44 -0700 | [diff] [blame] | 1069 | return tg_set_conf(cgrp, cft, buf, true); |
Tejun Heo | 60c2bc2 | 2012-04-01 14:38:43 -0700 | [diff] [blame] | 1070 | } |
| 1071 | |
Tejun Heo | af133ce | 2012-04-01 14:38:44 -0700 | [diff] [blame] | 1072 | static int tg_set_conf_uint(struct cgroup *cgrp, struct cftype *cft, |
| 1073 | const char *buf) |
Tejun Heo | 60c2bc2 | 2012-04-01 14:38:43 -0700 | [diff] [blame] | 1074 | { |
Tejun Heo | af133ce | 2012-04-01 14:38:44 -0700 | [diff] [blame] | 1075 | return tg_set_conf(cgrp, cft, buf, false); |
Tejun Heo | 60c2bc2 | 2012-04-01 14:38:43 -0700 | [diff] [blame] | 1076 | } |
| 1077 | |
| 1078 | static struct cftype throtl_files[] = { |
| 1079 | { |
| 1080 | .name = "throttle.read_bps_device", |
Tejun Heo | af133ce | 2012-04-01 14:38:44 -0700 | [diff] [blame] | 1081 | .private = offsetof(struct throtl_grp, bps[READ]), |
| 1082 | .read_seq_string = tg_print_conf_u64, |
| 1083 | .write_string = tg_set_conf_u64, |
Tejun Heo | 60c2bc2 | 2012-04-01 14:38:43 -0700 | [diff] [blame] | 1084 | .max_write_len = 256, |
| 1085 | }, |
| 1086 | { |
| 1087 | .name = "throttle.write_bps_device", |
Tejun Heo | af133ce | 2012-04-01 14:38:44 -0700 | [diff] [blame] | 1088 | .private = offsetof(struct throtl_grp, bps[WRITE]), |
| 1089 | .read_seq_string = tg_print_conf_u64, |
| 1090 | .write_string = tg_set_conf_u64, |
Tejun Heo | 60c2bc2 | 2012-04-01 14:38:43 -0700 | [diff] [blame] | 1091 | .max_write_len = 256, |
| 1092 | }, |
| 1093 | { |
| 1094 | .name = "throttle.read_iops_device", |
Tejun Heo | af133ce | 2012-04-01 14:38:44 -0700 | [diff] [blame] | 1095 | .private = offsetof(struct throtl_grp, iops[READ]), |
| 1096 | .read_seq_string = tg_print_conf_uint, |
| 1097 | .write_string = tg_set_conf_uint, |
Tejun Heo | 60c2bc2 | 2012-04-01 14:38:43 -0700 | [diff] [blame] | 1098 | .max_write_len = 256, |
| 1099 | }, |
| 1100 | { |
| 1101 | .name = "throttle.write_iops_device", |
Tejun Heo | af133ce | 2012-04-01 14:38:44 -0700 | [diff] [blame] | 1102 | .private = offsetof(struct throtl_grp, iops[WRITE]), |
| 1103 | .read_seq_string = tg_print_conf_uint, |
| 1104 | .write_string = tg_set_conf_uint, |
Tejun Heo | 60c2bc2 | 2012-04-01 14:38:43 -0700 | [diff] [blame] | 1105 | .max_write_len = 256, |
| 1106 | }, |
| 1107 | { |
| 1108 | .name = "throttle.io_service_bytes", |
Tejun Heo | 5bc4afb1 | 2012-04-01 14:38:45 -0700 | [diff] [blame] | 1109 | .private = offsetof(struct tg_stats_cpu, service_bytes), |
Tejun Heo | 8a3d261 | 2012-04-01 14:38:44 -0700 | [diff] [blame] | 1110 | .read_seq_string = tg_print_cpu_rwstat, |
Tejun Heo | 60c2bc2 | 2012-04-01 14:38:43 -0700 | [diff] [blame] | 1111 | }, |
| 1112 | { |
| 1113 | .name = "throttle.io_serviced", |
Tejun Heo | 5bc4afb1 | 2012-04-01 14:38:45 -0700 | [diff] [blame] | 1114 | .private = offsetof(struct tg_stats_cpu, serviced), |
Tejun Heo | 8a3d261 | 2012-04-01 14:38:44 -0700 | [diff] [blame] | 1115 | .read_seq_string = tg_print_cpu_rwstat, |
Tejun Heo | 60c2bc2 | 2012-04-01 14:38:43 -0700 | [diff] [blame] | 1116 | }, |
| 1117 | { } /* terminate */ |
| 1118 | }; |
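
The cgroup core prefixes these names with the subsystem, so they appear as e.g. blkio.throttle.read_bps_device, and a write takes a "<major>:<minor> <value>" line (the format parsed by blkg_conf_prep()). A hedged userspace sketch that sets a 1MB/s read limit; the cgroup v1 mount point and the 8:16 (sdb) device numbers are assumptions that vary per system:

#include <stdio.h>

int main(void)
{
	/* assumed cgroup v1 mount point; adjust for the local system */
	const char *path =
		"/sys/fs/cgroup/blkio/blkio.throttle.read_bps_device";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* "<major>:<minor> <bytes per second>"; 8:16 is hypothetical (sdb) */
	if (fprintf(f, "8:16 1048576\n") < 0)
		perror("fprintf");
	return fclose(f) ? 1 : 0;
}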
| 1119 | |
Vivek Goyal | da52777 | 2011-03-02 19:05:33 -0500 | [diff] [blame] | 1120 | static void throtl_shutdown_wq(struct request_queue *q) |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 1121 | { |
| 1122 | struct throtl_data *td = q->td; |
| 1123 | |
Tejun Heo | cb76199 | 2013-05-14 13:52:31 -0700 | [diff] [blame] | 1124 | cancel_delayed_work_sync(&td->dispatch_work); |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 1125 | } |
| 1126 | |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1127 | static struct blkcg_policy blkcg_policy_throtl = { |
Tejun Heo | f9fcc2d | 2012-04-16 13:57:27 -0700 | [diff] [blame] | 1128 | .pd_size = sizeof(struct throtl_grp), |
| 1129 | .cftypes = throtl_files, |
| 1130 | |
| 1131 | .pd_init_fn = throtl_pd_init, |
| 1132 | .pd_exit_fn = throtl_pd_exit, |
| 1133 | .pd_reset_stats_fn = throtl_pd_reset_stats, |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 1134 | }; |
| 1135 | |
Tejun Heo | bc16a4f | 2011-10-19 14:33:01 +0200 | [diff] [blame] | 1136 | bool blk_throtl_bio(struct request_queue *q, struct bio *bio) |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 1137 | { |
| 1138 | struct throtl_data *td = q->td; |
| 1139 | struct throtl_grp *tg; |
Tejun Heo | 73f0d49 | 2013-05-14 13:52:35 -0700 | [diff] [blame] | 1140 | struct throtl_service_queue *sq; |
Tejun Heo | 0e9f416 | 2013-05-14 13:52:35 -0700 | [diff] [blame] | 1141 | bool rw = bio_data_dir(bio); |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1142 | struct blkcg *blkcg; |
Tejun Heo | bc16a4f | 2011-10-19 14:33:01 +0200 | [diff] [blame] | 1143 | bool throttled = false; |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 1144 | |
Tejun Heo | 2a0f61e | 2013-05-14 13:52:36 -0700 | [diff] [blame^] | 1145 | /* see throtl_charge_bio() */ |
| 1146 | if (bio->bi_rw & REQ_THROTTLED) |
Tejun Heo | bc16a4f | 2011-10-19 14:33:01 +0200 | [diff] [blame] | 1147 | goto out; |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 1148 | |
Vivek Goyal | af75cd3 | 2011-05-19 15:38:31 -0400 | [diff] [blame] | 1149 | /* |
| 1150 | * A throtl_grp pointer retrieved under RCU can be used to access
| 1151 | * basic fields like stats and io rates. If a group has no rules,
| 1152 | * just update the dispatch stats in a lockless manner and return.
| 1153 | */ |
Vivek Goyal | af75cd3 | 2011-05-19 15:38:31 -0400 | [diff] [blame] | 1154 | rcu_read_lock(); |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1155 | blkcg = bio_blkcg(bio); |
Tejun Heo | cd1604f | 2012-03-05 13:15:06 -0800 | [diff] [blame] | 1156 | tg = throtl_lookup_tg(td, blkcg); |
Vivek Goyal | af75cd3 | 2011-05-19 15:38:31 -0400 | [diff] [blame] | 1157 | if (tg) { |
Vivek Goyal | af75cd3 | 2011-05-19 15:38:31 -0400 | [diff] [blame] | 1158 | if (tg_no_rule_group(tg, rw)) { |
Tejun Heo | 629ed0b | 2012-04-01 14:38:44 -0700 | [diff] [blame] | 1159 | throtl_update_dispatch_stats(tg_to_blkg(tg), |
| 1160 | bio->bi_size, bio->bi_rw); |
Tejun Heo | 2a7f124 | 2012-03-05 13:15:01 -0800 | [diff] [blame] | 1161 | goto out_unlock_rcu; |
Vivek Goyal | af75cd3 | 2011-05-19 15:38:31 -0400 | [diff] [blame] | 1162 | } |
| 1163 | } |
Vivek Goyal | af75cd3 | 2011-05-19 15:38:31 -0400 | [diff] [blame] | 1164 | |
| 1165 | /* |
| 1166 | * Either the group has not been allocated yet, or it is not an
| 1167 | * unlimited IO group; throttle under the queue lock.
| 1168 | */ |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 1169 | spin_lock_irq(q->queue_lock); |
Tejun Heo | cd1604f | 2012-03-05 13:15:06 -0800 | [diff] [blame] | 1170 | tg = throtl_lookup_create_tg(td, blkcg); |
Tejun Heo | bc16a4f | 2011-10-19 14:33:01 +0200 | [diff] [blame] | 1171 | if (unlikely(!tg)) |
| 1172 | goto out_unlock; |
Vivek Goyal | f469a7b | 2011-05-19 15:38:23 -0400 | [diff] [blame] | 1173 | |
Tejun Heo | 73f0d49 | 2013-05-14 13:52:35 -0700 | [diff] [blame] | 1174 | sq = &tg->service_queue; |
| 1175 | |
Tejun Heo | 0e9f416 | 2013-05-14 13:52:35 -0700 | [diff] [blame] | 1176 | /* throtl is FIFO - if bios are already queued, this one must queue too */
| 1177 | if (sq->nr_queued[rw]) |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 1178 | goto queue_bio; |
Vivek Goyal | de701c7 | 2011-03-07 21:09:32 +0100 | [diff] [blame] | 1179 | |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 1180 | /* bio is within the group's rate limit */
Tejun Heo | 0f3457f | 2013-05-14 13:52:32 -0700 | [diff] [blame] | 1181 | if (tg_may_dispatch(tg, bio, NULL)) { |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 1182 | throtl_charge_bio(tg, bio); |
Vivek Goyal | 04521db | 2011-03-22 21:54:29 +0100 | [diff] [blame] | 1183 | |
| 1184 | /*
| 1185 | * We need to trim the slice even when bios are not being
| 1186 | * queued; otherwise a bio might not get queued for a long
| 1187 | * time, the slice keeps extending, and trim is never called.
| 1188 | * If the limits are then reduced suddenly, all the IO
| 1189 | * dispatched so far is accounted at the new low rate and
| 1190 | * newly queued IO gets a really long dispatch time.
| 1191 | *
| 1192 | * So keep trimming the slice even if no bio is queued.
| 1193 | */
Tejun Heo | 0f3457f | 2013-05-14 13:52:32 -0700 | [diff] [blame] | 1195 | throtl_trim_slice(tg, rw); |
Tejun Heo | bc16a4f | 2011-10-19 14:33:01 +0200 | [diff] [blame] | 1196 | goto out_unlock; |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 1197 | } |
| 1198 | |
| 1199 | queue_bio: |
Tejun Heo | fda6f27 | 2013-05-14 13:52:36 -0700 | [diff] [blame] | 1200 | throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d", |
| 1201 | rw == READ ? 'R' : 'W', |
| 1202 | tg->bytes_disp[rw], bio->bi_size, tg->bps[rw], |
| 1203 | tg->io_disp[rw], tg->iops[rw], |
| 1204 | sq->nr_queued[READ], sq->nr_queued[WRITE]); |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 1205 | |
Tejun Heo | 671058f | 2012-03-05 13:15:29 -0800 | [diff] [blame] | 1206 | bio_associate_current(bio); |
Tejun Heo | 77216b0 | 2013-05-14 13:52:36 -0700 | [diff] [blame] | 1207 | throtl_add_bio_tg(bio, tg); |
Tejun Heo | bc16a4f | 2011-10-19 14:33:01 +0200 | [diff] [blame] | 1208 | throttled = true; |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 1209 | |
Tejun Heo | 0e9f416 | 2013-05-14 13:52:35 -0700 | [diff] [blame] | 1210 | /* update @tg's dispatch time if @tg was empty before @bio */ |
| 1211 | if (tg->flags & THROTL_TG_WAS_EMPTY) { |
Tejun Heo | 77216b0 | 2013-05-14 13:52:36 -0700 | [diff] [blame] | 1212 | tg_update_disptime(tg); |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 1213 | throtl_schedule_next_dispatch(td); |
| 1214 | } |
| 1215 | |
Tejun Heo | bc16a4f | 2011-10-19 14:33:01 +0200 | [diff] [blame] | 1216 | out_unlock: |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 1217 | spin_unlock_irq(q->queue_lock); |
Tejun Heo | 2a7f124 | 2012-03-05 13:15:01 -0800 | [diff] [blame] | 1218 | out_unlock_rcu: |
| 1219 | rcu_read_unlock(); |
Tejun Heo | bc16a4f | 2011-10-19 14:33:01 +0200 | [diff] [blame] | 1220 | out: |
Tejun Heo | 2a0f61e | 2013-05-14 13:52:36 -0700 | [diff] [blame^] | 1221 | /* |
| 1222 | * As multiple blk-throtls may stack in the same issue path, we
| 1223 | * don't want bios to leave with the flag set. Clear the flag on
| 1224 | * bios that are actually being issued.
| 1225 | */ |
| 1226 | if (!throttled) |
| 1227 | bio->bi_rw &= ~REQ_THROTTLED; |
Tejun Heo | bc16a4f | 2011-10-19 14:33:01 +0200 | [diff] [blame] | 1228 | return throttled; |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 1229 | } |
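
Underneath blk_throtl_bio() the decision is plain slice accounting: over the running slice a group may send at most bps * elapsed bytes (tg_may_dispatch()), a bio that fits is charged (throtl_charge_bio()) and dispatched, and one that does not waits until the budget covers it. A deliberately simplified userspace sketch of that arithmetic; the struct, the floating-point clock, and the numbers are illustrative only:

#include <stdio.h>

/* hypothetical per-group slice state, cf. bytes_disp/slice in throtl_grp */
struct grp {
	unsigned long long bps;         /* allowed bytes per second */
	unsigned long long bytes_disp;  /* dispatched in current slice */
	double slice_start;             /* seconds */
};

/* may 'sz' bytes be dispatched at time 'now'?  If not, report the wait. */
static int may_dispatch(struct grp *g, unsigned long long sz,
			double now, double *wait)
{
	double elapsed = now - g->slice_start;
	unsigned long long allowed = (unsigned long long)(g->bps * elapsed);

	if (g->bytes_disp + sz <= allowed) {
		*wait = 0;
		return 1;
	}
	/* time until the accrued budget covers this bio */
	*wait = (double)(g->bytes_disp + sz - allowed) / g->bps;
	return 0;
}

int main(void)
{
	struct grp g = { .bps = 1048576, .bytes_disp = 0, .slice_start = 0.0 };
	double wait;

	if (may_dispatch(&g, 524288, 0.1, &wait))	/* 100ms in: ~104857B budget */
		g.bytes_disp += 524288;
	else
		printf("throttle: wait %.2fs\n", wait);	/* ~0.40s */
	return 0;
}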
| 1230 | |
Tejun Heo | c9a929d | 2011-10-19 14:42:16 +0200 | [diff] [blame] | 1231 | /** |
| 1232 | * blk_throtl_drain - drain throttled bios |
| 1233 | * @q: request_queue to drain throttled bios for |
| 1234 | * |
| 1235 | * Dispatch all currently throttled bios on @q through ->make_request_fn(). |
| 1236 | */ |
| 1237 | void blk_throtl_drain(struct request_queue *q) |
| 1238 | __releases(q->queue_lock) __acquires(q->queue_lock) |
| 1239 | { |
| 1240 | struct throtl_data *td = q->td; |
Tejun Heo | 0049af7 | 2013-05-14 13:52:33 -0700 | [diff] [blame] | 1241 | struct throtl_service_queue *parent_sq = &td->service_queue; |
Tejun Heo | c9a929d | 2011-10-19 14:42:16 +0200 | [diff] [blame] | 1242 | struct throtl_grp *tg; |
Tejun Heo | c9a929d | 2011-10-19 14:42:16 +0200 | [diff] [blame] | 1243 | struct bio *bio; |
Tejun Heo | 651930b | 2013-05-14 13:52:35 -0700 | [diff] [blame] | 1244 | int rw; |
Tejun Heo | c9a929d | 2011-10-19 14:42:16 +0200 | [diff] [blame] | 1245 | |
Andi Kleen | 8bcb6c7 | 2012-03-30 12:33:28 +0200 | [diff] [blame] | 1246 | queue_lockdep_assert_held(q); |
Tejun Heo | c9a929d | 2011-10-19 14:42:16 +0200 | [diff] [blame] | 1247 | |
Tejun Heo | 0049af7 | 2013-05-14 13:52:33 -0700 | [diff] [blame] | 1248 | while ((tg = throtl_rb_first(parent_sq))) { |
Tejun Heo | 73f0d49 | 2013-05-14 13:52:35 -0700 | [diff] [blame] | 1249 | struct throtl_service_queue *sq = &tg->service_queue; |
| 1250 | |
Tejun Heo | 77216b0 | 2013-05-14 13:52:36 -0700 | [diff] [blame] | 1251 | throtl_dequeue_tg(tg); |
Tejun Heo | c9a929d | 2011-10-19 14:42:16 +0200 | [diff] [blame] | 1252 | |
Tejun Heo | 73f0d49 | 2013-05-14 13:52:35 -0700 | [diff] [blame] | 1253 | while ((bio = bio_list_peek(&sq->bio_lists[READ]))) |
Tejun Heo | 77216b0 | 2013-05-14 13:52:36 -0700 | [diff] [blame] | 1254 | tg_dispatch_one_bio(tg, bio_data_dir(bio)); |
Tejun Heo | 73f0d49 | 2013-05-14 13:52:35 -0700 | [diff] [blame] | 1255 | while ((bio = bio_list_peek(&sq->bio_lists[WRITE]))) |
Tejun Heo | 77216b0 | 2013-05-14 13:52:36 -0700 | [diff] [blame] | 1256 | tg_dispatch_one_bio(tg, bio_data_dir(bio)); |
Tejun Heo | c9a929d | 2011-10-19 14:42:16 +0200 | [diff] [blame] | 1257 | } |
| 1258 | spin_unlock_irq(q->queue_lock); |
| 1259 | |
Tejun Heo | 651930b | 2013-05-14 13:52:35 -0700 | [diff] [blame] | 1260 | for (rw = READ; rw <= WRITE; rw++) |
| 1261 | while ((bio = bio_list_pop(&parent_sq->bio_lists[rw]))) |
| 1262 | generic_make_request(bio); |
Tejun Heo | c9a929d | 2011-10-19 14:42:16 +0200 | [diff] [blame] | 1263 | |
| 1264 | spin_lock_irq(q->queue_lock); |
| 1265 | } |
| 1266 | |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 1267 | int blk_throtl_init(struct request_queue *q) |
| 1268 | { |
| 1269 | struct throtl_data *td; |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1270 | int ret; |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 1271 | |
| 1272 | td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node); |
| 1273 | if (!td) |
| 1274 | return -ENOMEM; |
| 1275 | |
Tejun Heo | cb76199 | 2013-05-14 13:52:31 -0700 | [diff] [blame] | 1276 | INIT_DELAYED_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn); |
Tejun Heo | 77216b0 | 2013-05-14 13:52:36 -0700 | [diff] [blame] | 1277 | throtl_service_queue_init(&td->service_queue, NULL); |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 1278 | |
Tejun Heo | cd1604f | 2012-03-05 13:15:06 -0800 | [diff] [blame] | 1279 | q->td = td; |
Vivek Goyal | 29b1258 | 2011-05-19 15:38:24 -0400 | [diff] [blame] | 1280 | td->queue = q; |
Vivek Goyal | 02977e4 | 2010-10-01 14:49:48 +0200 | [diff] [blame] | 1281 | |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1282 | /* activate policy */ |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1283 | ret = blkcg_activate_policy(q, &blkcg_policy_throtl); |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1284 | if (ret) |
Vivek Goyal | 29b1258 | 2011-05-19 15:38:24 -0400 | [diff] [blame] | 1285 | kfree(td); |
Tejun Heo | a2b1693 | 2012-04-13 13:11:33 -0700 | [diff] [blame] | 1286 | return ret; |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 1287 | } |
| 1288 | |
| 1289 | void blk_throtl_exit(struct request_queue *q) |
| 1290 | { |
Tejun Heo | c875f4d | 2012-03-05 13:15:22 -0800 | [diff] [blame] | 1291 | BUG_ON(!q->td); |
Vivek Goyal | da52777 | 2011-03-02 19:05:33 -0500 | [diff] [blame] | 1292 | throtl_shutdown_wq(q); |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1293 | blkcg_deactivate_policy(q, &blkcg_policy_throtl); |
Tejun Heo | c9a929d | 2011-10-19 14:42:16 +0200 | [diff] [blame] | 1294 | kfree(q->td); |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 1295 | } |
| 1296 | |
| 1297 | static int __init throtl_init(void) |
| 1298 | { |
Vivek Goyal | 450adcb | 2011-03-01 13:40:54 -0500 | [diff] [blame] | 1299 | kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0); |
| 1300 | if (!kthrotld_workqueue) |
| 1301 | panic("Failed to create kthrotld\n"); |
| 1302 | |
Tejun Heo | 3c79839 | 2012-04-16 13:57:25 -0700 | [diff] [blame] | 1303 | return blkcg_policy_register(&blkcg_policy_throtl); |
Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 1304 | } |
| 1305 | |
| 1306 | module_init(throtl_init); |