#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/percpu_counter.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

/*
 * blkg_[rw]stat->aux_cnt is excluded from local stats but included in
 * recursive ones.  It is used to carry stats of dead children and, for
 * blkg_rwstat, to carry result values from read and sum operations.
 */
struct blkg_stat {
	struct percpu_counter		cpu_cnt;
	atomic64_t			aux_cnt;
};

struct blkg_rwstat {
	struct percpu_counter		cpu_cnt[BLKG_RWSTAT_NR];
	atomic64_t			aux_cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private
 * data area by allocating a larger data structure which embeds
 * blkg_policy_data at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};

/*
 * Policies that need to keep per-blkcg data which is independent of any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a
 * larger data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};

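/*
 * Example (a sketch; "foo" and its fields are hypothetical, not part of
 * this header): a policy embeds the common fields at the start of its
 * private data area and converts back with container_of():
 *
 *	struct foo_group_data {
 *		struct blkg_policy_data	pd;	// must be first
 *		u64			weight;
 *	};
 *
 *	static struct foo_group_data *pd_to_fgd(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct foo_group_data, pd) : NULL;
 *	}
 */
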
/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/*
	 * Each blkg gets congested separately and the congestion state is
	 * propagated to the matching bdi_writeback_congested.
	 */
	struct bdi_writeback_congested	*wb_congested;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	atomic_t			refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_rwstat		stat_bytes;
	struct blkg_rwstat		stat_ios;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};

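/*
 * A minimal wiring sketch; the foo_* symbols are hypothetical and not
 * provided by this header.  ->plid is assigned by blkcg_policy_register()
 * and the policy takes effect on a queue once blkcg_activate_policy()
 * succeeds for it.
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.dfl_cftypes	= foo_dfl_files,
 *		.legacy_cftypes	= foo_legacy_files,
 *		.pd_alloc_fn	= foo_pd_alloc,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_free_fn	= foo_pd_free,
 *	};
 *
 *	// from module/init code
 *	ret = blkcg_policy_register(&blkcg_policy_foo);
 */
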
extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);
int blkg_print_stat_bytes(struct seq_file *sf, void *v);
int blkg_print_stat_ios(struct seq_file *sf, void *v);
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);

u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
			    struct blkcg_policy *pol, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
					     struct blkcg_policy *pol, int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	char				*body;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);

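/*
 * Typical use of the conf helpers from a cftype write callback (a sketch;
 * foo_parse() and blkcg_policy_foo are hypothetical):
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *	if (ret)
 *		return ret;
 *	ret = foo_parse(ctx.body, blkg_to_pd(ctx.blkg, &blkcg_policy_foo));
 *	blkg_conf_finish(&ctx);
 *
 * blkg_conf_prep() parses the "MAJ:MIN body" in @input and, on success,
 * returns with the blkg looked up and locks held; blkg_conf_finish()
 * releases them.
 */
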
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return css_to_blkcg(task_css(tsk, io_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return task_blkcg(current);
}

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return task_get_css(task, io_cgrp_id);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up the blkg for the @blkcg - @q pair regardless
 * of @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and the lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q, false);
}

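/*
 * A usage sketch: a looked-up blkg is only stable for the duration of the
 * RCU read-side critical section unless a reference is taken.
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		blkg_get(blkg);		// pin beyond the RCU section
 *	rcu_read_unlock();
 */
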
/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	char *p;

	p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	if (!p) {
		strncpy(buf, "<unavailable>", buflen);
		return -ENAMETOOLONG;
	}

	memmove(buf, p, buf + buflen - p);
	return 0;
}

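/*
 * Example (a sketch): format the owning cgroup's path for a log line.
 *
 *	char path[128];
 *
 *	if (!blkg_path(blkg, path, sizeof(path)))
 *		pr_info("blkg at %s\n", path);
 */
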
/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	atomic_inc(&blkg->refcnt);
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	if (atomic_dec_and_test(&blkg->refcnt))
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and is the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

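/*
 * Example (a sketch; foo_refresh() is a hypothetical per-blkg handler):
 * walk a subtree and update every online descendant.
 *
 *	struct blkcg_gq *d_blkg;
 *	struct cgroup_subsys_state *pos_css;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)
 *		foo_refresh(d_blkg);
 *	rcu_read_unlock();
 */
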
/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and is the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return a non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	if (rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

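/*
 * A sketch of the intended pairing (the allocation step is illustrative):
 *
 *	rl = blk_get_rl(q, bio);	// takes a blkg ref if non-root
 *	rq = alloc_request_from(rl);	// hypothetical allocator
 *	if (!rq)
 *		blk_put_rl(rl);		// drop the ref on failure
 *	else
 *		blk_rq_set_rl(rq, rl);	// the free path puts it instead
 */
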
/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}

struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

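/*
 * Example (a sketch; foo_drain_one() is hypothetical): visit every
 * request_list hanging off @q, root_rl first.
 *
 *	struct request_list *rl;
 *
 *	blk_queue_for_each_rl(rl, q)
 *		foo_drain_one(rl);
 */
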
static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
{
	int ret;

	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
	if (ret)
		return ret;

	atomic64_set(&stat->aux_cnt, 0);
	return 0;
}

static inline void blkg_stat_exit(struct blkg_stat *stat)
{
	percpu_counter_destroy(&stat->cpu_cnt);
}

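/*
 * Lifecycle sketch for a blkg_stat embedded in policy private data
 * (foo_stat is a hypothetical field; error handling elided):
 *
 *	if (blkg_stat_init(&fgd->foo_stat, gfp))	// pd_alloc path
 *		goto err;
 *	blkg_stat_add(&fgd->foo_stat, 1);		// hot path
 *	seq_printf(sf, "%llu\n", blkg_stat_read(&fgd->foo_stat));
 *	blkg_stat_exit(&fgd->foo_stat);			// pd_free path
 */
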
/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller must ensure that IRQs on the same CPU
 * don't re-enter this function for the same counter.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	__percpu_counter_add(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	return percpu_counter_sum_positive(&stat->cpu_cnt);
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	percpu_counter_set(&stat->cpu_cnt, 0);
	atomic64_set(&stat->aux_cnt, 0);
}

/**
 * blkg_stat_add_aux - add a blkg_stat into another's aux count
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_stat_add_aux(struct blkg_stat *to,
				     struct blkg_stat *from)
{
	atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
		     &to->aux_cnt);
}

static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
{
	int i, ret;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
		if (ret) {
			while (--i >= 0)
				percpu_counter_destroy(&rwstat->cpu_cnt[i]);
			return ret;
		}
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
	return 0;
}

static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		percpu_counter_destroy(&rwstat->cpu_cnt[i]);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	struct percpu_counter *cnt;

	if (rw & REQ_WRITE)
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];

	__percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);

	if (rw & REQ_SYNC)
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];

	__percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);
}

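/*
 * Example (a sketch; foo_serviced is a hypothetical field): account a
 * bio's size, picking the read/write and sync/async buckets from its rw
 * flags.
 *
 *	blkg_rwstat_add(&fgd->foo_serviced, bio->bi_rw, bio->bi_iter.bi_size);
 */
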
/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it in the aux counts.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat result;
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_set(&result.aux_cnt[i],
			     percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
	return result;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		percpu_counter_set(&rwstat->cpu_cnt[i], 0);
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
}

/**
 * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
				       struct blkg_rwstat *from)
{
	struct blkg_rwstat v = blkg_rwstat_read(from);
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_add(atomic64_read(&v.aux_cnt[i]) +
			     atomic64_read(&from->aux_cnt[i]),
			     &to->aux_cnt[i]);
}

#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
			   struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
				  struct bio *bio) { return false; }
#endif

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool throtl = false;

	rcu_read_lock();
	blkcg = bio_blkcg(bio);

	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg)) {
		spin_lock_irq(q->queue_lock);
		blkg = blkg_lookup_create(blkcg, q);
		if (IS_ERR(blkg))
			blkg = NULL;
		spin_unlock_irq(q->queue_lock);
	}

	throtl = blk_throtl_bio(q, blkg, bio);

	if (!throtl) {
		blkg = blkg ?: q->root_blkg;
		/* blkg_rwstat_add() expects the bio's rw flags (REQ_WRITE/REQ_SYNC) */
		blkg_rwstat_add(&blkg->stat_bytes, bio->bi_rw,
				bio->bi_iter.bi_size);
		blkg_rwstat_add(&blkg->stat_ios, bio->bi_rw, 1);
	}

	rcu_read_unlock();
	return !throtl;
}

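/*
 * A sketch of how the issue check sits in the submission path (compare
 * generic_make_request_checks()): a %false return means the bio has been
 * consumed by the throttler and must not be issued now.
 *
 *	if (!blkcg_bio_issue_check(q, bio))
 *		return;
 *	// otherwise, hand the bio to the queue's make_request_fn
 */
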
#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return NULL;
}

#ifdef CONFIG_BLOCK

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q) \
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */