#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg-q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private
 * data area by allocating a larger data structure which embeds
 * blkg_policy_data at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};
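
/*
 * A minimal sketch of the embedding described above, for a hypothetical
 * policy "foo" (the names are illustrative, not part of this interface).
 * Because blkg_policy_data sits at the beginning of the larger structure,
 * the policy can convert the pd pointers handed out by the blkcg core
 * back to its own type with container_of():
 *
 *	struct foo_grp {
 *		struct blkg_policy_data	pd;	// must be first
 *		u64			bytes_dispatched;
 *	};
 *
 *	static struct foo_grp *pd_to_foo(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct foo_grp, pd) : NULL;
 *	}
 */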

/*
 * Policies that need to keep per-blkcg data which is independent of any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a
 * larger data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/*
	 * Each blkg gets congested separately and the congestion state is
	 * propagated to the matching bdi_writeback_congested.
	 */
	struct bdi_writeback_congested	*wb_congested;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	atomic_t			refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};
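
/*
 * Sketch of how a policy might fill in and register the structure above
 * (hypothetical "foo" policy; the callback names are illustrative):
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.pd_alloc_fn	= foo_pd_alloc,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_free_fn	= foo_pd_free,
 *	};
 *
 *	// once at init time; blkcg_policy_register() assigns ->plid
 *	ret = blkcg_policy_register(&blkcg_policy_foo);
 *
 *	// per request_queue; allocates a pd for each blkg on @q
 *	ret = blkcg_activate_policy(q, &blkcg_policy_foo);
 */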

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);

u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
					     int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	u64				v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);

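/*
 * Sketch of a cftype write handler built on the two helpers above
 * (hypothetical "foo" policy; foo_update_limit() is illustrative and
 * error handling is trimmed).  blkg_conf_prep() parses a "MAJ:MIN VAL"
 * style @input, looks up the blkg, and returns with the queue lock held;
 * blkg_conf_finish() releases it:
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, input, &ctx);
 *	if (ret)
 *		return ret;
 *
 *	foo_update_limit(blkg_to_pd(ctx.blkg, &blkcg_policy_foo), ctx.v);
 *	blkg_conf_finish(&ctx);
 *	return 0;
 */
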
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return task_blkcg(current);
}

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return task_get_css(task, blkio_cgrp_id);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	char *p;

	p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	if (!p) {
		strncpy(buf, "<unavailable>", buflen);
		return -ENAMETOOLONG;
	}

	memmove(buf, p, buf + buflen - p);
	return 0;
}
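
/*
 * Example (a sketch): format the blkg's cgroup path into a local buffer
 * for a diagnostic message.
 *
 *	char path[128];
 *
 *	if (!blkg_path(blkg, path, sizeof(path)))
 *		pr_debug("limiting IO on %s\n", path);
 */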

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	atomic_inc(&blkg->refcnt);
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	if (atomic_dec_and_test(&blkg->refcnt))
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
			       bool update_hint);

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
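
/*
 * Example walk (a sketch; foo_update_pd() is a hypothetical per-policy
 * helper): visit @blkg and every online descendant under the RCU read
 * lock, as required above.
 *
 *	struct blkcg_gq *pos;
 *	struct cgroup_subsys_state *pos_css;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(pos, pos_css, blkg)
 *		foo_update_pd(blkg_to_pd(pos, &blkcg_policy_foo));
 *	rcu_read_unlock();
 */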

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup_create(blkcg, q);
	if (unlikely(IS_ERR(blkg)))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	if (rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}
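
/*
 * Example pairing (a sketch mirroring the request allocator's usage;
 * queue_lock is held in both paths):
 *
 *	// allocation path
 *	rl = blk_get_rl(q, bio);
 *	blk_rq_set_rl(rq, rl);
 *
 *	// completion/free path
 *	blk_put_rl(blk_rq_rl(rq));
 */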

struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
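
/*
 * Example (a sketch): wake anyone sleeping on request allocation for
 * every request_list of @q; per the comment above, queue_lock must be
 * held.
 *
 *	struct request_list *rl;
 *
 *	blk_queue_for_each_rl(rl, q) {
 *		wake_up_all(&rl->wait[BLK_RW_SYNC]);
 *		wake_up_all(&rl->wait[BLK_RW_ASYNC]);
 *	}
 */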

static inline void blkg_stat_init(struct blkg_stat *stat)
{
	u64_stats_init(&stat->syncp);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin_irq(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry_irq(&stat->syncp, start));

	return v;
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}

/**
 * blkg_stat_merge - merge a blkg_stat into another
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count to @to.
 */
static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
{
	blkg_stat_add(to, blkg_stat_read(from));
}

static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
{
	u64_stats_init(&rwstat->syncp);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}
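
/*
 * Example (a sketch): account a completed bio in hypothetical per-blkg
 * rwstats @foo->serviced and @foo->service_bytes, with the direction and
 * sync-ness taken from bio->bi_rw:
 *
 *	blkg_rwstat_add(&foo->serviced, bio->bi_rw, 1);
 *	blkg_rwstat_add(&foo->service_bytes, bio->bi_rw, bio->bi_iter.bi_size);
 */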

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin_irq(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

/**
 * blkg_rwstat_merge - merge a blkg_rwstat into another
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's counts to @to.
 */
static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
				     struct blkg_rwstat *from)
{
	struct blkg_rwstat v = blkg_rwstat_read(from);
	int i;

	u64_stats_update_begin(&to->syncp);
	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		to->cnt[i] += v.cnt[i];
	u64_stats_update_end(&to->syncp);
}

#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return NULL;
}

#ifdef CONFIG_BLOCK

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline int blkg_path(struct blkcg_gq *blkg,
			    char *buf, int buflen) { return -ENOSYS; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

#define blk_queue_for_each_rl(rl, q) \
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */