#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

/* CFQ specific, out here for blkcg->cfq_weight */
#define CFQ_WEIGHT_MIN		10
#define CFQ_WEIGHT_MAX		1000
#define CFQ_WEIGHT_DEFAULT	500

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	/* TODO: per-policy storage in blkcg */
	unsigned int			cfq_weight;	/* belongs to cfq */
	unsigned int			cfq_leaf_weight;
};

struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg-q pair.
 *
 * There can be multiple active blkcg policies and each has its private
 * data on each blkg, the size of which is determined by
 * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
 * together with blkg and invokes pd_init/exit_fn() methods.
 *
 * Such private data must embed struct blkg_policy_data (pd) at the
 * beginning and pd_size can't be smaller than pd.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;

	/* used during policy activation */
	struct list_head		alloc_node;
};

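/*
 * For illustration, a policy's private data might look like the
 * following hypothetical sketch (invented names, not an in-tree policy):
 *
 *	struct my_pd {
 *		struct blkg_policy_data	pd;	(must come first)
 *		struct blkg_stat	my_stat;
 *	};
 *
 * with blkcg_policy->pd_size set to sizeof(struct my_pd), so the pd
 * pointer returned by blkg_to_pd() can simply be cast to struct my_pd.
 */
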
/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	atomic_t			refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);

struct blkcg_policy {
	int				plid;
	/* policy specific private data size */
	size_t				pd_size;
	/* cgroup files for the policy */
	struct cftype			*cftypes;

	/* operations */
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_exit_pd_fn		*pd_exit_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};

extern struct blkcg blkcg_root;

struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

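/*
 * A minimal registration sketch using the hypothetical names from the
 * example above (see blk-throttle.c or cfq-iosched.c for real users):
 *
 *	static struct blkcg_policy blkcg_policy_my = {
 *		.pd_size	= sizeof(struct my_pd),
 *		.cftypes	= my_files,
 *		.pd_init_fn	= my_pd_init,
 *	};
 *
 * The policy is registered once with blkcg_policy_register(), which
 * assigns ->plid, and is then enabled per-queue with
 * blkcg_activate_policy(), which allocates pd_size bytes of private
 * data on each blkg of that queue and invokes pd_init_fn() on them.
 */
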
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);

u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
					     int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	u64				v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);

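/*
 * blkg_conf_prep()/blkg_conf_finish() bracket configuration updates
 * arriving as "MAJ:MIN VAL" strings.  A sketch of the usual pattern in
 * a cftype write handler, with the hypothetical policy from above and
 * error handling abbreviated:
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_my, input, &ctx);
 *	if (ret)
 *		return ret;
 *	... apply ctx.v to ctx.blkg's private data ...
 *	blkg_conf_finish(&ctx);
 *
 * blkg_conf_prep() returns with the queue lock and RCU read lock held
 * on success and blkg_conf_finish() drops them, so the update in
 * between is properly synchronized.
 */
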
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return task_blkcg(current);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	char *p;

	p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	if (!p) {
		strncpy(buf, "<unavailable>", buflen);
		return -ENAMETOOLONG;
	}

	memmove(buf, p, buf + buflen - p);
	return 0;
}
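
/*
 * Typical use is in debug or trace output, e.g. (sketch):
 *
 *	char path[128];
 *
 *	blkg_path(blkg, path, sizeof(path));
 *	pr_debug("throttling %s\n", path);
 *
 * On failure the buffer is filled with "<unavailable>", so it is safe
 * to print regardless of the return value.
 */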

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	atomic_inc(&blkg->refcnt);
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	if (atomic_dec_and_test(&blkg->refcnt))
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
			       bool update_hint);

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
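
/*
 * A usage sketch, summing a counter over a subtree with the
 * hypothetical my_pd from the examples above (assumes the policy is
 * active on @p_blkg's queue, so every visited blkg has the pd):
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *pos;
 *	u64 sum = 0;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(pos, pos_css, blkg) {
 *		struct my_pd *mpd =
 *			(struct my_pd *)blkg_to_pd(pos, &blkcg_policy_my);
 *
 *		sum += blkg_stat_read(&mpd->my_stat);
 *	}
 *	rcu_read_unlock();
 */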

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup_create(blkcg, q);
	if (unlikely(IS_ERR(blkg)))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	/* root_rl may not have blkg set */
	if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}

struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
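
/*
 * Sketch of how the rl helpers pair up over a request's life cycle
 * (roughly what the request allocation and free paths in blk-core.c do):
 *
 *	rl = blk_get_rl(q, bio);		(under queue_lock)
 *	... allocate rq from rl ...
 *	blk_rq_set_rl(rq, rl);
 *	...
 *	blk_put_rl(blk_rq_rl(rq));		(when rq is freed)
 *
 * so the reference taken at allocation time is dropped exactly once at
 * free time.
 */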

static inline void blkg_stat_init(struct blkg_stat *stat)
{
	u64_stats_init(&stat->syncp);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin_irq(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry_irq(&stat->syncp, start));

	return v;
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}

/**
 * blkg_stat_merge - merge a blkg_stat into another
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count to @to.
 */
static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
{
	blkg_stat_add(to, blkg_stat_read(from));
}
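
/*
 * Putting the blkg_stat helpers together, again with the hypothetical
 * my_pd from the examples above (sketch):
 *
 *	blkg_stat_init(&mpd->my_stat);		(once, from pd_init_fn)
 *	blkg_stat_add(&mpd->my_stat, 1);	(writers, synchronized)
 *	v = blkg_stat_read(&mpd->my_stat);	(readers, any time)
 *	blkg_stat_reset(&mpd->my_stat);		(from pd_reset_stats_fn)
 */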

static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
{
	u64_stats_init(&rwstat->syncp);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin_irq(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

/**
 * blkg_rwstat_merge - merge a blkg_rwstat into another
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's counts to @to.
 */
static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
				     struct blkg_rwstat *from)
{
	struct blkg_rwstat v = blkg_rwstat_read(from);
	int i;

	u64_stats_update_begin(&to->syncp);
	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		to->cnt[i] += v.cnt[i];
	u64_stats_update_end(&to->syncp);
}
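
/*
 * Typical blkg_rwstat usage, accounting a bio against the hypothetical
 * policy data from the examples above (sketch; assumes my_bytes is a
 * blkg_rwstat embedded in struct my_pd):
 *
 *	blkg_rwstat_init(&mpd->my_bytes);	(from pd_init_fn)
 *	blkg_rwstat_add(&mpd->my_bytes, bio->bi_rw, bio->bi_iter.bi_size);
 *	...
 *	total = blkg_rwstat_total(&mpd->my_bytes);
 *
 * bio->bi_rw carries the REQ_WRITE and REQ_SYNC bits which
 * blkg_rwstat_add() uses to pick the counters to update.
 */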

#else	/* CONFIG_BLK_CGROUP */

struct cgroup;
struct blkcg;

struct blkg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

#define blk_queue_for_each_rl(rl, q) \
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */