#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

/* CFQ specific, out here for blkcg->cfq_weight */
#define CFQ_WEIGHT_MIN		10
#define CFQ_WEIGHT_MAX		1000
#define CFQ_WEIGHT_DEFAULT	500

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	/* for policies to test whether associated blkcg has changed */
	uint64_t			id;

	/* TODO: per-policy storage in blkcg */
	unsigned int			cfq_weight;	/* belongs to cfq */
};

struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg-q pair.
 *
 * There can be multiple active blkcg policies and each has its private
 * data on each blkg, the size of which is determined by
 * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
 * together with blkg and invokes pd_init/exit_fn() methods.
 *
 * Such private data must embed struct blkg_policy_data (pd) at the
 * beginning and pd_size can't be smaller than the size of pd.
 */
struct blkg_policy_data {
	/* the blkg this per-policy data belongs to */
	struct blkcg_gq			*blkg;

	/* used during policy activation */
	struct list_head		alloc_node;
};
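
/*
 * Illustrative sketch (hypothetical policy "foo", not part of this header):
 * a policy embeds struct blkg_policy_data as the first member of its
 * per-blkg private data and sets blkcg_policy->pd_size accordingly:
 *
 *	struct foo_grp {
 *		struct blkg_policy_data	pd;	(must be the first member)
 *		unsigned int		weight;
 *	};
 *
 *	.pd_size = sizeof(struct foo_grp)
 */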

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;
	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;
	/* reference count */
	int				refcnt;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);

struct blkcg_policy {
	int				plid;
	/* policy specific private data size */
	size_t				pd_size;
	/* cgroup files for the policy */
	struct cftype			*cftypes;

	/* operations */
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_exit_pd_fn		*pd_exit_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};

extern struct blkcg blkcg_root;

struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

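/*
 * Illustrative registration sketch (hypothetical policy "foo", not part of
 * this header): a policy is registered once at init time and then activated
 * per request_queue:
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.pd_size	= sizeof(struct foo_grp),
 *		.cftypes	= foo_files,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_exit_fn	= foo_pd_exit,
 *	};
 *
 *	ret = blkcg_policy_register(&blkcg_policy_foo);
 *	...
 *	ret = blkcg_activate_policy(q, &blkcg_policy_foo);
 */
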
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	u64				v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);

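/*
 * Illustrative configuration-write sketch (assumption, not part of this
 * header): a policy's cgroup file write handler would typically pair
 * blkg_conf_prep() and blkg_conf_finish() around the update:
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *	if (ret)
 *		return ret;
 *	(update per-blkg settings using ctx.blkg and the parsed value ctx.v)
 *	blkg_conf_finish(&ctx);
 */
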
static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkcg, css);
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkcg, css);
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return container_of(bio->bi_css, struct blkcg, css);
	return task_blkcg(current);
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

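/*
 * Illustrative sketch (hypothetical policy "foo", not part of this header):
 * converting between a blkg and the policy's private data, e.g. from a
 * pd_init_fn() callback:
 *
 *	static void foo_pd_init(struct blkcg_gq *blkg)
 *	{
 *		struct blkg_policy_data *pd = blkg_to_pd(blkg, &blkcg_policy_foo);
 *		struct foo_grp *fg = container_of(pd, struct foo_grp, pd);
 *
 *		fg->weight = CFQ_WEIGHT_DEFAULT;
 *	}
 */
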
/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	int ret;

	rcu_read_lock();
	ret = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	rcu_read_unlock();
	if (ret)
		strncpy(buf, "<unavailable>", buflen);
	return ret;
}

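/*
 * Illustrative use (assumption, not part of this header): callers format
 * the path into a local buffer, e.g. for tracing or debug output:
 *
 *	char path[128];
 *
 *	blkg_path(blkg, path, sizeof(path));
 *	(path now holds the cgroup path, or "<unavailable>" on failure)
 */
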
/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding queue_lock and an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(!blkg->refcnt);
	blkg->refcnt++;
}

void __blkg_release(struct blkcg_gq *blkg);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 *
 * The caller should be holding queue_lock.
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(blkg->refcnt <= 0);
	if (!--blkg->refcnt)
		__blkg_release(blkg);
}

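/*
 * Illustrative sketch (assumption, not part of this header): taking and
 * dropping an extra reference while holding the queue lock:
 *
 *	spin_lock_irq(q->queue_lock);
 *	blkg_get(blkg);			(pin blkg beyond the existing ref)
 *	...
 *	blkg_put(blkg);			(may end up in __blkg_release())
 *	spin_unlock_irq(q->queue_lock);
 */
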
/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup_create(blkcg, q);
	if (unlikely(IS_ERR(blkg)))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	/* root_rl may not have blkg set */
	if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}

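/*
 * Illustrative allocation-path sketch (assumption, not part of this header):
 * how a request allocator is expected to pair these helpers:
 *
 *	rl = blk_get_rl(q, bio);		(under queue_lock)
 *	rq = (allocate a request from rl)
 *	if (!rq) {
 *		blk_put_rl(rl);
 *		return NULL;
 *	}
 *	blk_rq_set_rl(rq, rl);
 *	...
 *	blk_put_rl(blk_rq_rl(rq));		(when the request is freed)
 */
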
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 * @rl: request_list cursor
 * @q: request_queue to iterate over
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

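/*
 * Illustrative use (assumption, not part of this header), e.g. visiting
 * every request_list hanging off a queue:
 *
 *	struct request_list *rl;
 *
 *	blk_queue_for_each_rl(rl, q)
 *		(operate on rl, e.g. wake up waiters)
 */
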
/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry(&stat->syncp, start));

	return v;
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}

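/*
 * Illustrative use (hypothetical policy fields, not part of this header):
 * a per-blkg counter updated from the IO path and read back elsewhere:
 *
 *	blkg_stat_add(&fg->time, delta);	(IO path, caller serialized)
 *	total = blkg_stat_read(&fg->time);	(any context)
 *	blkg_stat_reset(&fg->time);		(e.g. from pd_reset_stats_fn)
 */
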
/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_sum - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_sum(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

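/*
 * Illustrative use (hypothetical policy fields, not part of this header):
 * accounting a completed bio and reading the totals back:
 *
 *	blkg_rwstat_add(&fg->serviced, bio->bi_rw, 1);
 *	nr_ios = blkg_rwstat_sum(&fg->serviced);
 *	rwstat = blkg_rwstat_read(&fg->serviced);
 *	(rwstat.cnt[BLKG_RWSTAT_READ] etc. hold a consistent snapshot)
 */
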
#else	/* CONFIG_BLK_CGROUP */

struct cgroup;
struct blkcg;

struct blkg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

#define blk_queue_for_each_rl(rl, q) \
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */