#ifndef _BCACHE_REQUEST_H_
#define _BCACHE_REQUEST_H_

#include <linux/cgroup.h>
5
/*
 * struct search - per-request state for one bio passing through bcache.
 *
 * NOTE(review): appears to be allocated from bch_search_cache (declared
 * below) for the lifetime of a single request -- confirm in request.c.
 * Layout matters: see the comment on 'op' about do_bio_hook's zeroing.
 */
struct search {
	/* Stack frame for bio_complete */
	struct closure cl;

	struct bcache_device *d;	/* device this request targets */
	struct task_struct *task;

	struct bbio bio;		/* bio used for cache I/O */
	struct bio *orig_bio;		/* original bio from the upper layer */
	struct bio *cache_miss;
	unsigned cache_bio_sectors;

	unsigned recoverable:1;
	unsigned unaligned_bvec:1;

	unsigned write:1;		/* request is a write */
	unsigned writeback:1;

	/* IO error returned to s->bio */
	short error;
	unsigned long start_time;	/* presumably jiffies at submission — confirm */

	/* Anything past op->keys won't get zeroed in do_bio_hook */
	struct btree_op op;
};
31
/*
 * Request-path entry points implemented in request.c.
 *
 * Fix: bch_cache_read_endio() was declared twice in this header; the
 * duplicate has been removed.  Parameter names added to all prototypes
 * per kernel convention (purely cosmetic for callers).
 */
void bch_cache_read_endio(struct bio *bio, int error);
int bch_get_congested(struct cache_set *c);
void bch_insert_data(struct closure *cl);
void bch_btree_insert_async(struct closure *cl);

void bch_open_buckets_free(struct cache_set *c);
int bch_open_buckets_alloc(struct cache_set *c);

void bch_cached_dev_request_init(struct cached_dev *dc);
void bch_flash_dev_request_init(struct bcache_device *d);

/* slab caches for struct search; defined in request.c */
extern struct kmem_cache *bch_search_cache, *bch_passthrough_cache;
45
/*
 * struct bch_cgroup - per-cgroup bcache state.
 *
 * When CONFIG_CGROUP_BCACHE is disabled the cgroup_subsys_state member
 * is omitted and the struct carries only the overrides and stats.
 */
struct bch_cgroup {
#ifdef CONFIG_CGROUP_BCACHE
	struct cgroup_subsys_state css;
#endif
	/*
	 * We subtract one from the index into bch_cache_modes[], so that
	 * default == -1; this makes it so the rest match up with d->cache_mode,
	 * and we use d->cache_mode if cgrp->cache_mode < 0
	 */
	short cache_mode;
	bool verify;	/* NOTE(review): presumably a per-cgroup verify override — confirm */
	struct cache_stat_collector stats;
};
59
60struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio);

#endif /* _BCACHE_REQUEST_H_ */