blob: 1f1b59d38db5b8d428200238c14886be89ceaf25 [file] [log] [blame]
Kent Overstreetcafe5632013-03-23 16:11:31 -07001#ifndef _BCACHE_REQUEST_H_
2#define _BCACHE_REQUEST_H_
3
4#include <linux/cgroup.h>
5
/*
 * Per-request state for an I/O submitted to a bcache device. Embeds the
 * closure used as the "stack frame" for async completion, the original bio,
 * and the btree operation state. Allocated from bch_search_cache (declared
 * below) — NOTE(review): allocation site not visible in this header; confirm
 * in request.c.
 */
struct search {
	/* Stack frame for bio_complete */
	struct closure		cl;

	struct bcache_device	*d;	/* device the request was submitted to */
	struct task_struct	*task;	/* submitting task — presumably for
					 * per-task accounting; confirm caller */

	struct bbio		bio;	/* bio used for cache-device I/O */
	struct bio		*orig_bio;	/* caller's original bio */
	struct bio		*cache_miss;	/* bio for data missed in cache,
						 * NULL if no miss in flight */
	unsigned		cache_bio_sectors;

	unsigned		recoverable:1;	/* can fall back to backing dev */
	unsigned		unaligned_bvec:1;

	unsigned		write:1;	/* request is a write */
	unsigned		writeback:1;	/* write goes through writeback */

	/* IO error returned to s->bio */
	short			error;
	unsigned long		start_time;	/* for request latency accounting */

	/* Anything past op->keys won't get zeroed in do_bio_hook */
	struct btree_op		op;
};
31
/*
 * Request-path entry points.
 *
 * NOTE(review): bch_cache_read_endio() was declared twice in the original
 * header; the redundant duplicate declaration has been dropped. Parameter
 * names added to prototypes for readability (no ABI/interface change).
 */
void bch_cache_read_endio(struct bio *bio, int error);
unsigned bch_get_congested(struct cache_set *c);
void bch_data_insert(struct closure *cl);

void bch_open_buckets_free(struct cache_set *c);
int bch_open_buckets_alloc(struct cache_set *c);

/*
 * Hook up request handling for a device — presumably installs the
 * make_request/queue callbacks; confirm against request.c.
 */
void bch_cached_dev_request_init(struct cached_dev *dc);
void bch_flash_dev_request_init(struct bcache_device *d);

/* Slab caches for per-request state (struct search allocations) */
extern struct kmem_cache *bch_search_cache, *bch_passthrough_cache;
44
/*
 * Per-cgroup bcache settings and statistics. When CONFIG_CGROUP_BCACHE is
 * enabled this embeds the cgroup subsystem state so a bch_cgroup can be
 * looked up from a bio's cgroup (see bch_bio_to_cgroup below).
 */
struct bch_cgroup {
#ifdef CONFIG_CGROUP_BCACHE
	struct cgroup_subsys_state	css;
#endif
	/*
	 * We subtract one from the index into bch_cache_modes[], so that
	 * default == -1; this makes it so the rest match up with d->cache_mode,
	 * and we use d->cache_mode if cgrp->cache_mode < 0
	 */
	short				cache_mode;
	bool				verify;		/* enable read verification
							 * for this cgroup */
	struct cache_stat_collector	stats;		/* per-cgroup IO stats */
};

/* Resolve the bch_cgroup that a bio belongs to */
struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio);
60
61#endif /* _BCACHE_REQUEST_H_ */