#ifndef _BCACHE_REQUEST_H_
#define _BCACHE_REQUEST_H_

#include <linux/cgroup.h>

struct search {
	/* Stack frame for bio_complete */
	struct closure		cl;

	struct bcache_device	*d;
	struct task_struct	*task;

	struct bbio		bio;
	struct bio		*orig_bio;
	struct bio		*cache_miss;
	unsigned		cache_bio_sectors;

	unsigned		recoverable:1;
	unsigned		unaligned_bvec:1;

	unsigned		write:1;
	unsigned		writeback:1;

	/* IO error returned to s->bio */
	short			error;
	unsigned long		start_time;

	struct btree_op		op;

	/* Anything past this point won't get zeroed in search_alloc() */
	struct keylist		insert_keys;
};
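
/*
 * A minimal sketch of the partial-zeroing convention noted above, assuming
 * search_alloc() in request.c allocates from a mempool and clears only the
 * fields that precede insert_keys (illustrative, not necessarily the exact
 * in-tree implementation):
 *
 *	struct search *s = mempool_alloc(d->c->search, GFP_NOIO);
 *	memset(s, 0, offsetof(struct search, insert_keys));
 *
 * Anything from insert_keys onward must then be initialized explicitly.
 */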

void bch_cache_read_endio(struct bio *, int);
unsigned bch_get_congested(struct cache_set *);
void bch_data_insert(struct closure *cl);

void bch_open_buckets_free(struct cache_set *);
int bch_open_buckets_alloc(struct cache_set *);

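/*
 * These init helpers are implemented in request.c; they are expected to hook
 * up the bio submission path for each device type (cached devices vs.
 * flash-only volumes) when the device is registered.
 */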
void bch_cached_dev_request_init(struct cached_dev *dc);
void bch_flash_dev_request_init(struct bcache_device *d);

extern struct kmem_cache *bch_search_cache, *bch_passthrough_cache;

struct bch_cgroup {
#ifdef CONFIG_CGROUP_BCACHE
	struct cgroup_subsys_state	css;
#endif
	/*
	 * We subtract one from the index into bch_cache_modes[], so that
	 * default == -1; this makes it so the rest match up with d->cache_mode,
	 * and we use d->cache_mode if cgrp->cache_mode < 0
	 */
	short				cache_mode;
	bool				verify;
	struct cache_stat_collector	stats;
};
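
/*
 * Worked example of the cache_mode convention documented above (assuming
 * bch_cache_modes[0] is the "default" entry): a cgroup value of -1 maps to
 * bch_cache_modes[0] and makes the code fall back to d->cache_mode, while a
 * value of 0 maps to bch_cache_modes[1]; from there on the values line up
 * one-to-one with d->cache_mode.
 */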

struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio);

#endif /* _BCACHE_REQUEST_H_ */