/*
 * Copyright (C) 2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#ifndef DM_CACHE_METADATA_H
#define DM_CACHE_METADATA_H

#include "dm-cache-block-types.h"
#include "dm-cache-policy-internal.h"
#include "persistent-data/dm-space-map-metadata.h"

/*----------------------------------------------------------------*/

#define DM_CACHE_METADATA_BLOCK_SIZE DM_SM_METADATA_BLOCK_SIZE

/* FIXME: remove this restriction */
/*
 * The metadata device is currently limited in size.
 */
#define DM_CACHE_METADATA_MAX_SECTORS DM_SM_METADATA_MAX_SECTORS

/*
 * A metadata device larger than 16GB triggers a warning.
 */
#define DM_CACHE_METADATA_MAX_SECTORS_WARNING (16 * (1024 * 1024 * 1024 >> SECTOR_SHIFT))

/*----------------------------------------------------------------*/

/*
 * Ext[234]-style compat feature flags.
 *
 * A new feature which old metadata will still be compatible with should
 * define a DM_CACHE_FEATURE_COMPAT_* flag (rarely useful).
 *
 * A new feature that is not compatible with old code should define a
 * DM_CACHE_FEATURE_INCOMPAT_* flag and guard the relevant code with
 * that flag.
 *
 * A new feature that is not compatible with old code accessing the
 * metadata RDWR should define a DM_CACHE_FEATURE_RO_COMPAT_* flag and
 * guard the relevant code with that flag.
 *
 * As these various flags are defined they should be added to the
 * following masks.
 */

#define DM_CACHE_FEATURE_COMPAT_SUPP	  0UL
#define DM_CACHE_FEATURE_COMPAT_RO_SUPP	  0UL
#define DM_CACHE_FEATURE_INCOMPAT_SUPP	  0UL

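/*
 * Illustrative sketch only (DM_CACHE_FEATURE_INCOMPAT_EXAMPLE is a made-up
 * flag, not part of the on-disk format): a new incompat feature would get
 * its own bit and be OR'd into the corresponding support mask, e.g.
 *
 *	#define DM_CACHE_FEATURE_INCOMPAT_EXAMPLE	(1UL << 0)
 *	#define DM_CACHE_FEATURE_INCOMPAT_SUPP \
 *		DM_CACHE_FEATURE_INCOMPAT_EXAMPLE
 *
 * Following the usual ext[234] convention, code would then refuse to use
 * metadata whose incompat flags contain bits outside the support mask.
 */
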
struct dm_cache_metadata;

/*
 * Reopens or creates a new, empty metadata volume.  Returns an ERR_PTR on
 * failure.  If reopening, the features must match.
 */
struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
						 sector_t data_block_size,
						 bool may_format_device,
						 size_t policy_hint_size,
						 unsigned metadata_version);

void dm_cache_metadata_close(struct dm_cache_metadata *cmd);

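/*
 * Illustrative usage sketch (the caller and its variables are hypothetical,
 * not part of this interface): the return value must be checked with
 * IS_ERR() before use, and a successful open balanced with
 * dm_cache_metadata_close().
 *
 *	struct dm_cache_metadata *cmd;
 *
 *	cmd = dm_cache_metadata_open(bdev, data_block_size, true,
 *				     policy_hint_size, metadata_version);
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *
 *	... use cmd ...
 *
 *	dm_cache_metadata_close(cmd);
 */
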
/*
 * The metadata needs to know how many cache blocks there are.  We don't
 * care about the origin, assuming the core target is giving us valid
 * origin blocks to map to.
 */
int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size);
int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result);

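/*
 * Illustrative sketch (hypothetical caller, assuming the to_cblock()
 * helper from dm-cache-block-types.h): the core target derives the number
 * of cache blocks from the cache device size and tells the metadata.
 *
 *	dm_cblock_t csize = to_cblock(cache_dev_sectors / data_block_size);
 *	int r = dm_cache_resize(cmd, csize);
 *
 *	if (r)
 *		return r;
 */
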
int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
				   sector_t discard_block_size,
				   dm_dblock_t new_nr_entries);

typedef int (*load_discard_fn)(void *context, sector_t discard_block_size,
			       dm_dblock_t dblock, bool discarded);
int dm_cache_load_discards(struct dm_cache_metadata *cmd,
			   load_discard_fn fn, void *context);

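/*
 * Illustrative sketch (the callback, struct cache and set_discard_bit()
 * are hypothetical): dm_cache_load_discards() replays the on-disk discard
 * state through the supplied callback, passing the opaque context pointer
 * straight through.
 *
 *	static int load_discard(void *context, sector_t discard_block_size,
 *				dm_dblock_t dblock, bool discarded)
 *	{
 *		struct cache *cache = context;
 *
 *		if (discarded)
 *			set_discard_bit(cache, dblock);
 *		return 0;
 *	}
 *
 *	r = dm_cache_load_discards(cmd, load_discard, cache);
 */
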
int dm_cache_set_discard(struct dm_cache_metadata *cmd, dm_dblock_t dblock, bool discard);

int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock);
int dm_cache_insert_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock, dm_oblock_t oblock);
int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd);

typedef int (*load_mapping_fn)(void *context, dm_oblock_t oblock,
			       dm_cblock_t cblock, bool dirty,
			       uint32_t hint, bool hint_valid);
int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
			   struct dm_cache_policy *policy,
			   load_mapping_fn fn,
			   void *context);

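/*
 * Illustrative sketch (the callback, struct cache and its helpers are
 * hypothetical): the callback receives one mapping at a time; the hint is
 * only meaningful when hint_valid is true.
 *
 *	static int load_mapping(void *context, dm_oblock_t oblock,
 *				dm_cblock_t cblock, bool dirty,
 *				uint32_t hint, bool hint_valid)
 *	{
 *		struct cache *cache = context;
 *
 *		add_mapping(cache, oblock, cblock, dirty);
 *		if (hint_valid)
 *			replay_hint(cache, cblock, hint);
 *		return 0;
 *	}
 *
 *	r = dm_cache_load_mappings(cmd, policy, load_mapping, cache);
 */
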
int dm_cache_set_dirty_bits(struct dm_cache_metadata *cmd,
			    unsigned nr_bits, unsigned long *bits);

struct dm_cache_statistics {
	uint32_t read_hits;
	uint32_t read_misses;
	uint32_t write_hits;
	uint32_t write_misses;
};

void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
				 struct dm_cache_statistics *stats);

/*
 * 'void' because it's no big deal if it fails.
 */
void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
				 struct dm_cache_statistics *stats);

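/*
 * Illustrative sketch (hypothetical caller): one plausible pattern, not
 * mandated by this interface, is to read the statistics back at startup,
 * update them in core, and write them out again later; a failure to store
 * them is deliberately not reported.
 *
 *	struct dm_cache_statistics stats;
 *
 *	dm_cache_metadata_get_stats(cmd, &stats);
 *	stats.read_hits++;
 *	dm_cache_metadata_set_stats(cmd, &stats);
 */
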
int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown);

int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
					   dm_block_t *result);

int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
				   dm_block_t *result);

void dm_cache_dump(struct dm_cache_metadata *cmd);

/*
 * The policy is invited to save a 32 bit hint value for every cblock (e.g.
 * for a hit count).  These are stored against the policy name.  If
 * policies are changed, then hints will be lost.  If the machine crashes,
 * hints will be lost.
 *
 * The hints are indexed by cblock, but many policies will not
 * necessarily have a fast way of accessing them efficiently via cblock.
 * So rather than querying the policy for each cblock, we let it walk its
 * data structures and fill in the hints in whatever order it wishes.
 */
int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *p);

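/*
 * Illustrative sketch (hypothetical caller): one plausible use is to write
 * the hints shortly before committing for a clean shutdown, so the policy
 * can warm up faster on the next activation.
 *
 *	r = dm_cache_write_hints(cmd, policy);
 *	if (r)
 *		DMERR("could not write hints");
 *
 *	r = dm_cache_commit(cmd, true);
 */
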
/*
 * Query method.  Are all the blocks in the cache clean?
 */
int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result);

int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result);
int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd);
void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd);
void dm_cache_metadata_set_read_write(struct dm_cache_metadata *cmd);
int dm_cache_metadata_abort(struct dm_cache_metadata *cmd);

/*----------------------------------------------------------------*/

#endif /* DM_CACHE_METADATA_H */