/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#ifndef DM_CACHE_POLICY_H
#define DM_CACHE_POLICY_H

#include "dm-cache-block-types.h"

#include <linux/device-mapper.h>

/*----------------------------------------------------------------*/

/* FIXME: make it clear which methods are optional. Get debug policy to
 * double check this at start.
 */

/*
 * The cache policy makes the important decisions about which blocks get to
 * live on the faster cache device.
 *
 * When the core target has to remap a bio it calls the 'map' method of the
 * policy. This returns an instruction telling the core target what to do.
 *
 * POLICY_HIT:
 *   That block is in the cache. Remap to the cache and carry on.
 *
 * POLICY_MISS:
 *   This block is on the origin device. Remap and carry on.
 *
 * POLICY_NEW:
 *   This block is currently on the origin device, but the policy wants to
 *   move it. The core should:
 *
 *   - hold any further io to this origin block
 *   - copy the origin to the given cache block
 *   - release all the held blocks
 *   - remap the original block to the cache
 *
 * POLICY_REPLACE:
 *   This block is currently on the origin device. The policy wants to
 *   move it to the cache, with the added complication that the destination
 *   cache block needs a writeback first. The core should:
 *
 *   - hold any further io to this origin block
 *   - hold any further io to the origin block that's being written back
 *   - writeback
 *   - copy new block to cache
 *   - release held blocks
 *   - remap bio to cache and reissue.
 *
 * Should the core run into trouble while processing a POLICY_NEW or
 * POLICY_REPLACE instruction it will roll back the policy's mapping using
 * remove_mapping() or force_mapping(). These methods must not fail. This
 * approach avoids having transactional semantics in the policy (ie, the
 * core informing the policy when a migration is complete), and hence makes
 * it easier to write new policies.
 *
 * In general policy methods should never block, except in the case of the
 * map function when can_block is set. So be careful to implement using
 * bounded, preallocated memory.
 */
enum policy_operation {
	POLICY_HIT,
	POLICY_MISS,
	POLICY_NEW,
	POLICY_REPLACE
};

/*
 * This is the instruction passed back to the core target.
 */
struct policy_result {
	enum policy_operation op;
	dm_oblock_t old_oblock;	/* POLICY_REPLACE */
	dm_cblock_t cblock;	/* POLICY_HIT, POLICY_NEW, POLICY_REPLACE */
};
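
/*
 * Illustration only (not part of the original header): a minimal sketch of
 * how a caller such as the core target might act on a policy_result. The
 * remap_to_cache(), remap_to_origin(), queue_new_migration() and
 * queue_replace_migration() helpers are illustrative placeholders with
 * assumed signatures, not declarations from this header.
 *
 *	static void act_on_instruction(struct policy_result *result,
 *				       struct bio *bio)
 *	{
 *		switch (result->op) {
 *		case POLICY_HIT:
 *			remap_to_cache(bio, result->cblock);
 *			break;
 *		case POLICY_MISS:
 *			remap_to_origin(bio);
 *			break;
 *		case POLICY_NEW:
 *			queue_new_migration(bio, result->cblock);
 *			break;
 *		case POLICY_REPLACE:
 *			queue_replace_migration(bio, result->old_oblock,
 *						result->cblock);
 *			break;
 *		}
 *	}
 */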

typedef int (*policy_walk_fn)(void *context, dm_cblock_t cblock,
			      dm_oblock_t oblock, uint32_t hint);
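
/*
 * Illustration only: a hedged sketch of a policy_walk_fn callback, as it
 * might be passed to the walk_mappings() method declared below. Here it
 * just counts the mappings it is shown; struct mapping_counter and
 * count_mapping() are hypothetical names.
 *
 *	struct mapping_counter {
 *		unsigned count;
 *	};
 *
 *	static int count_mapping(void *context, dm_cblock_t cblock,
 *				 dm_oblock_t oblock, uint32_t hint)
 *	{
 *		struct mapping_counter *mc = context;
 *
 *		mc->count++;
 *		return 0;
 *	}
 */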

/*
 * The cache policy object. Just a bunch of methods. It is envisaged that
 * this structure will be embedded in a bigger, policy-specific structure
 * (ie. use container_of()); see the example sketch after this structure
 * definition.
 */
struct dm_cache_policy {

	/*
	 * FIXME: make it clear which methods are optional, and which may
	 * block.
	 */

	/*
	 * Destroys this object.
	 */
	void (*destroy)(struct dm_cache_policy *p);

	/*
	 * See large comment above.
	 *
	 * oblock - the origin block we're interested in.
	 *
	 * can_block - indicates whether the current thread is allowed to
	 *             block. -EWOULDBLOCK returned if it can't and would.
	 *
	 * can_migrate - gives permission for POLICY_NEW or POLICY_REPLACE
	 *               instructions. If denied and the policy would have
	 *               returned one of these instructions it should
	 *               return -EWOULDBLOCK.
	 *
	 * discarded_oblock - indicates whether the whole origin block is
	 *                    in a discarded state (FIXME: better to tell the
	 *                    policy about this sooner, so it can recycle that
	 *                    cache block if it wants.)
	 * bio - the bio that triggered this call.
	 * result - gets filled in with the instruction.
	 *
	 * May only return 0, or -EWOULDBLOCK (if !can_block or !can_migrate).
	 *
	 * A minimal implementation skeleton is sketched in the example after
	 * this structure definition.
	 */
	int (*map)(struct dm_cache_policy *p, dm_oblock_t oblock,
		   bool can_block, bool can_migrate, bool discarded_oblock,
		   struct bio *bio, struct policy_result *result);

	/*
	 * Sometimes we want to see if a block is in the cache, without
	 * triggering any update of stats (ie. it's not a real hit).
	 *
	 * Must not block.
	 *
	 * Returns 0 if in cache, -ENOENT if not, < 0 for other errors
	 * (-EWOULDBLOCK would be typical).
	 */
	int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock);

	/*
	 * oblock must be a mapped block. Must not block.
	 */
	void (*set_dirty)(struct dm_cache_policy *p, dm_oblock_t oblock);
	void (*clear_dirty)(struct dm_cache_policy *p, dm_oblock_t oblock);

	/*
	 * Called when a cache target is first created. Used to load a
	 * mapping from the metadata device into the policy.
	 */
	int (*load_mapping)(struct dm_cache_policy *p, dm_oblock_t oblock,
			    dm_cblock_t cblock, uint32_t hint, bool hint_valid);

	int (*walk_mappings)(struct dm_cache_policy *p, policy_walk_fn fn,
			     void *context);

	/*
	 * Override functions used on the error paths of the core target.
	 * They must succeed.
	 */
	void (*remove_mapping)(struct dm_cache_policy *p, dm_oblock_t oblock);
	void (*force_mapping)(struct dm_cache_policy *p, dm_oblock_t current_oblock,
			      dm_oblock_t new_oblock);

	int (*writeback_work)(struct dm_cache_policy *p, dm_oblock_t *oblock, dm_cblock_t *cblock);

	/*
	 * How full is the cache?
	 */
	dm_cblock_t (*residency)(struct dm_cache_policy *p);

	/*
	 * Because of where we sit in the block layer, we can be asked to
	 * map a lot of little bios that are all in the same block (no
	 * queue merging has occurred). To stop the policy being fooled by
	 * these, the core target sends regular tick() calls to the policy.
	 * The policy should only count an entry as a hit once per tick.
	 */
	void (*tick)(struct dm_cache_policy *p);

	/*
	 * Configuration.
	 */
	int (*emit_config_values)(struct dm_cache_policy *p,
				  char *result, unsigned maxlen);
	int (*set_config_value)(struct dm_cache_policy *p,
				const char *key, const char *value);

	/*
	 * Book-keeping pointer for the policy register; not for general use.
	 */
	void *private;
};
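
/*
 * Illustration only (not from the original header): a hedged sketch of how
 * a policy might embed struct dm_cache_policy in its own state, recover it
 * with container_of(), and implement a map() method that honours the
 * can_block/can_migrate conventions described above. struct my_policy,
 * to_my_policy(), lookup_entry(), should_promote() and choose_promotion()
 * are all hypothetical; the mutex usage assumes <linux/mutex.h>.
 *
 *	struct my_policy {
 *		struct dm_cache_policy policy;
 *		struct mutex lock;
 *		// ... policy-specific state ...
 *	};
 *
 *	static struct my_policy *to_my_policy(struct dm_cache_policy *p)
 *	{
 *		return container_of(p, struct my_policy, policy);
 *	}
 *
 *	static int my_map(struct dm_cache_policy *p, dm_oblock_t oblock,
 *			  bool can_block, bool can_migrate, bool discarded_oblock,
 *			  struct bio *bio, struct policy_result *result)
 *	{
 *		struct my_policy *mp = to_my_policy(p);
 *		int r = 0;
 *
 *		if (can_block)
 *			mutex_lock(&mp->lock);
 *		else if (!mutex_trylock(&mp->lock))
 *			return -EWOULDBLOCK;
 *
 *		if (lookup_entry(mp, oblock, &result->cblock))
 *			result->op = POLICY_HIT;
 *		else if (!should_promote(mp, oblock, discarded_oblock, bio))
 *			result->op = POLICY_MISS;
 *		else if (!can_migrate)
 *			r = -EWOULDBLOCK;
 *		else
 *			// Fills in POLICY_NEW or POLICY_REPLACE, plus cblock
 *			// and (for POLICY_REPLACE) old_oblock.
 *			r = choose_promotion(mp, oblock, result);
 *
 *		mutex_unlock(&mp->lock);
 *		return r;
 *	}
 */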

/*----------------------------------------------------------------*/

/*
 * We maintain a little register of the different policy types.
 */
#define CACHE_POLICY_NAME_SIZE 16
#define CACHE_POLICY_VERSION_SIZE 3

struct dm_cache_policy_type {
	/* For use by the register code only. */
	struct list_head list;

	/*
	 * Policy writers should fill in these fields. The name field is
	 * what gets passed on the target line to select your policy.
	 */
	char name[CACHE_POLICY_NAME_SIZE];
	unsigned version[CACHE_POLICY_VERSION_SIZE];

	/*
	 * Policies may store a hint for each cache block.
	 * Currently the size of this hint must be 0 or 4 bytes but we
	 * expect to relax this in future.
	 */
	size_t hint_size;

	struct module *owner;
	struct dm_cache_policy *(*create)(dm_cblock_t cache_size,
					  sector_t origin_size,
					  sector_t block_size);
};

int dm_cache_policy_register(struct dm_cache_policy_type *type);
void dm_cache_policy_unregister(struct dm_cache_policy_type *type);
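
/*
 * Illustration only (not from the original header): a hedged sketch of how
 * a policy module might fill in a dm_cache_policy_type and register it at
 * module load. The names my_policy_type, my_policy_create and "example"
 * are hypothetical; module_init()/module_exit() are the standard kernel
 * module hooks.
 *
 *	static struct dm_cache_policy_type my_policy_type = {
 *		.name = "example",
 *		.version = {1, 0, 0},
 *		.hint_size = 4,
 *		.owner = THIS_MODULE,
 *		.create = my_policy_create,
 *	};
 *
 *	static int __init my_policy_init(void)
 *	{
 *		return dm_cache_policy_register(&my_policy_type);
 *	}
 *
 *	static void __exit my_policy_exit(void)
 *	{
 *		dm_cache_policy_unregister(&my_policy_type);
 *	}
 *
 *	module_init(my_policy_init);
 *	module_exit(my_policy_exit);
 */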

/*----------------------------------------------------------------*/

#endif /* DM_CACHE_POLICY_H */