/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-prison.h"
#include "dm-bio-record.h"
#include "dm-cache-metadata.h"

#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX "cache"

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle,
	"A percentage of time allocated for copying to and/or from cache");

/*----------------------------------------------------------------*/

#define IOT_RESOLUTION 4

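/*
 * An io_tracker watches the volume of in-flight IO on a device so we can
 * tell whether it has been idle for a given period.  Helpers below with a
 * __ prefix expect the caller to already hold iot->lock.
 */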
struct io_tracker {
	spinlock_t lock;

	/*
	 * Sectors of in-flight IO.
	 */
	sector_t in_flight;

	/*
	 * The time, in jiffies, when this device became idle (if it is
	 * indeed idle).
	 */
	unsigned long idle_time;
	unsigned long last_update_time;
};

static void iot_init(struct io_tracker *iot)
{
	spin_lock_init(&iot->lock);
	iot->in_flight = 0ul;
	iot->idle_time = 0ul;
	iot->last_update_time = jiffies;
}

static bool __iot_idle_for(struct io_tracker *iot, unsigned long jifs)
{
	if (iot->in_flight)
		return false;

	return time_after(jiffies, iot->idle_time + jifs);
}

static bool iot_idle_for(struct io_tracker *iot, unsigned long jifs)
{
	bool r;
	unsigned long flags;

	spin_lock_irqsave(&iot->lock, flags);
	r = __iot_idle_for(iot, jifs);
	spin_unlock_irqrestore(&iot->lock, flags);

	return r;
}

static void iot_io_begin(struct io_tracker *iot, sector_t len)
{
	unsigned long flags;

	spin_lock_irqsave(&iot->lock, flags);
	iot->in_flight += len;
	spin_unlock_irqrestore(&iot->lock, flags);
}

static void __iot_io_end(struct io_tracker *iot, sector_t len)
{
	iot->in_flight -= len;
	if (!iot->in_flight)
		iot->idle_time = jiffies;
}

static void iot_io_end(struct io_tracker *iot, sector_t len)
{
	unsigned long flags;

	spin_lock_irqsave(&iot->lock, flags);
	__iot_io_end(iot, len);
	spin_unlock_irqrestore(&iot->lock, flags);
}

/*----------------------------------------------------------------*/

/*
 * Glossary:
 *
 * oblock: index of an origin block
 * cblock: index of a cache block
 * promotion: movement of a block from origin to cache
 * demotion: movement of a block from cache to origin
 * migration: movement of a block between the origin and cache device,
 *	      either direction
 */

/*----------------------------------------------------------------*/

/*
 * There are a couple of places where we let a bio run, but want to do some
 * work before calling its endio function.  We do this by temporarily
 * changing the endio fn.
 */
struct dm_hook_info {
	bio_end_io_t *bi_end_io;
};

static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
			bio_end_io_t *bi_end_io, void *bi_private)
{
	h->bi_end_io = bio->bi_end_io;

	bio->bi_end_io = bi_end_io;
	bio->bi_private = bi_private;
}

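/*
 * Note that dm_unhook_bio() restores only bi_end_io; bi_private is left
 * as the hook set it, so hooked bios must not rely on the old value
 * after unhooking.
 */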
static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
{
	bio->bi_end_io = h->bi_end_io;
}

/*----------------------------------------------------------------*/

#define MIGRATION_POOL_SIZE 128
#define COMMIT_PERIOD HZ
#define MIGRATION_COUNT_WINDOW 10

/*
 * The block size of the device holding cache data must be
 * between 32KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)

enum cache_metadata_mode {
	CM_WRITE,		/* metadata may be changed */
	CM_READ_ONLY,		/* metadata may not be changed */
	CM_FAIL
};

enum cache_io_mode {
	/*
	 * Data is written to cached blocks only.  These blocks are marked
	 * dirty.  If you lose the cache device you will lose data.
	 * Potential performance increase for both reads and writes.
	 */
	CM_IO_WRITEBACK,

	/*
	 * Data is written to both cache and origin.  Blocks are never
	 * dirty.  Potential performance benefit for reads only.
	 */
	CM_IO_WRITETHROUGH,

	/*
	 * A degraded mode useful for various cache coherency situations
	 * (eg, rolling back snapshots).  Reads and writes always go to the
	 * origin.  If a write goes to a cached oblock, then the cache
	 * block is invalidated.
	 */
	CM_IO_PASSTHROUGH
};

struct cache_features {
	enum cache_metadata_mode mode;
	enum cache_io_mode io_mode;
};

struct cache_stats {
	atomic_t read_hit;
	atomic_t read_miss;
	atomic_t write_hit;
	atomic_t write_miss;
	atomic_t demotion;
	atomic_t promotion;
	atomic_t copies_avoided;
	atomic_t cache_cell_clash;
	atomic_t commit_count;
	atomic_t discard_count;
};

/*
 * Defines a half-open range of cblocks [begin, end): blocks begin to
 * (end - 1) are in the range; end is the one-past-the-end value.
 */
struct cblock_range {
	dm_cblock_t begin;
	dm_cblock_t end;
};

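/*
 * A client request to drop a range of cblocks from the cache.  Judging by
 * the fields and the invalidation members of struct cache below, requests
 * are queued on cache->invalidation_requests and the requester waits on
 * result_wait for 'complete' to be set.
 */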
struct invalidation_request {
	struct list_head list;
	struct cblock_range *cblocks;

	atomic_t complete;
	int err;

	wait_queue_head_t result_wait;
};

struct cache {
	struct dm_target *ti;
	struct dm_target_callbacks callbacks;

	struct dm_cache_metadata *cmd;

	/*
	 * Metadata is written to this device.
	 */
	struct dm_dev *metadata_dev;

	/*
	 * The slower of the two data devices.  Typically a spindle.
	 */
	struct dm_dev *origin_dev;

	/*
	 * The faster of the two data devices.  Typically an SSD.
	 */
	struct dm_dev *cache_dev;

	/*
	 * Size of the origin device in _complete_ blocks and native sectors.
	 */
	dm_oblock_t origin_blocks;
	sector_t origin_sectors;

	/*
	 * Size of the cache device in blocks.
	 */
	dm_cblock_t cache_size;

	/*
	 * Fields for converting from sectors to blocks.
	 */
	uint32_t sectors_per_block;
	int sectors_per_block_shift;

	spinlock_t lock;
	struct list_head deferred_cells;
	struct bio_list deferred_bios;
	struct bio_list deferred_flush_bios;
	struct bio_list deferred_writethrough_bios;
	struct list_head quiesced_migrations;
	struct list_head completed_migrations;
	struct list_head need_commit_migrations;
	sector_t migration_threshold;
	wait_queue_head_t migration_wait;
	atomic_t nr_allocated_migrations;

	/*
	 * The number of in flight migrations that are performing
	 * background io. eg, promotion, writeback.
	 */
	atomic_t nr_io_migrations;

	wait_queue_head_t quiescing_wait;
	atomic_t quiescing;
	atomic_t quiescing_ack;

	/*
	 * cache_size entries, dirty if set
	 */
	atomic_t nr_dirty;
	unsigned long *dirty_bitset;

	/*
	 * origin_blocks entries, discarded if set.
	 */
	dm_dblock_t discard_nr_blocks;
	unsigned long *discard_bitset;
	uint32_t discard_block_size; /* a power of 2 times sectors per block */

	/*
	 * Rather than reconstructing the table line for the status we just
	 * save it and regurgitate.
	 */
	unsigned nr_ctr_args;
	const char **ctr_args;

	struct dm_kcopyd_client *copier;
	struct workqueue_struct *wq;
	struct work_struct worker;

	struct delayed_work waker;
	unsigned long last_commit_jiffies;

	struct dm_bio_prison *prison;
	struct dm_deferred_set *all_io_ds;

	mempool_t *migration_pool;

	struct dm_cache_policy *policy;
	unsigned policy_nr_args;

	bool need_tick_bio:1;
	bool sized:1;
	bool invalidate:1;
	bool commit_requested:1;
	bool loaded_mappings:1;
	bool loaded_discards:1;

	/*
	 * Cache features such as write-through.
	 */
	struct cache_features features;

	struct cache_stats stats;

	/*
	 * Invalidation fields.
	 */
	spinlock_t invalidation_lock;
	struct list_head invalidation_requests;

	struct io_tracker origin_tracker;
};

struct per_bio_data {
	bool tick:1;
	unsigned req_nr:2;
	struct dm_deferred_entry *all_io_entry;
	struct dm_hook_info hook_info;
	sector_t len;

	/*
	 * writethrough fields.  These MUST remain at the end of this
	 * structure and the 'cache' member must be the first as it
	 * is used to determine the offset of the writethrough fields.
	 */
	struct cache *cache;
	dm_cblock_t cblock;
	struct dm_bio_details bio_details;
};

struct dm_cache_migration {
	struct list_head list;
	struct cache *cache;

	unsigned long start_jiffies;
	dm_oblock_t old_oblock;
	dm_oblock_t new_oblock;
	dm_cblock_t cblock;

	bool err:1;
	bool discard:1;
	bool writeback:1;
	bool demote:1;
	bool promote:1;
	bool requeue_holder:1;
	bool invalidate:1;

	struct dm_bio_prison_cell *old_ocell;
	struct dm_bio_prison_cell *new_ocell;
};

/*
 * Processing a bio in the worker thread may require these memory
 * allocations.  We prealloc to avoid deadlocks (the same worker thread
 * frees them back to the mempool).
 */
struct prealloc {
	struct dm_cache_migration *mg;
	struct dm_bio_prison_cell *cell1;
	struct dm_bio_prison_cell *cell2;
};

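/*
 * A sketch of the intended usage (illustrative only): the worker zeroes a
 * struct prealloc, calls prealloc_data_structs() before processing each
 * bio, defers the bio if that returns -ENOMEM, and finally hands any
 * unused objects back with prealloc_free_structs().
 */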
static enum cache_metadata_mode get_cache_mode(struct cache *cache);

static void wake_worker(struct cache *cache)
{
	queue_work(cache->wq, &cache->worker);
}

/*----------------------------------------------------------------*/

static struct dm_bio_prison_cell *alloc_prison_cell(struct cache *cache)
{
	/* FIXME: change to use a local slab. */
	return dm_bio_prison_alloc_cell(cache->prison, GFP_NOWAIT);
}

static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell)
{
	dm_bio_prison_free_cell(cache->prison, cell);
}

static struct dm_cache_migration *alloc_migration(struct cache *cache)
{
	struct dm_cache_migration *mg;

	mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
	if (mg) {
		mg->cache = cache;
		atomic_inc(&mg->cache->nr_allocated_migrations);
	}

	return mg;
}

static void free_migration(struct dm_cache_migration *mg)
{
	struct cache *cache = mg->cache;

	if (atomic_dec_and_test(&cache->nr_allocated_migrations))
		wake_up(&cache->migration_wait);

	mempool_free(mg, cache->migration_pool);
}

static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
{
	if (!p->mg) {
		p->mg = alloc_migration(cache);
		if (!p->mg)
			return -ENOMEM;
	}

	if (!p->cell1) {
		p->cell1 = alloc_prison_cell(cache);
		if (!p->cell1)
			return -ENOMEM;
	}

	if (!p->cell2) {
		p->cell2 = alloc_prison_cell(cache);
		if (!p->cell2)
			return -ENOMEM;
	}

	return 0;
}

static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
{
	if (p->cell2)
		free_prison_cell(cache, p->cell2);

	if (p->cell1)
		free_prison_cell(cache, p->cell1);

	if (p->mg)
		free_migration(p->mg);
}

static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
{
	struct dm_cache_migration *mg = p->mg;

	BUG_ON(!mg);
	p->mg = NULL;

	return mg;
}

/*
 * You must have a cell within the prealloc struct to return.  If not,
 * this function will BUG() rather than return NULL.
 */
static struct dm_bio_prison_cell *prealloc_get_cell(struct prealloc *p)
{
	struct dm_bio_prison_cell *r = NULL;

	if (p->cell1) {
		r = p->cell1;
		p->cell1 = NULL;

	} else if (p->cell2) {
		r = p->cell2;
		p->cell2 = NULL;
	} else
		BUG();

	return r;
}

/*
 * You can't have more than two cells in a prealloc struct.  BUG() will be
 * called if you try to overfill.
 */
static void prealloc_put_cell(struct prealloc *p, struct dm_bio_prison_cell *cell)
{
	if (!p->cell2)
		p->cell2 = cell;

	else if (!p->cell1)
		p->cell1 = cell;

	else
		BUG();
}

/*----------------------------------------------------------------*/

static void build_key(dm_oblock_t begin, dm_oblock_t end, struct dm_cell_key *key)
{
	key->virtual = 0;
	key->dev = 0;
	key->block_begin = from_oblock(begin);
	key->block_end = from_oblock(end);
}

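/*
 * Keys describe a half-open block range [begin, end); detaining a single
 * block b therefore uses the key [b, b + 1), as bio_detain() does below.
 */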
/*
 * The caller hands in a preallocated cell, and a free function for it.
 * The cell will be freed if there's an error, or if it wasn't used because
 * a cell with that key already exists.
 */
typedef void (*cell_free_fn)(void *context, struct dm_bio_prison_cell *cell);

static int bio_detain_range(struct cache *cache, dm_oblock_t oblock_begin, dm_oblock_t oblock_end,
			    struct bio *bio, struct dm_bio_prison_cell *cell_prealloc,
			    cell_free_fn free_fn, void *free_context,
			    struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_cell_key key;

	build_key(oblock_begin, oblock_end, &key);
	r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result);
	if (r)
		free_fn(free_context, cell_prealloc);

	return r;
}

static int bio_detain(struct cache *cache, dm_oblock_t oblock,
		      struct bio *bio, struct dm_bio_prison_cell *cell_prealloc,
		      cell_free_fn free_fn, void *free_context,
		      struct dm_bio_prison_cell **cell_result)
{
	dm_oblock_t end = to_oblock(from_oblock(oblock) + 1ULL);
	return bio_detain_range(cache, oblock, end, bio,
				cell_prealloc, free_fn, free_context, cell_result);
}

static int get_cell(struct cache *cache,
		    dm_oblock_t oblock,
		    struct prealloc *structs,
		    struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_cell_key key;
	struct dm_bio_prison_cell *cell_prealloc;

	cell_prealloc = prealloc_get_cell(structs);

	build_key(oblock, to_oblock(from_oblock(oblock) + 1ULL), &key);
	r = dm_get_cell(cache->prison, &key, cell_prealloc, cell_result);
	if (r)
		prealloc_put_cell(structs, cell_prealloc);

	return r;
}

/*----------------------------------------------------------------*/

static bool is_dirty(struct cache *cache, dm_cblock_t b)
{
	return test_bit(from_cblock(b), cache->dirty_bitset);
}

static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
{
	if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
		atomic_inc(&cache->nr_dirty);
		policy_set_dirty(cache->policy, oblock);
	}
}

static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
{
	if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
		policy_clear_dirty(cache->policy, oblock);
		if (atomic_dec_return(&cache->nr_dirty) == 0)
			dm_table_event(cache->ti->table);
	}
}

/*----------------------------------------------------------------*/

static bool block_size_is_power_of_two(struct cache *cache)
{
	return cache->sectors_per_block_shift >= 0;
}

/* gcc on ARM generates spurious references to __udivdi3 and __umoddi3 */
#if defined(CONFIG_ARM) && __GNUC__ == 4 && __GNUC_MINOR__ <= 6
__always_inline
#endif
static dm_block_t block_div(dm_block_t b, uint32_t n)
{
	do_div(b, n);

	return b;
}

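/* e.g. block_div(1000, 64) == 15: do_div() rounds down, discarding the remainder. */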
static dm_block_t oblocks_per_dblock(struct cache *cache)
{
	dm_block_t oblocks = cache->discard_block_size;

	if (block_size_is_power_of_two(cache))
		oblocks >>= cache->sectors_per_block_shift;
	else
		oblocks = block_div(oblocks, cache->sectors_per_block);

	return oblocks;
}

static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
{
	return to_dblock(block_div(from_oblock(oblock),
				   oblocks_per_dblock(cache)));
}

static dm_oblock_t dblock_to_oblock(struct cache *cache, dm_dblock_t dblock)
{
	return to_oblock(from_dblock(dblock) * oblocks_per_dblock(cache));
}

static void set_discard(struct cache *cache, dm_dblock_t b)
{
	unsigned long flags;

	BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks));
	atomic_inc(&cache->stats.discard_count);

	spin_lock_irqsave(&cache->lock, flags);
	set_bit(from_dblock(b), cache->discard_bitset);
	spin_unlock_irqrestore(&cache->lock, flags);
}

static void clear_discard(struct cache *cache, dm_dblock_t b)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	clear_bit(from_dblock(b), cache->discard_bitset);
	spin_unlock_irqrestore(&cache->lock, flags);
}

static bool is_discarded(struct cache *cache, dm_dblock_t b)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	r = test_bit(from_dblock(b), cache->discard_bitset);
	spin_unlock_irqrestore(&cache->lock, flags);

	return r;
}

static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
		     cache->discard_bitset);
	spin_unlock_irqrestore(&cache->lock, flags);

	return r;
}

/*----------------------------------------------------------------*/

static void load_stats(struct cache *cache)
{
	struct dm_cache_statistics stats;

	dm_cache_metadata_get_stats(cache->cmd, &stats);
	atomic_set(&cache->stats.read_hit, stats.read_hits);
	atomic_set(&cache->stats.read_miss, stats.read_misses);
	atomic_set(&cache->stats.write_hit, stats.write_hits);
	atomic_set(&cache->stats.write_miss, stats.write_misses);
}

static void save_stats(struct cache *cache)
{
	struct dm_cache_statistics stats;

	if (get_cache_mode(cache) >= CM_READ_ONLY)
		return;

	stats.read_hits = atomic_read(&cache->stats.read_hit);
	stats.read_misses = atomic_read(&cache->stats.read_miss);
	stats.write_hits = atomic_read(&cache->stats.write_hit);
	stats.write_misses = atomic_read(&cache->stats.write_miss);

	dm_cache_metadata_set_stats(cache->cmd, &stats);
}

/*----------------------------------------------------------------
 * Per bio data
 *--------------------------------------------------------------*/

/*
 * If using writeback, leave out struct per_bio_data's writethrough fields.
 */
#define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
#define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))
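/*
 * PB_DATA_SIZE_WB stops at the 'cache' member, so the trailing
 * writethrough fields (cache, cblock, bio_details) are only carried
 * when writethrough mode actually needs them.
 */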

static bool writethrough_mode(struct cache_features *f)
{
	return f->io_mode == CM_IO_WRITETHROUGH;
}

static bool writeback_mode(struct cache_features *f)
{
	return f->io_mode == CM_IO_WRITEBACK;
}

static bool passthrough_mode(struct cache_features *f)
{
	return f->io_mode == CM_IO_PASSTHROUGH;
}

static size_t get_per_bio_data_size(struct cache *cache)
{
	return writethrough_mode(&cache->features) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
}

static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
{
	struct per_bio_data *pb = dm_per_bio_data(bio, data_size);
	BUG_ON(!pb);
	return pb;
}

static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
{
	struct per_bio_data *pb = get_per_bio_data(bio, data_size);

	pb->tick = false;
	pb->req_nr = dm_bio_get_target_bio_nr(bio);
	pb->all_io_entry = NULL;
	pb->len = 0;

	return pb;
}

/*----------------------------------------------------------------
 * Remapping
 *--------------------------------------------------------------*/
static void remap_to_origin(struct cache *cache, struct bio *bio)
{
	bio->bi_bdev = cache->origin_dev->bdev;
}

static void remap_to_cache(struct cache *cache, struct bio *bio,
			   dm_cblock_t cblock)
{
	sector_t bi_sector = bio->bi_iter.bi_sector;
	sector_t block = from_cblock(cblock);

	bio->bi_bdev = cache->cache_dev->bdev;
	if (!block_size_is_power_of_two(cache))
		bio->bi_iter.bi_sector =
			(block * cache->sectors_per_block) +
			sector_div(bi_sector, cache->sectors_per_block);
	else
		bio->bi_iter.bi_sector =
			(block << cache->sectors_per_block_shift) |
			(bi_sector & (cache->sectors_per_block - 1));
}

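/*
 * e.g. with sectors_per_block = 8 (shift 3), a bio at sector 13 remapped
 * to cblock 5 lands on the cache device at (5 << 3) | (13 & 7) = 45.
 */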
static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
{
	unsigned long flags;
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	spin_lock_irqsave(&cache->lock, flags);
	if (cache->need_tick_bio &&
	    !(bio->bi_opf & (REQ_FUA | REQ_PREFLUSH)) &&
	    bio_op(bio) != REQ_OP_DISCARD) {
		pb->tick = true;
		cache->need_tick_bio = false;
	}
	spin_unlock_irqrestore(&cache->lock, flags);
}

static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
					  dm_oblock_t oblock)
{
	check_if_tick_bio_needed(cache, bio);
	remap_to_origin(cache, bio);
	if (bio_data_dir(bio) == WRITE)
		clear_discard(cache, oblock_to_dblock(cache, oblock));
}

static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
				 dm_oblock_t oblock, dm_cblock_t cblock)
{
	check_if_tick_bio_needed(cache, bio);
	remap_to_cache(cache, bio, cblock);
	if (bio_data_dir(bio) == WRITE) {
		set_dirty(cache, oblock, cblock);
		clear_discard(cache, oblock_to_dblock(cache, oblock));
	}
}

static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
{
	sector_t block_nr = bio->bi_iter.bi_sector;

	if (!block_size_is_power_of_two(cache))
		(void) sector_div(block_nr, cache->sectors_per_block);
	else
		block_nr >>= cache->sectors_per_block_shift;

	return to_oblock(block_nr);
}

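/* e.g. with an 8 sector block size, a bio at sector 13 is in oblock 1. */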
static int bio_triggers_commit(struct cache *cache, struct bio *bio)
{
	return bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
}

/*
 * You must increment the deferred set whilst the prison cell is held.  To
 * encourage this, we ask for 'cell' to be passed in.
 */
static void inc_ds(struct cache *cache, struct bio *bio,
		   struct dm_bio_prison_cell *cell)
{
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	BUG_ON(!cell);
	BUG_ON(pb->all_io_entry);

	pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
}

static bool accountable_bio(struct cache *cache, struct bio *bio)
{
	return ((bio->bi_bdev == cache->origin_dev->bdev) &&
		bio_op(bio) != REQ_OP_DISCARD);
}

static void accounted_begin(struct cache *cache, struct bio *bio)
{
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	if (accountable_bio(cache, bio)) {
		pb->len = bio_sectors(bio);
		iot_io_begin(&cache->origin_tracker, pb->len);
	}
}

static void accounted_complete(struct cache *cache, struct bio *bio)
{
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	iot_io_end(&cache->origin_tracker, pb->len);
}

static void accounted_request(struct cache *cache, struct bio *bio)
{
	accounted_begin(cache, bio);
	generic_make_request(bio);
}

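/*
 * Bios issued via accounted_request() need accounted_complete() on their
 * completion path so the origin_tracker's in_flight count balances;
 * pb->len stays zero for unaccountable bios, making the matching
 * iot_io_end() a no-op on the count.
 */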
static void issue(struct cache *cache, struct bio *bio)
{
	unsigned long flags;

	if (!bio_triggers_commit(cache, bio)) {
		accounted_request(cache, bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in do_worker().
	 */
	spin_lock_irqsave(&cache->lock, flags);
	cache->commit_requested = true;
	bio_list_add(&cache->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&cache->lock, flags);
}

static void inc_and_issue(struct cache *cache, struct bio *bio, struct dm_bio_prison_cell *cell)
{
	inc_ds(cache, bio, cell);
	issue(cache, bio);
}

static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_add(&cache->deferred_writethrough_bios, bio);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void writethrough_endio(struct bio *bio)
{
	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);

	dm_unhook_bio(&pb->hook_info, bio);

	if (bio->bi_error) {
		bio_endio(bio);
		return;
	}

	dm_bio_restore(&pb->bio_details, bio);
	remap_to_cache(pb->cache, bio, pb->cblock);

	/*
	 * We can't issue this bio directly, since we're in interrupt
	 * context.  So it gets put on a bio list for processing by the
	 * worker thread.
	 */
	defer_writethrough_bio(pb->cache, bio);
}

/*
 * When running in writethrough mode we need to send writes to clean blocks
 * to both the cache and origin devices.  In future we'd like to clone the
 * bio and send them in parallel, but for now we're doing them in
 * series as this is easier.
 */
static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
				       dm_oblock_t oblock, dm_cblock_t cblock)
{
	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);

	pb->cache = cache;
	pb->cblock = cblock;
	dm_hook_bio(&pb->hook_info, bio, writethrough_endio, NULL);
	dm_bio_record(&pb->bio_details, bio);

	remap_to_origin_clear_discard(pb->cache, bio, oblock);
}

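/*
 * So a writethrough write visits the origin first; writethrough_endio()
 * then restores the recorded bio, remaps it to the cache device and
 * defers it to the worker thread for reissue.
 */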
/*----------------------------------------------------------------
 * Failure modes
 *--------------------------------------------------------------*/
static enum cache_metadata_mode get_cache_mode(struct cache *cache)
{
	return cache->features.mode;
}

static const char *cache_device_name(struct cache *cache)
{
	return dm_device_name(dm_table_get_md(cache->ti->table));
}

static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mode)
{
	const char *descs[] = {
		"write",
		"read-only",
		"fail"
	};

	dm_table_event(cache->ti->table);
	DMINFO("%s: switching cache to %s mode",
	       cache_device_name(cache), descs[(int)mode]);
}

static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mode)
{
	bool needs_check;
	enum cache_metadata_mode old_mode = get_cache_mode(cache);

	if (dm_cache_metadata_needs_check(cache->cmd, &needs_check)) {
		DMERR("unable to read needs_check flag, setting failure mode");
		new_mode = CM_FAIL;
	}

	if (new_mode == CM_WRITE && needs_check) {
		DMERR("%s: unable to switch cache to write mode until repaired.",
		      cache_device_name(cache));
		if (old_mode != new_mode)
			new_mode = old_mode;
		else
			new_mode = CM_READ_ONLY;
	}

	/* Never move out of fail mode */
	if (old_mode == CM_FAIL)
		new_mode = CM_FAIL;

	switch (new_mode) {
	case CM_FAIL:
	case CM_READ_ONLY:
		dm_cache_metadata_set_read_only(cache->cmd);
		break;

	case CM_WRITE:
		dm_cache_metadata_set_read_write(cache->cmd);
		break;
	}

	cache->features.mode = new_mode;

	if (new_mode != old_mode)
		notify_mode_switch(cache, new_mode);
}

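/*
 * In short: CM_FAIL is sticky, and we refuse to re-enter CM_WRITE while
 * the metadata's needs_check flag is set, downgrading to CM_READ_ONLY
 * instead.
 */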
| 1026 | static void abort_transaction(struct cache *cache) |
| 1027 | { |
Mike Snitzer | b61d950 | 2015-04-22 17:25:56 -0400 | [diff] [blame] | 1028 | const char *dev_name = cache_device_name(cache); |
| 1029 | |
Joe Thornber | 028ae9f | 2015-04-22 16:42:35 -0400 | [diff] [blame] | 1030 | if (get_cache_mode(cache) >= CM_READ_ONLY) |
| 1031 | return; |
| 1032 | |
| 1033 | if (dm_cache_metadata_set_needs_check(cache->cmd)) { |
Mike Snitzer | b61d950 | 2015-04-22 17:25:56 -0400 | [diff] [blame] | 1034 | DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name); |
Joe Thornber | 028ae9f | 2015-04-22 16:42:35 -0400 | [diff] [blame] | 1035 | set_cache_mode(cache, CM_FAIL); |
| 1036 | } |
| 1037 | |
Mike Snitzer | b61d950 | 2015-04-22 17:25:56 -0400 | [diff] [blame] | 1038 | DMERR_LIMIT("%s: aborting current metadata transaction", dev_name); |
Joe Thornber | 028ae9f | 2015-04-22 16:42:35 -0400 | [diff] [blame] | 1039 | if (dm_cache_metadata_abort(cache->cmd)) { |
Mike Snitzer | b61d950 | 2015-04-22 17:25:56 -0400 | [diff] [blame] | 1040 | DMERR("%s: failed to abort metadata transaction", dev_name); |
Joe Thornber | 028ae9f | 2015-04-22 16:42:35 -0400 | [diff] [blame] | 1041 | set_cache_mode(cache, CM_FAIL); |
| 1042 | } |
| 1043 | } |
| 1044 | |
| 1045 | static void metadata_operation_failed(struct cache *cache, const char *op, int r) |
| 1046 | { |
Mike Snitzer | b61d950 | 2015-04-22 17:25:56 -0400 | [diff] [blame] | 1047 | DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d", |
| 1048 | cache_device_name(cache), op, r); |
Joe Thornber | 028ae9f | 2015-04-22 16:42:35 -0400 | [diff] [blame] | 1049 | abort_transaction(cache); |
| 1050 | set_cache_mode(cache, CM_READ_ONLY); |
| 1051 | } |
| 1052 | |
| 1053 | /*---------------------------------------------------------------- |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1054 | * Migration processing |
| 1055 | * |
| 1056 | * Migration covers moving data from the origin device to the cache, or |
| 1057 | * vice versa. |
| 1058 | *--------------------------------------------------------------*/ |
Joe Thornber | a59db67 | 2015-01-23 10:16:16 +0000 | [diff] [blame] | 1059 | static void inc_io_migrations(struct cache *cache) |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1060 | { |
Joe Thornber | a59db67 | 2015-01-23 10:16:16 +0000 | [diff] [blame] | 1061 | atomic_inc(&cache->nr_io_migrations); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1062 | } |
| 1063 | |
Joe Thornber | a59db67 | 2015-01-23 10:16:16 +0000 | [diff] [blame] | 1064 | static void dec_io_migrations(struct cache *cache) |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1065 | { |
Joe Thornber | a59db67 | 2015-01-23 10:16:16 +0000 | [diff] [blame] | 1066 | atomic_dec(&cache->nr_io_migrations); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1067 | } |
| 1068 | |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 1069 | static bool discard_or_flush(struct bio *bio) |
| 1070 | { |
Mike Christie | e604714 | 2016-06-05 14:32:04 -0500 | [diff] [blame] | 1071 | return bio_op(bio) == REQ_OP_DISCARD || |
Jens Axboe | 1eff9d3 | 2016-08-05 15:35:16 -0600 | [diff] [blame] | 1072 | bio->bi_opf & (REQ_PREFLUSH | REQ_FUA); |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 1073 | } |
| 1074 | |
| 1075 | static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell) |
| 1076 | { |
Mike Snitzer | dc9cee5 | 2015-08-31 15:41:34 -0400 | [diff] [blame] | 1077 | if (discard_or_flush(cell->holder)) { |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 1078 | /* |
Mike Snitzer | dc9cee5 | 2015-08-31 15:41:34 -0400 | [diff] [blame] | 1079 | * We have to handle these bios individually. |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 1080 | */ |
Mike Snitzer | dc9cee5 | 2015-08-31 15:41:34 -0400 | [diff] [blame] | 1081 | dm_cell_release(cache->prison, cell, &cache->deferred_bios); |
| 1082 | free_prison_cell(cache, cell); |
| 1083 | } else |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 1084 | list_add_tail(&cell->user_list, &cache->deferred_cells); |
| 1085 | } |
| 1086 | |
| 1087 | static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, bool holder) |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1088 | { |
| 1089 | unsigned long flags; |
| 1090 | |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 1091 | if (!holder && dm_cell_promote_or_release(cache->prison, cell)) { |
| 1092 | /* |
| 1093 | * There was no prisoner to promote to holder, the |
| 1094 | * cell has been released. |
| 1095 | */ |
| 1096 | free_prison_cell(cache, cell); |
| 1097 | return; |
| 1098 | } |
| 1099 | |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1100 | spin_lock_irqsave(&cache->lock, flags); |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 1101 | __cell_defer(cache, cell); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1102 | spin_unlock_irqrestore(&cache->lock, flags); |
| 1103 | |
| 1104 | wake_worker(cache); |
| 1105 | } |
| 1106 | |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 1107 | static void cell_error_with_code(struct cache *cache, struct dm_bio_prison_cell *cell, int err) |
| 1108 | { |
| 1109 | dm_cell_error(cache->prison, cell, err); |
Mike Snitzer | dc9cee5 | 2015-08-31 15:41:34 -0400 | [diff] [blame] | 1110 | free_prison_cell(cache, cell); |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 1111 | } |
| 1112 | |
| 1113 | static void cell_requeue(struct cache *cache, struct dm_bio_prison_cell *cell) |
| 1114 | { |
| 1115 | cell_error_with_code(cache, cell, DM_ENDIO_REQUEUE); |
| 1116 | } |
| 1117 | |
Joe Thornber | a59db67 | 2015-01-23 10:16:16 +0000 | [diff] [blame] | 1118 | static void free_io_migration(struct dm_cache_migration *mg) |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1119 | { |
Joe Thornber | cc7da0b | 2015-09-01 11:38:19 +0100 | [diff] [blame] | 1120 | struct cache *cache = mg->cache; |
| 1121 | |
| 1122 | dec_io_migrations(cache); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1123 | free_migration(mg); |
Joe Thornber | cc7da0b | 2015-09-01 11:38:19 +0100 | [diff] [blame] | 1124 | wake_worker(cache); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1125 | } |
| 1126 | |
| 1127 | static void migration_failure(struct dm_cache_migration *mg) |
| 1128 | { |
| 1129 | struct cache *cache = mg->cache; |
Mike Snitzer | b61d950 | 2015-04-22 17:25:56 -0400 | [diff] [blame] | 1130 | const char *dev_name = cache_device_name(cache); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1131 | |
| 1132 | if (mg->writeback) { |
Mike Snitzer | b61d950 | 2015-04-22 17:25:56 -0400 | [diff] [blame] | 1133 | DMERR_LIMIT("%s: writeback failed; couldn't copy block", dev_name); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1134 | set_dirty(cache, mg->old_oblock, mg->cblock); |
| 1135 | cell_defer(cache, mg->old_ocell, false); |
| 1136 | |
| 1137 | } else if (mg->demote) { |
Mike Snitzer | b61d950 | 2015-04-22 17:25:56 -0400 | [diff] [blame] | 1138 | DMERR_LIMIT("%s: demotion failed; couldn't copy block", dev_name); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1139 | policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock); |
| 1140 | |
Heinz Mauelshagen | 80f659f | 2013-10-14 17:10:47 +0200 | [diff] [blame] | 1141 | cell_defer(cache, mg->old_ocell, mg->promote ? false : true); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1142 | if (mg->promote) |
Heinz Mauelshagen | 80f659f | 2013-10-14 17:10:47 +0200 | [diff] [blame] | 1143 | cell_defer(cache, mg->new_ocell, true); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1144 | } else { |
Mike Snitzer | b61d950 | 2015-04-22 17:25:56 -0400 | [diff] [blame] | 1145 | DMERR_LIMIT("%s: promotion failed; couldn't copy block", dev_name); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1146 | policy_remove_mapping(cache->policy, mg->new_oblock); |
Heinz Mauelshagen | 80f659f | 2013-10-14 17:10:47 +0200 | [diff] [blame] | 1147 | cell_defer(cache, mg->new_ocell, true); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1148 | } |
| 1149 | |
Joe Thornber | a59db67 | 2015-01-23 10:16:16 +0000 | [diff] [blame] | 1150 | free_io_migration(mg); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1151 | } |
| 1152 | |
| 1153 | static void migration_success_pre_commit(struct dm_cache_migration *mg) |
| 1154 | { |
Joe Thornber | 028ae9f | 2015-04-22 16:42:35 -0400 | [diff] [blame] | 1155 | int r; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1156 | unsigned long flags; |
| 1157 | struct cache *cache = mg->cache; |
| 1158 | |
| 1159 | if (mg->writeback) { |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1160 | clear_dirty(cache, mg->old_oblock, mg->cblock); |
Anssi Hannula | 40aa978 | 2014-09-05 03:11:28 +0300 | [diff] [blame] | 1161 | cell_defer(cache, mg->old_ocell, false); |
Joe Thornber | a59db67 | 2015-01-23 10:16:16 +0000 | [diff] [blame] | 1162 | free_io_migration(mg); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1163 | return; |
| 1164 | |
| 1165 | } else if (mg->demote) { |
Joe Thornber | 028ae9f | 2015-04-22 16:42:35 -0400 | [diff] [blame] | 1166 | r = dm_cache_remove_mapping(cache->cmd, mg->cblock); |
| 1167 | if (r) { |
Mike Snitzer | b61d950 | 2015-04-22 17:25:56 -0400 | [diff] [blame] | 1168 | DMERR_LIMIT("%s: demotion failed; couldn't update on disk metadata", |
| 1169 | cache_device_name(cache)); |
Joe Thornber | 028ae9f | 2015-04-22 16:42:35 -0400 | [diff] [blame] | 1170 | metadata_operation_failed(cache, "dm_cache_remove_mapping", r); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1171 | policy_force_mapping(cache->policy, mg->new_oblock, |
| 1172 | mg->old_oblock); |
| 1173 | if (mg->promote) |
| 1174 | cell_defer(cache, mg->new_ocell, true); |
Joe Thornber | a59db67 | 2015-01-23 10:16:16 +0000 | [diff] [blame] | 1175 | free_io_migration(mg); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1176 | return; |
| 1177 | } |
| 1178 | } else { |
Joe Thornber | 028ae9f | 2015-04-22 16:42:35 -0400 | [diff] [blame] | 1179 | r = dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock); |
| 1180 | if (r) { |
Mike Snitzer | b61d950 | 2015-04-22 17:25:56 -0400 | [diff] [blame] | 1181 | DMERR_LIMIT("%s: promotion failed; couldn't update on disk metadata", |
| 1182 | cache_device_name(cache)); |
Joe Thornber | 028ae9f | 2015-04-22 16:42:35 -0400 | [diff] [blame] | 1183 | metadata_operation_failed(cache, "dm_cache_insert_mapping", r); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1184 | policy_remove_mapping(cache->policy, mg->new_oblock); |
Joe Thornber | a59db67 | 2015-01-23 10:16:16 +0000 | [diff] [blame] | 1185 | free_io_migration(mg); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1186 | return; |
| 1187 | } |
| 1188 | } |
| 1189 | |
| 1190 | spin_lock_irqsave(&cache->lock, flags); |
| 1191 | list_add_tail(&mg->list, &cache->need_commit_migrations); |
| 1192 | cache->commit_requested = true; |
| 1193 | spin_unlock_irqrestore(&cache->lock, flags); |
| 1194 | } |
| 1195 | |
| 1196 | static void migration_success_post_commit(struct dm_cache_migration *mg) |
| 1197 | { |
| 1198 | unsigned long flags; |
| 1199 | struct cache *cache = mg->cache; |
| 1200 | |
| 1201 | if (mg->writeback) { |
Mike Snitzer | b61d950 | 2015-04-22 17:25:56 -0400 | [diff] [blame] | 1202 | DMWARN_LIMIT("%s: writeback unexpectedly triggered commit", |
| 1203 | cache_device_name(cache)); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1204 | return; |
| 1205 | |
| 1206 | } else if (mg->demote) { |
Heinz Mauelshagen | 80f659f | 2013-10-14 17:10:47 +0200 | [diff] [blame] | 1207 | cell_defer(cache, mg->old_ocell, !mg->promote);
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1208 | |
| 1209 | if (mg->promote) { |
| 1210 | mg->demote = false; |
| 1211 | |
| 1212 | spin_lock_irqsave(&cache->lock, flags); |
| 1213 | list_add_tail(&mg->list, &cache->quiesced_migrations); |
| 1214 | spin_unlock_irqrestore(&cache->lock, flags); |
| 1215 | |
Joe Thornber | 65790ff | 2013-11-08 16:39:50 +0000 | [diff] [blame] | 1216 | } else { |
| 1217 | if (mg->invalidate) |
| 1218 | policy_remove_mapping(cache->policy, mg->old_oblock); |
Joe Thornber | a59db67 | 2015-01-23 10:16:16 +0000 | [diff] [blame] | 1219 | free_io_migration(mg); |
Joe Thornber | 65790ff | 2013-11-08 16:39:50 +0000 | [diff] [blame] | 1220 | } |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1221 | |
| 1222 | } else { |
Joe Thornber | 1e32134 | 2014-11-27 12:26:46 +0000 | [diff] [blame] | 1223 | if (mg->requeue_holder) { |
| 1224 | clear_dirty(cache, mg->new_oblock, mg->cblock); |
Joe Thornber | c9d28d5 | 2013-10-31 13:55:48 -0400 | [diff] [blame] | 1225 | cell_defer(cache, mg->new_ocell, true); |
Joe Thornber | 1e32134 | 2014-11-27 12:26:46 +0000 | [diff] [blame] | 1226 | } else { |
| 1227 | /* |
| 1228 | * The block was promoted via an overwrite, so it's dirty. |
| 1229 | */ |
| 1230 | set_dirty(cache, mg->new_oblock, mg->cblock); |
Christoph Hellwig | 4246a0b | 2015-07-20 15:29:37 +0200 | [diff] [blame] | 1231 | bio_endio(mg->new_ocell->holder); |
Joe Thornber | c9d28d5 | 2013-10-31 13:55:48 -0400 | [diff] [blame] | 1232 | cell_defer(cache, mg->new_ocell, false); |
| 1233 | } |
Joe Thornber | a59db67 | 2015-01-23 10:16:16 +0000 | [diff] [blame] | 1234 | free_io_migration(mg); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1235 | } |
| 1236 | } |
| 1237 | |
| 1238 | static void copy_complete(int read_err, unsigned long write_err, void *context) |
| 1239 | { |
| 1240 | unsigned long flags; |
| 1241 | struct dm_cache_migration *mg = context;
| 1242 | struct cache *cache = mg->cache; |
| 1243 | |
| 1244 | if (read_err || write_err) |
| 1245 | mg->err = true; |
| 1246 | |
| 1247 | spin_lock_irqsave(&cache->lock, flags); |
| 1248 | list_add_tail(&mg->list, &cache->completed_migrations); |
| 1249 | spin_unlock_irqrestore(&cache->lock, flags); |
| 1250 | |
| 1251 | wake_worker(cache); |
| 1252 | } |
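/*
 * dm-kcopyd invokes copy_complete() from its own context, so the
 * migration is not finished here; it is parked on completed_migrations
 * and the worker later runs complete_migration() against it (see
 * do_worker()).  cache->lock is taken irqsave because the same lock is
 * also taken from bio endio paths such as overwrite_endio().
 */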
| 1253 | |
Joe Thornber | 7ae34e7 | 2014-11-06 10:18:04 +0000 | [diff] [blame] | 1254 | static void issue_copy(struct dm_cache_migration *mg) |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1255 | { |
| 1256 | int r; |
| 1257 | struct dm_io_region o_region, c_region; |
| 1258 | struct cache *cache = mg->cache; |
Heinz Mauelshagen | 8b9d966 | 2014-03-12 00:40:05 +0100 | [diff] [blame] | 1259 | sector_t cblock = from_cblock(mg->cblock); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1260 | |
| 1261 | o_region.bdev = cache->origin_dev->bdev; |
| 1262 | o_region.count = cache->sectors_per_block; |
| 1263 | |
| 1264 | c_region.bdev = cache->cache_dev->bdev; |
Heinz Mauelshagen | 8b9d966 | 2014-03-12 00:40:05 +0100 | [diff] [blame] | 1265 | c_region.sector = cblock * cache->sectors_per_block; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1266 | c_region.count = cache->sectors_per_block; |
| 1267 | |
| 1268 | if (mg->writeback || mg->demote) { |
| 1269 | /* demote */ |
| 1270 | o_region.sector = from_oblock(mg->old_oblock) * cache->sectors_per_block; |
| 1271 | r = dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, mg); |
| 1272 | } else { |
| 1273 | /* promote */ |
| 1274 | o_region.sector = from_oblock(mg->new_oblock) * cache->sectors_per_block; |
| 1275 | r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg); |
| 1276 | } |
| 1277 | |
Heinz Mauelshagen | 2c2263c | 2013-10-14 17:14:45 +0200 | [diff] [blame] | 1278 | if (r < 0) { |
Mike Snitzer | b61d950 | 2015-04-22 17:25:56 -0400 | [diff] [blame] | 1279 | DMERR_LIMIT("%s: issuing migration failed", cache_device_name(cache)); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1280 | migration_failure(mg); |
Heinz Mauelshagen | 2c2263c | 2013-10-14 17:14:45 +0200 | [diff] [blame] | 1281 | } |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1282 | } |
| 1283 | |
Christoph Hellwig | 4246a0b | 2015-07-20 15:29:37 +0200 | [diff] [blame] | 1284 | static void overwrite_endio(struct bio *bio) |
Joe Thornber | c9d28d5 | 2013-10-31 13:55:48 -0400 | [diff] [blame] | 1285 | { |
| 1286 | struct dm_cache_migration *mg = bio->bi_private; |
| 1287 | struct cache *cache = mg->cache; |
| 1288 | size_t pb_data_size = get_per_bio_data_size(cache); |
| 1289 | struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); |
| 1290 | unsigned long flags; |
| 1291 | |
Mike Snitzer | 80ae49a | 2014-01-31 14:30:37 -0500 | [diff] [blame] | 1292 | dm_unhook_bio(&pb->hook_info, bio); |
| 1293 | |
Christoph Hellwig | 4246a0b | 2015-07-20 15:29:37 +0200 | [diff] [blame] | 1294 | if (bio->bi_error) |
Joe Thornber | c9d28d5 | 2013-10-31 13:55:48 -0400 | [diff] [blame] | 1295 | mg->err = true; |
| 1296 | |
Mike Snitzer | 80ae49a | 2014-01-31 14:30:37 -0500 | [diff] [blame] | 1297 | mg->requeue_holder = false; |
| 1298 | |
Joe Thornber | c9d28d5 | 2013-10-31 13:55:48 -0400 | [diff] [blame] | 1299 | spin_lock_irqsave(&cache->lock, flags); |
| 1300 | list_add_tail(&mg->list, &cache->completed_migrations); |
Joe Thornber | c9d28d5 | 2013-10-31 13:55:48 -0400 | [diff] [blame] | 1301 | spin_unlock_irqrestore(&cache->lock, flags); |
| 1302 | |
| 1303 | wake_worker(cache); |
| 1304 | } |
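/*
 * Clearing mg->requeue_holder above records that the holder bio itself
 * carried the new data to the cache device, so once the metadata
 * commits, migration_success_post_commit() ends it with bio_endio()
 * rather than releasing it for a requeue.
 */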
| 1305 | |
| 1306 | static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio) |
| 1307 | { |
| 1308 | size_t pb_data_size = get_per_bio_data_size(mg->cache); |
| 1309 | struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); |
| 1310 | |
| 1311 | dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg); |
| 1312 | remap_to_cache_dirty(mg->cache, bio, mg->new_oblock, mg->cblock); |
Joe Thornber | 8c081b5 | 2014-05-13 16:18:38 +0100 | [diff] [blame] | 1313 | |
| 1314 | /* |
| 1315 | * No need to inc_ds() here, since the cell will be held for the |
| 1316 | * duration of the io. |
| 1317 | */ |
Joe Thornber | 066dbaa | 2015-05-15 15:18:01 +0100 | [diff] [blame] | 1318 | accounted_request(mg->cache, bio); |
Joe Thornber | c9d28d5 | 2013-10-31 13:55:48 -0400 | [diff] [blame] | 1319 | } |
| 1320 | |
| 1321 | static bool bio_writes_complete_block(struct cache *cache, struct bio *bio) |
| 1322 | { |
| 1323 | return (bio_data_dir(bio) == WRITE) && |
Kent Overstreet | 4f024f3 | 2013-10-11 15:44:27 -0700 | [diff] [blame] | 1324 | (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); |
Joe Thornber | c9d28d5 | 2013-10-31 13:55:48 -0400 | [diff] [blame] | 1325 | } |
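/*
 * A write that covers a complete cache block lets a promotion (in
 * writeback mode) skip the kcopyd copy entirely: the bio can be
 * remapped straight at the cache device (see issue_overwrite()), since
 * whatever the copy brought in would be overwritten immediately anyway.
 */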
| 1326 | |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1327 | static void avoid_copy(struct dm_cache_migration *mg) |
| 1328 | { |
| 1329 | atomic_inc(&mg->cache->stats.copies_avoided); |
| 1330 | migration_success_pre_commit(mg); |
| 1331 | } |
| 1332 | |
Joe Thornber | 7ae34e7 | 2014-11-06 10:18:04 +0000 | [diff] [blame] | 1333 | static void calc_discard_block_range(struct cache *cache, struct bio *bio, |
| 1334 | dm_dblock_t *b, dm_dblock_t *e) |
| 1335 | { |
| 1336 | sector_t sb = bio->bi_iter.bi_sector; |
| 1337 | sector_t se = bio_end_sector(bio); |
| 1338 | |
| 1339 | *b = to_dblock(dm_sector_div_up(sb, cache->discard_block_size)); |
| 1340 | |
| 1341 | if (se - sb < cache->discard_block_size) |
| 1342 | *e = *b; |
| 1343 | else |
| 1344 | *e = to_dblock(block_div(se, cache->discard_block_size)); |
| 1345 | } |
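/*
 * Only discard blocks wholly covered by the bio are included: the
 * start is rounded up and the end rounded down.  As an illustration,
 * with a hypothetical discard_block_size of 128 sectors and a bio
 * spanning sectors 100..400:
 *
 *   *b = dm_sector_div_up(100, 128) = 1
 *   *e = block_div(400, 128)        = 3
 *
 * so blocks 1 and 2 (sectors 128..383) are marked, and the partial
 * blocks at either end are left untouched.
 */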
| 1346 | |
| 1347 | static void issue_discard(struct dm_cache_migration *mg) |
| 1348 | { |
| 1349 | dm_dblock_t b, e; |
| 1350 | struct bio *bio = mg->new_ocell->holder; |
Joe Thornber | cc7da0b | 2015-09-01 11:38:19 +0100 | [diff] [blame] | 1351 | struct cache *cache = mg->cache; |
Joe Thornber | 7ae34e7 | 2014-11-06 10:18:04 +0000 | [diff] [blame] | 1352 | |
Joe Thornber | cc7da0b | 2015-09-01 11:38:19 +0100 | [diff] [blame] | 1353 | calc_discard_block_range(cache, bio, &b, &e); |
Joe Thornber | 7ae34e7 | 2014-11-06 10:18:04 +0000 | [diff] [blame] | 1354 | while (b != e) { |
Joe Thornber | cc7da0b | 2015-09-01 11:38:19 +0100 | [diff] [blame] | 1355 | set_discard(cache, b); |
Joe Thornber | 7ae34e7 | 2014-11-06 10:18:04 +0000 | [diff] [blame] | 1356 | b = to_dblock(from_dblock(b) + 1); |
| 1357 | } |
| 1358 | |
Christoph Hellwig | 4246a0b | 2015-07-20 15:29:37 +0200 | [diff] [blame] | 1359 | bio_endio(bio); |
Joe Thornber | cc7da0b | 2015-09-01 11:38:19 +0100 | [diff] [blame] | 1360 | cell_defer(cache, mg->new_ocell, false); |
Joe Thornber | 7ae34e7 | 2014-11-06 10:18:04 +0000 | [diff] [blame] | 1361 | free_migration(mg); |
Joe Thornber | cc7da0b | 2015-09-01 11:38:19 +0100 | [diff] [blame] | 1362 | wake_worker(cache); |
Joe Thornber | 7ae34e7 | 2014-11-06 10:18:04 +0000 | [diff] [blame] | 1363 | } |
| 1364 | |
| 1365 | static void issue_copy_or_discard(struct dm_cache_migration *mg) |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1366 | { |
| 1367 | bool avoid; |
| 1368 | struct cache *cache = mg->cache; |
| 1369 | |
Joe Thornber | 7ae34e7 | 2014-11-06 10:18:04 +0000 | [diff] [blame] | 1370 | if (mg->discard) { |
| 1371 | issue_discard(mg); |
| 1372 | return; |
| 1373 | } |
| 1374 | |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1375 | if (mg->writeback || mg->demote) |
| 1376 | avoid = !is_dirty(cache, mg->cblock) || |
| 1377 | is_discarded_oblock(cache, mg->old_oblock); |
Joe Thornber | c9d28d5 | 2013-10-31 13:55:48 -0400 | [diff] [blame] | 1378 | else { |
| 1379 | struct bio *bio = mg->new_ocell->holder; |
| 1380 | |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1381 | avoid = is_discarded_oblock(cache, mg->new_oblock); |
| 1382 | |
Joe Thornber | f29a314 | 2014-11-27 12:21:08 +0000 | [diff] [blame] | 1383 | if (writeback_mode(&cache->features) && |
| 1384 | !avoid && bio_writes_complete_block(cache, bio)) { |
Joe Thornber | c9d28d5 | 2013-10-31 13:55:48 -0400 | [diff] [blame] | 1385 | issue_overwrite(mg, bio); |
| 1386 | return; |
| 1387 | } |
| 1388 | } |
| 1389 | |
Joe Thornber | 7ae34e7 | 2014-11-06 10:18:04 +0000 | [diff] [blame] | 1390 | avoid ? avoid_copy(mg) : issue_copy(mg); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1391 | } |
| 1392 | |
| 1393 | static void complete_migration(struct dm_cache_migration *mg) |
| 1394 | { |
| 1395 | if (mg->err) |
| 1396 | migration_failure(mg); |
| 1397 | else |
| 1398 | migration_success_pre_commit(mg); |
| 1399 | } |
| 1400 | |
| 1401 | static void process_migrations(struct cache *cache, struct list_head *head, |
| 1402 | void (*fn)(struct dm_cache_migration *)) |
| 1403 | { |
| 1404 | unsigned long flags; |
| 1405 | struct list_head list; |
| 1406 | struct dm_cache_migration *mg, *tmp; |
| 1407 | |
| 1408 | INIT_LIST_HEAD(&list); |
| 1409 | spin_lock_irqsave(&cache->lock, flags); |
| 1410 | list_splice_init(head, &list); |
| 1411 | spin_unlock_irqrestore(&cache->lock, flags); |
| 1412 | |
| 1413 | list_for_each_entry_safe(mg, tmp, &list, list) |
| 1414 | fn(mg); |
| 1415 | } |
| 1416 | |
| 1417 | static void __queue_quiesced_migration(struct dm_cache_migration *mg) |
| 1418 | { |
| 1419 | list_add_tail(&mg->list, &mg->cache->quiesced_migrations); |
| 1420 | } |
| 1421 | |
| 1422 | static void queue_quiesced_migration(struct dm_cache_migration *mg) |
| 1423 | { |
| 1424 | unsigned long flags; |
| 1425 | struct cache *cache = mg->cache; |
| 1426 | |
| 1427 | spin_lock_irqsave(&cache->lock, flags); |
| 1428 | __queue_quiesced_migration(mg); |
| 1429 | spin_unlock_irqrestore(&cache->lock, flags); |
| 1430 | |
| 1431 | wake_worker(cache); |
| 1432 | } |
| 1433 | |
| 1434 | static void queue_quiesced_migrations(struct cache *cache, struct list_head *work) |
| 1435 | { |
| 1436 | unsigned long flags; |
| 1437 | struct dm_cache_migration *mg, *tmp; |
| 1438 | |
| 1439 | spin_lock_irqsave(&cache->lock, flags); |
| 1440 | list_for_each_entry_safe(mg, tmp, work, list) |
| 1441 | __queue_quiesced_migration(mg); |
| 1442 | spin_unlock_irqrestore(&cache->lock, flags); |
| 1443 | |
| 1444 | wake_worker(cache); |
| 1445 | } |
| 1446 | |
| 1447 | static void check_for_quiesced_migrations(struct cache *cache, |
| 1448 | struct per_bio_data *pb) |
| 1449 | { |
| 1450 | struct list_head work; |
| 1451 | |
| 1452 | if (!pb->all_io_entry) |
| 1453 | return; |
| 1454 | |
| 1455 | INIT_LIST_HEAD(&work); |
Joe Thornber | 8c081b5 | 2014-05-13 16:18:38 +0100 | [diff] [blame] | 1456 | dm_deferred_entry_dec(pb->all_io_entry, &work); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1457 | |
| 1458 | if (!list_empty(&work)) |
| 1459 | queue_quiesced_migrations(cache, &work); |
| 1460 | } |
| 1461 | |
| 1462 | static void quiesce_migration(struct dm_cache_migration *mg) |
| 1463 | { |
| 1464 | if (!dm_deferred_set_add_work(mg->cache->all_io_ds, &mg->list)) |
| 1465 | queue_quiesced_migration(mg); |
| 1466 | } |
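/*
 * dm_deferred_set_add_work() returning zero means no in-flight bios
 * (tracked via all_io_ds) stand in the way, so the migration may start
 * immediately.  Otherwise it stays held on the deferred set and is
 * released later from check_for_quiesced_migrations(), once the
 * blocking bios complete.
 */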
| 1467 | |
| 1468 | static void promote(struct cache *cache, struct prealloc *structs, |
| 1469 | dm_oblock_t oblock, dm_cblock_t cblock, |
| 1470 | struct dm_bio_prison_cell *cell) |
| 1471 | { |
| 1472 | struct dm_cache_migration *mg = prealloc_get_migration(structs); |
| 1473 | |
| 1474 | mg->err = false; |
Joe Thornber | 7ae34e7 | 2014-11-06 10:18:04 +0000 | [diff] [blame] | 1475 | mg->discard = false; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1476 | mg->writeback = false; |
| 1477 | mg->demote = false; |
| 1478 | mg->promote = true; |
Joe Thornber | c9d28d5 | 2013-10-31 13:55:48 -0400 | [diff] [blame] | 1479 | mg->requeue_holder = true; |
Joe Thornber | 65790ff | 2013-11-08 16:39:50 +0000 | [diff] [blame] | 1480 | mg->invalidate = false; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1481 | mg->cache = cache; |
| 1482 | mg->new_oblock = oblock; |
| 1483 | mg->cblock = cblock; |
| 1484 | mg->old_ocell = NULL; |
| 1485 | mg->new_ocell = cell; |
| 1486 | mg->start_jiffies = jiffies; |
| 1487 | |
Joe Thornber | a59db67 | 2015-01-23 10:16:16 +0000 | [diff] [blame] | 1488 | inc_io_migrations(cache); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1489 | quiesce_migration(mg); |
| 1490 | } |
| 1491 | |
| 1492 | static void writeback(struct cache *cache, struct prealloc *structs, |
| 1493 | dm_oblock_t oblock, dm_cblock_t cblock, |
| 1494 | struct dm_bio_prison_cell *cell) |
| 1495 | { |
| 1496 | struct dm_cache_migration *mg = prealloc_get_migration(structs); |
| 1497 | |
| 1498 | mg->err = false; |
Joe Thornber | 7ae34e7 | 2014-11-06 10:18:04 +0000 | [diff] [blame] | 1499 | mg->discard = false; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1500 | mg->writeback = true; |
| 1501 | mg->demote = false; |
| 1502 | mg->promote = false; |
Joe Thornber | c9d28d5 | 2013-10-31 13:55:48 -0400 | [diff] [blame] | 1503 | mg->requeue_holder = true; |
Joe Thornber | 65790ff | 2013-11-08 16:39:50 +0000 | [diff] [blame] | 1504 | mg->invalidate = false; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1505 | mg->cache = cache; |
| 1506 | mg->old_oblock = oblock; |
| 1507 | mg->cblock = cblock; |
| 1508 | mg->old_ocell = cell; |
| 1509 | mg->new_ocell = NULL; |
| 1510 | mg->start_jiffies = jiffies; |
| 1511 | |
Joe Thornber | a59db67 | 2015-01-23 10:16:16 +0000 | [diff] [blame] | 1512 | inc_io_migrations(cache); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1513 | quiesce_migration(mg); |
| 1514 | } |
| 1515 | |
| 1516 | static void demote_then_promote(struct cache *cache, struct prealloc *structs, |
| 1517 | dm_oblock_t old_oblock, dm_oblock_t new_oblock, |
| 1518 | dm_cblock_t cblock, |
| 1519 | struct dm_bio_prison_cell *old_ocell, |
| 1520 | struct dm_bio_prison_cell *new_ocell) |
| 1521 | { |
| 1522 | struct dm_cache_migration *mg = prealloc_get_migration(structs); |
| 1523 | |
| 1524 | mg->err = false; |
Joe Thornber | 7ae34e7 | 2014-11-06 10:18:04 +0000 | [diff] [blame] | 1525 | mg->discard = false; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1526 | mg->writeback = false; |
| 1527 | mg->demote = true; |
| 1528 | mg->promote = true; |
Joe Thornber | c9d28d5 | 2013-10-31 13:55:48 -0400 | [diff] [blame] | 1529 | mg->requeue_holder = true; |
Joe Thornber | 65790ff | 2013-11-08 16:39:50 +0000 | [diff] [blame] | 1530 | mg->invalidate = false; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1531 | mg->cache = cache; |
| 1532 | mg->old_oblock = old_oblock; |
| 1533 | mg->new_oblock = new_oblock; |
| 1534 | mg->cblock = cblock; |
| 1535 | mg->old_ocell = old_ocell; |
| 1536 | mg->new_ocell = new_ocell; |
| 1537 | mg->start_jiffies = jiffies; |
| 1538 | |
Joe Thornber | a59db67 | 2015-01-23 10:16:16 +0000 | [diff] [blame] | 1539 | inc_io_migrations(cache); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1540 | quiesce_migration(mg); |
| 1541 | } |
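/*
 * A replace is two chained copies sharing one migration: the demote
 * copy runs first, and after its metadata update commits,
 * migration_success_post_commit() clears mg->demote and requeues the
 * same migration on quiesced_migrations so the promote copy runs
 * second.
 */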
| 1542 | |
Joe Thornber | 2ee57d5 | 2013-10-24 14:10:29 -0400 | [diff] [blame] | 1543 | /* |
| 1544 | * Invalidate a cache entry. No writeback occurs; any changes in the cache |
| 1545 | * block are thrown away. |
| 1546 | */ |
| 1547 | static void invalidate(struct cache *cache, struct prealloc *structs, |
| 1548 | dm_oblock_t oblock, dm_cblock_t cblock, |
| 1549 | struct dm_bio_prison_cell *cell) |
| 1550 | { |
| 1551 | struct dm_cache_migration *mg = prealloc_get_migration(structs); |
| 1552 | |
| 1553 | mg->err = false; |
Joe Thornber | 7ae34e7 | 2014-11-06 10:18:04 +0000 | [diff] [blame] | 1554 | mg->discard = false; |
Joe Thornber | 2ee57d5 | 2013-10-24 14:10:29 -0400 | [diff] [blame] | 1555 | mg->writeback = false; |
| 1556 | mg->demote = true; |
| 1557 | mg->promote = false; |
| 1558 | mg->requeue_holder = true; |
Joe Thornber | 65790ff | 2013-11-08 16:39:50 +0000 | [diff] [blame] | 1559 | mg->invalidate = true; |
Joe Thornber | 2ee57d5 | 2013-10-24 14:10:29 -0400 | [diff] [blame] | 1560 | mg->cache = cache; |
| 1561 | mg->old_oblock = oblock; |
| 1562 | mg->cblock = cblock; |
| 1563 | mg->old_ocell = cell; |
| 1564 | mg->new_ocell = NULL; |
| 1565 | mg->start_jiffies = jiffies; |
| 1566 | |
Joe Thornber | a59db67 | 2015-01-23 10:16:16 +0000 | [diff] [blame] | 1567 | inc_io_migrations(cache); |
Joe Thornber | 2ee57d5 | 2013-10-24 14:10:29 -0400 | [diff] [blame] | 1568 | quiesce_migration(mg); |
| 1569 | } |
| 1570 | |
Joe Thornber | 7ae34e7 | 2014-11-06 10:18:04 +0000 | [diff] [blame] | 1571 | static void discard(struct cache *cache, struct prealloc *structs, |
| 1572 | struct dm_bio_prison_cell *cell) |
| 1573 | { |
| 1574 | struct dm_cache_migration *mg = prealloc_get_migration(structs); |
| 1575 | |
| 1576 | mg->err = false; |
| 1577 | mg->discard = true; |
| 1578 | mg->writeback = false; |
| 1579 | mg->demote = false; |
| 1580 | mg->promote = false; |
| 1581 | mg->requeue_holder = false; |
| 1582 | mg->invalidate = false; |
| 1583 | mg->cache = cache; |
| 1584 | mg->old_ocell = NULL; |
| 1585 | mg->new_ocell = cell; |
| 1586 | mg->start_jiffies = jiffies; |
| 1587 | |
| 1588 | quiesce_migration(mg); |
| 1589 | } |
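/*
 * Unlike the copy-based migrations above, a discard moves no data, so
 * it deliberately skips inc_io_migrations() and doesn't count against
 * migration_threshold; issue_discard() tears it down with plain
 * free_migration() rather than free_io_migration().
 */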
| 1590 | |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1591 | /*---------------------------------------------------------------- |
| 1592 | * bio processing |
| 1593 | *--------------------------------------------------------------*/ |
| 1594 | static void defer_bio(struct cache *cache, struct bio *bio) |
| 1595 | { |
| 1596 | unsigned long flags; |
| 1597 | |
| 1598 | spin_lock_irqsave(&cache->lock, flags); |
| 1599 | bio_list_add(&cache->deferred_bios, bio); |
| 1600 | spin_unlock_irqrestore(&cache->lock, flags); |
| 1601 | |
| 1602 | wake_worker(cache); |
| 1603 | } |
| 1604 | |
| 1605 | static void process_flush_bio(struct cache *cache, struct bio *bio) |
| 1606 | { |
Mike Snitzer | 19b0092 | 2013-04-05 15:36:34 +0100 | [diff] [blame] | 1607 | size_t pb_data_size = get_per_bio_data_size(cache); |
| 1608 | struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1609 | |
Kent Overstreet | 4f024f3 | 2013-10-11 15:44:27 -0700 | [diff] [blame] | 1610 | BUG_ON(bio->bi_iter.bi_size); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1611 | if (!pb->req_nr) |
| 1612 | remap_to_origin(cache, bio); |
| 1613 | else |
| 1614 | remap_to_cache(cache, bio, 0); |
| 1615 | |
Joe Thornber | 8c081b5 | 2014-05-13 16:18:38 +0100 | [diff] [blame] | 1616 | /* |
Mike Christie | 28a8f0d | 2016-06-05 14:32:25 -0500 | [diff] [blame] | 1617 | * REQ_PREFLUSH is not directed at any particular block so we don't |
| 1618 | * need to inc_ds(). REQ_FUA bios are split into a write + REQ_PREFLUSH
Joe Thornber | 8c081b5 | 2014-05-13 16:18:38 +0100 | [diff] [blame] | 1619 | * by dm-core. |
| 1620 | */ |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1621 | issue(cache, bio); |
| 1622 | } |
| 1623 | |
Joe Thornber | 7ae34e7 | 2014-11-06 10:18:04 +0000 | [diff] [blame] | 1624 | static void process_discard_bio(struct cache *cache, struct prealloc *structs, |
| 1625 | struct bio *bio) |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1626 | { |
Joe Thornber | 7ae34e7 | 2014-11-06 10:18:04 +0000 | [diff] [blame] | 1627 | int r; |
| 1628 | dm_dblock_t b, e; |
| 1629 | struct dm_bio_prison_cell *cell_prealloc, *new_ocell; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1630 | |
Joe Thornber | 7ae34e7 | 2014-11-06 10:18:04 +0000 | [diff] [blame] | 1631 | calc_discard_block_range(cache, bio, &b, &e); |
| 1632 | if (b == e) { |
Christoph Hellwig | 4246a0b | 2015-07-20 15:29:37 +0200 | [diff] [blame] | 1633 | bio_endio(bio); |
Joe Thornber | 7ae34e7 | 2014-11-06 10:18:04 +0000 | [diff] [blame] | 1634 | return; |
| 1635 | } |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1636 | |
Joe Thornber | 7ae34e7 | 2014-11-06 10:18:04 +0000 | [diff] [blame] | 1637 | cell_prealloc = prealloc_get_cell(structs); |
| 1638 | r = bio_detain_range(cache, dblock_to_oblock(cache, b), dblock_to_oblock(cache, e), bio, cell_prealloc, |
| 1639 | (cell_free_fn) prealloc_put_cell, |
| 1640 | structs, &new_ocell); |
| 1641 | if (r > 0) |
| 1642 | return; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1643 | |
Joe Thornber | 7ae34e7 | 2014-11-06 10:18:04 +0000 | [diff] [blame] | 1644 | discard(cache, structs, new_ocell); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1645 | } |
| 1646 | |
| 1647 | static bool spare_migration_bandwidth(struct cache *cache) |
| 1648 | { |
Joe Thornber | a59db67 | 2015-01-23 10:16:16 +0000 | [diff] [blame] | 1649 | sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) * |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1650 | cache->sectors_per_block; |
| 1651 | return current_volume < cache->migration_threshold; |
| 1652 | } |
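/*
 * For example, assuming a block size of 128 sectors and a hypothetical
 * migration_threshold of 2048 sectors, a new migration is allowed only
 * while (nr_io_migrations + 1) * 128 < 2048, i.e. while fewer than 15
 * migrations are already in flight.
 */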
| 1653 | |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1654 | static void inc_hit_counter(struct cache *cache, struct bio *bio) |
| 1655 | { |
| 1656 | atomic_inc(bio_data_dir(bio) == READ ? |
| 1657 | &cache->stats.read_hit : &cache->stats.write_hit); |
| 1658 | } |
| 1659 | |
| 1660 | static void inc_miss_counter(struct cache *cache, struct bio *bio) |
| 1661 | { |
| 1662 | atomic_inc(bio_data_dir(bio) == READ ? |
| 1663 | &cache->stats.read_miss : &cache->stats.write_miss); |
| 1664 | } |
| 1665 | |
Joe Thornber | fb4100a | 2015-05-20 10:30:32 +0100 | [diff] [blame] | 1666 | /*----------------------------------------------------------------*/ |
| 1667 | |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 1668 | struct inc_detail { |
| 1669 | struct cache *cache; |
| 1670 | struct bio_list bios_for_issue; |
| 1671 | struct bio_list unhandled_bios; |
| 1672 | bool any_writes; |
| 1673 | }; |
| 1674 | |
| 1675 | static void inc_fn(void *context, struct dm_bio_prison_cell *cell) |
| 1676 | { |
| 1677 | struct bio *bio; |
| 1678 | struct inc_detail *detail = context; |
| 1679 | struct cache *cache = detail->cache; |
| 1680 | |
| 1681 | inc_ds(cache, cell->holder, cell); |
| 1682 | if (bio_data_dir(cell->holder) == WRITE) |
| 1683 | detail->any_writes = true; |
| 1684 | |
| 1685 | while ((bio = bio_list_pop(&cell->bios))) { |
| 1686 | if (discard_or_flush(bio)) { |
| 1687 | bio_list_add(&detail->unhandled_bios, bio); |
| 1688 | continue; |
| 1689 | } |
| 1690 | |
| 1691 | if (bio_data_dir(bio) == WRITE) |
| 1692 | detail->any_writes = true; |
| 1693 | |
| 1694 | bio_list_add(&detail->bios_for_issue, bio); |
| 1695 | inc_ds(cache, bio, cell); |
| 1696 | } |
| 1697 | } |
| 1698 | |
| 1699 | /* FIXME: refactor these two */
| 1700 | static void remap_cell_to_origin_clear_discard(struct cache *cache, |
| 1701 | struct dm_bio_prison_cell *cell, |
| 1702 | dm_oblock_t oblock, bool issue_holder) |
| 1703 | { |
| 1704 | struct bio *bio; |
| 1705 | unsigned long flags; |
| 1706 | struct inc_detail detail; |
| 1707 | |
| 1708 | detail.cache = cache; |
| 1709 | bio_list_init(&detail.bios_for_issue); |
| 1710 | bio_list_init(&detail.unhandled_bios); |
| 1711 | detail.any_writes = false; |
| 1712 | |
| 1713 | spin_lock_irqsave(&cache->lock, flags); |
| 1714 | dm_cell_visit_release(cache->prison, inc_fn, &detail, cell); |
| 1715 | bio_list_merge(&cache->deferred_bios, &detail.unhandled_bios); |
| 1716 | spin_unlock_irqrestore(&cache->lock, flags); |
| 1717 | |
| 1718 | remap_to_origin(cache, cell->holder); |
| 1719 | if (issue_holder) |
| 1720 | issue(cache, cell->holder); |
| 1721 | else |
| 1722 | accounted_begin(cache, cell->holder); |
| 1723 | |
| 1724 | if (detail.any_writes) |
| 1725 | clear_discard(cache, oblock_to_dblock(cache, oblock)); |
| 1726 | |
| 1727 | while ((bio = bio_list_pop(&detail.bios_for_issue))) { |
| 1728 | remap_to_origin(cache, bio); |
| 1729 | issue(cache, bio); |
| 1730 | } |
Joe Thornber | 9153df7 | 2015-08-31 18:20:08 +0100 | [diff] [blame] | 1731 | |
| 1732 | free_prison_cell(cache, cell); |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 1733 | } |
| 1734 | |
| 1735 | static void remap_cell_to_cache_dirty(struct cache *cache, struct dm_bio_prison_cell *cell, |
| 1736 | dm_oblock_t oblock, dm_cblock_t cblock, bool issue_holder) |
| 1737 | { |
| 1738 | struct bio *bio; |
| 1739 | unsigned long flags; |
| 1740 | struct inc_detail detail; |
| 1741 | |
| 1742 | detail.cache = cache; |
| 1743 | bio_list_init(&detail.bios_for_issue); |
| 1744 | bio_list_init(&detail.unhandled_bios); |
| 1745 | detail.any_writes = false; |
| 1746 | |
| 1747 | spin_lock_irqsave(&cache->lock, flags); |
| 1748 | dm_cell_visit_release(cache->prison, inc_fn, &detail, cell); |
| 1749 | bio_list_merge(&cache->deferred_bios, &detail.unhandled_bios); |
| 1750 | spin_unlock_irqrestore(&cache->lock, flags); |
| 1751 | |
| 1752 | remap_to_cache(cache, cell->holder, cblock); |
| 1753 | if (issue_holder) |
| 1754 | issue(cache, cell->holder); |
| 1755 | else |
| 1756 | accounted_begin(cache, cell->holder); |
| 1757 | |
| 1758 | if (detail.any_writes) { |
| 1759 | set_dirty(cache, oblock, cblock); |
| 1760 | clear_discard(cache, oblock_to_dblock(cache, oblock)); |
| 1761 | } |
| 1762 | |
| 1763 | while ((bio = bio_list_pop(&detail.bios_for_issue))) { |
| 1764 | remap_to_cache(cache, bio, cblock); |
| 1765 | issue(cache, bio); |
| 1766 | } |
Joe Thornber | 9153df7 | 2015-08-31 18:20:08 +0100 | [diff] [blame] | 1767 | |
| 1768 | free_prison_cell(cache, cell); |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 1769 | } |
| 1770 | |
| 1771 | /*----------------------------------------------------------------*/ |
| 1772 | |
Joe Thornber | fb4100a | 2015-05-20 10:30:32 +0100 | [diff] [blame] | 1773 | struct old_oblock_lock { |
| 1774 | struct policy_locker locker; |
| 1775 | struct cache *cache; |
| 1776 | struct prealloc *structs; |
| 1777 | struct dm_bio_prison_cell *cell; |
| 1778 | }; |
| 1779 | |
| 1780 | static int null_locker(struct policy_locker *locker, dm_oblock_t b) |
| 1781 | { |
| 1782 | /* This should never be called */ |
| 1783 | BUG(); |
| 1784 | return 0; |
| 1785 | } |
| 1786 | |
| 1787 | static int cell_locker(struct policy_locker *locker, dm_oblock_t b) |
| 1788 | { |
| 1789 | struct old_oblock_lock *l = container_of(locker, struct old_oblock_lock, locker); |
| 1790 | struct dm_bio_prison_cell *cell_prealloc = prealloc_get_cell(l->structs); |
| 1791 | |
| 1792 | return bio_detain(l->cache, b, NULL, cell_prealloc, |
| 1793 | (cell_free_fn) prealloc_put_cell, |
| 1794 | l->structs, &l->cell); |
| 1795 | } |
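/*
 * cell_locker() is how the policy locks the victim during a
 * POLICY_REPLACE lookup: the policy calls back here to detain the old
 * origin block, and the resulting cell (ool.cell) is handed on to
 * demote_then_promote() so IO to the victim stays held off until the
 * demotion releases it.
 */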
| 1796 | |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 1797 | static void process_cell(struct cache *cache, struct prealloc *structs, |
| 1798 | struct dm_bio_prison_cell *new_ocell) |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1799 | { |
| 1800 | int r; |
| 1801 | bool release_cell = true; |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 1802 | struct bio *bio = new_ocell->holder; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1803 | dm_oblock_t block = get_bio_block(cache, bio); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1804 | struct policy_result lookup_result; |
Joe Thornber | 2ee57d5 | 2013-10-24 14:10:29 -0400 | [diff] [blame] | 1805 | bool passthrough = passthrough_mode(&cache->features); |
Joe Thornber | 4077525 | 2015-05-15 15:29:58 +0100 | [diff] [blame] | 1806 | bool fast_promotion, can_migrate; |
Joe Thornber | fb4100a | 2015-05-20 10:30:32 +0100 | [diff] [blame] | 1807 | struct old_oblock_lock ool; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1808 | |
Joe Thornber | 4077525 | 2015-05-15 15:29:58 +0100 | [diff] [blame] | 1809 | fast_promotion = is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio); |
| 1810 | can_migrate = !passthrough && (fast_promotion || spare_migration_bandwidth(cache)); |
Joe Thornber | 43c32bf | 2014-11-25 13:14:57 +0000 | [diff] [blame] | 1811 | |
Joe Thornber | fb4100a | 2015-05-20 10:30:32 +0100 | [diff] [blame] | 1812 | ool.locker.fn = cell_locker; |
| 1813 | ool.cache = cache; |
| 1814 | ool.structs = structs; |
| 1815 | ool.cell = NULL; |
Joe Thornber | 4077525 | 2015-05-15 15:29:58 +0100 | [diff] [blame] | 1816 | r = policy_map(cache->policy, block, true, can_migrate, fast_promotion, |
Joe Thornber | fb4100a | 2015-05-20 10:30:32 +0100 | [diff] [blame] | 1817 | bio, &ool.locker, &lookup_result); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1818 | |
| 1819 | if (r == -EWOULDBLOCK) |
| 1820 | /* migration has been denied */ |
| 1821 | lookup_result.op = POLICY_MISS; |
| 1822 | |
| 1823 | switch (lookup_result.op) { |
| 1824 | case POLICY_HIT: |
Joe Thornber | 2ee57d5 | 2013-10-24 14:10:29 -0400 | [diff] [blame] | 1825 | if (passthrough) { |
| 1826 | inc_miss_counter(cache, bio); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1827 | |
Joe Thornber | 2ee57d5 | 2013-10-24 14:10:29 -0400 | [diff] [blame] | 1828 | /* |
| 1829 | * Passthrough always maps to the origin, |
| 1830 | * invalidating any cache blocks that are written |
| 1831 | * to. |
| 1832 | */ |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1833 | |
Joe Thornber | 2ee57d5 | 2013-10-24 14:10:29 -0400 | [diff] [blame] | 1834 | if (bio_data_dir(bio) == WRITE) { |
| 1835 | atomic_inc(&cache->stats.demotion); |
| 1836 | invalidate(cache, structs, block, lookup_result.cblock, new_ocell); |
| 1837 | release_cell = false; |
| 1838 | |
| 1839 | } else { |
| 1840 | /* FIXME: factor out issue_origin() */ |
Joe Thornber | 2ee57d5 | 2013-10-24 14:10:29 -0400 | [diff] [blame] | 1841 | remap_to_origin_clear_discard(cache, bio, block); |
Joe Thornber | 8c081b5 | 2014-05-13 16:18:38 +0100 | [diff] [blame] | 1842 | inc_and_issue(cache, bio, new_ocell); |
Joe Thornber | 2ee57d5 | 2013-10-24 14:10:29 -0400 | [diff] [blame] | 1843 | } |
| 1844 | } else { |
| 1845 | inc_hit_counter(cache, bio); |
| 1846 | |
| 1847 | if (bio_data_dir(bio) == WRITE && |
| 1848 | writethrough_mode(&cache->features) && |
| 1849 | !is_dirty(cache, lookup_result.cblock)) { |
Joe Thornber | 2ee57d5 | 2013-10-24 14:10:29 -0400 | [diff] [blame] | 1850 | remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); |
Joe Thornber | 8c081b5 | 2014-05-13 16:18:38 +0100 | [diff] [blame] | 1851 | inc_and_issue(cache, bio, new_ocell); |
| 1852 | |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 1853 | } else { |
| 1854 | remap_cell_to_cache_dirty(cache, new_ocell, block, lookup_result.cblock, true); |
| 1855 | release_cell = false; |
Joe Thornber | 8c081b5 | 2014-05-13 16:18:38 +0100 | [diff] [blame] | 1856 | } |
Joe Thornber | 2ee57d5 | 2013-10-24 14:10:29 -0400 | [diff] [blame] | 1857 | } |
| 1858 | |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1859 | break; |
| 1860 | |
| 1861 | case POLICY_MISS: |
| 1862 | inc_miss_counter(cache, bio); |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 1863 | remap_cell_to_origin_clear_discard(cache, new_ocell, block, true); |
| 1864 | release_cell = false; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1865 | break; |
| 1866 | |
| 1867 | case POLICY_NEW: |
| 1868 | atomic_inc(&cache->stats.promotion); |
| 1869 | promote(cache, structs, block, lookup_result.cblock, new_ocell); |
| 1870 | release_cell = false; |
| 1871 | break; |
| 1872 | |
| 1873 | case POLICY_REPLACE: |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1874 | atomic_inc(&cache->stats.demotion); |
| 1875 | atomic_inc(&cache->stats.promotion); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1876 | demote_then_promote(cache, structs, lookup_result.old_oblock, |
| 1877 | block, lookup_result.cblock, |
Joe Thornber | fb4100a | 2015-05-20 10:30:32 +0100 | [diff] [blame] | 1878 | ool.cell, new_ocell); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1879 | release_cell = false; |
| 1880 | break; |
| 1881 | |
| 1882 | default: |
Mike Snitzer | b61d950 | 2015-04-22 17:25:56 -0400 | [diff] [blame] | 1883 | DMERR_LIMIT("%s: %s: erroring bio, unknown policy op: %u", |
| 1884 | cache_device_name(cache), __func__, |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1885 | (unsigned) lookup_result.op); |
| 1886 | bio_io_error(bio); |
| 1887 | } |
| 1888 | |
| 1889 | if (release_cell) |
| 1890 | cell_defer(cache, new_ocell, false); |
| 1891 | } |
| 1892 | |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 1893 | static void process_bio(struct cache *cache, struct prealloc *structs, |
| 1894 | struct bio *bio) |
| 1895 | { |
| 1896 | int r; |
| 1897 | dm_oblock_t block = get_bio_block(cache, bio); |
| 1898 | struct dm_bio_prison_cell *cell_prealloc, *new_ocell; |
| 1899 | |
| 1900 | /* |
| 1901 | * Check to see if that block is currently migrating. |
| 1902 | */ |
| 1903 | cell_prealloc = prealloc_get_cell(structs); |
| 1904 | r = bio_detain(cache, block, bio, cell_prealloc, |
| 1905 | (cell_free_fn) prealloc_put_cell, |
| 1906 | structs, &new_ocell); |
| 1907 | if (r > 0) |
| 1908 | return; |
| 1909 | |
| 1910 | process_cell(cache, structs, new_ocell); |
| 1911 | } |
| 1912 | |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1913 | static int need_commit_due_to_time(struct cache *cache) |
| 1914 | { |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 1915 | return jiffies < cache->last_commit_jiffies || |
| 1916 | jiffies > cache->last_commit_jiffies + COMMIT_PERIOD; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1917 | } |
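/*
 * The first comparison catches jiffies wrapping past
 * last_commit_jiffies, so a commit isn't deferred for an entire wrap
 * period; the second is the ordinary COMMIT_PERIOD timeout.
 */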
| 1918 | |
Joe Thornber | 028ae9f | 2015-04-22 16:42:35 -0400 | [diff] [blame] | 1919 | /* |
| 1920 | * A non-zero return indicates read_only or fail_io mode. |
| 1921 | */ |
| 1922 | static int commit(struct cache *cache, bool clean_shutdown) |
| 1923 | { |
| 1924 | int r; |
| 1925 | |
| 1926 | if (get_cache_mode(cache) >= CM_READ_ONLY) |
| 1927 | return -EINVAL; |
| 1928 | |
| 1929 | atomic_inc(&cache->stats.commit_count); |
| 1930 | r = dm_cache_commit(cache->cmd, clean_shutdown); |
| 1931 | if (r) |
| 1932 | metadata_operation_failed(cache, "dm_cache_commit", r); |
| 1933 | |
| 1934 | return r; |
| 1935 | } |
| 1936 | |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1937 | static int commit_if_needed(struct cache *cache) |
| 1938 | { |
Heinz Mauelshagen | ffcbcb6 | 2013-10-14 17:24:43 +0200 | [diff] [blame] | 1939 | int r = 0; |
| 1940 | |
| 1941 | if ((cache->commit_requested || need_commit_due_to_time(cache)) && |
| 1942 | dm_cache_changed_this_transaction(cache->cmd)) { |
Joe Thornber | 028ae9f | 2015-04-22 16:42:35 -0400 | [diff] [blame] | 1943 | r = commit(cache, false); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1944 | cache->commit_requested = false; |
Heinz Mauelshagen | ffcbcb6 | 2013-10-14 17:24:43 +0200 | [diff] [blame] | 1945 | cache->last_commit_jiffies = jiffies; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1946 | } |
| 1947 | |
Heinz Mauelshagen | ffcbcb6 | 2013-10-14 17:24:43 +0200 | [diff] [blame] | 1948 | return r; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1949 | } |
| 1950 | |
| 1951 | static void process_deferred_bios(struct cache *cache) |
| 1952 | { |
Mike Snitzer | 665022d | 2015-07-16 21:48:55 -0400 | [diff] [blame] | 1953 | bool prealloc_used = false; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1954 | unsigned long flags; |
| 1955 | struct bio_list bios; |
| 1956 | struct bio *bio; |
| 1957 | struct prealloc structs; |
| 1958 | |
| 1959 | memset(&structs, 0, sizeof(structs)); |
| 1960 | bio_list_init(&bios); |
| 1961 | |
| 1962 | spin_lock_irqsave(&cache->lock, flags); |
| 1963 | bio_list_merge(&bios, &cache->deferred_bios); |
| 1964 | bio_list_init(&cache->deferred_bios); |
| 1965 | spin_unlock_irqrestore(&cache->lock, flags); |
| 1966 | |
| 1967 | while (!bio_list_empty(&bios)) { |
| 1968 | /* |
| 1969 | * If we've got no free migration structs, and processing |
| 1970 | * this bio might require one, we pause until there are some |
| 1971 | * prepared mappings to process. |
| 1972 | */ |
Mike Snitzer | 795e633 | 2015-07-29 13:48:23 -0400 | [diff] [blame] | 1973 | prealloc_used = true; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1974 | if (prealloc_data_structs(cache, &structs)) { |
| 1975 | spin_lock_irqsave(&cache->lock, flags); |
| 1976 | bio_list_merge(&cache->deferred_bios, &bios); |
| 1977 | spin_unlock_irqrestore(&cache->lock, flags); |
| 1978 | break; |
| 1979 | } |
| 1980 | |
| 1981 | bio = bio_list_pop(&bios); |
| 1982 | |
Jens Axboe | 1eff9d3 | 2016-08-05 15:35:16 -0600 | [diff] [blame] | 1983 | if (bio->bi_opf & REQ_PREFLUSH) |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1984 | process_flush_bio(cache, bio); |
Mike Christie | e604714 | 2016-06-05 14:32:04 -0500 | [diff] [blame] | 1985 | else if (bio_op(bio) == REQ_OP_DISCARD) |
Joe Thornber | 7ae34e7 | 2014-11-06 10:18:04 +0000 | [diff] [blame] | 1986 | process_discard_bio(cache, &structs, bio); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1987 | else |
| 1988 | process_bio(cache, &structs, bio); |
| 1989 | } |
| 1990 | |
Mike Snitzer | 665022d | 2015-07-16 21:48:55 -0400 | [diff] [blame] | 1991 | if (prealloc_used) |
| 1992 | prealloc_free_structs(cache, &structs); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 1993 | } |
| 1994 | |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 1995 | static void process_deferred_cells(struct cache *cache) |
| 1996 | { |
Mike Snitzer | 665022d | 2015-07-16 21:48:55 -0400 | [diff] [blame] | 1997 | bool prealloc_used = false; |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 1998 | unsigned long flags; |
| 1999 | struct dm_bio_prison_cell *cell, *tmp; |
| 2000 | struct list_head cells; |
| 2001 | struct prealloc structs; |
| 2002 | |
| 2003 | memset(&structs, 0, sizeof(structs)); |
| 2004 | |
| 2005 | INIT_LIST_HEAD(&cells); |
| 2006 | |
| 2007 | spin_lock_irqsave(&cache->lock, flags); |
| 2008 | list_splice_init(&cache->deferred_cells, &cells); |
| 2009 | spin_unlock_irqrestore(&cache->lock, flags); |
| 2010 | |
| 2011 | list_for_each_entry_safe(cell, tmp, &cells, user_list) { |
| 2012 | /* |
| 2013 | * If we've got no free migration structs, and processing |
| 2014 | * this cell might require one, we pause until there are some
| 2015 | * prepared mappings to process. |
| 2016 | */ |
Mike Snitzer | 795e633 | 2015-07-29 13:48:23 -0400 | [diff] [blame] | 2017 | prealloc_used = true; |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 2018 | if (prealloc_data_structs(cache, &structs)) { |
| 2019 | spin_lock_irqsave(&cache->lock, flags); |
| 2020 | list_splice(&cells, &cache->deferred_cells); |
| 2021 | spin_unlock_irqrestore(&cache->lock, flags); |
| 2022 | break; |
| 2023 | } |
| 2024 | |
| 2025 | process_cell(cache, &structs, cell); |
| 2026 | } |
| 2027 | |
Mike Snitzer | 665022d | 2015-07-16 21:48:55 -0400 | [diff] [blame] | 2028 | if (prealloc_used) |
| 2029 | prealloc_free_structs(cache, &structs); |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 2030 | } |
| 2031 | |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2032 | static void process_deferred_flush_bios(struct cache *cache, bool submit_bios) |
| 2033 | { |
| 2034 | unsigned long flags; |
| 2035 | struct bio_list bios; |
| 2036 | struct bio *bio; |
| 2037 | |
| 2038 | bio_list_init(&bios); |
| 2039 | |
| 2040 | spin_lock_irqsave(&cache->lock, flags); |
| 2041 | bio_list_merge(&bios, &cache->deferred_flush_bios); |
| 2042 | bio_list_init(&cache->deferred_flush_bios); |
| 2043 | spin_unlock_irqrestore(&cache->lock, flags); |
| 2044 | |
Joe Thornber | 8c081b5 | 2014-05-13 16:18:38 +0100 | [diff] [blame] | 2045 | /* |
| 2046 | * These bios have already been through inc_ds() |
| 2047 | */ |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2048 | while ((bio = bio_list_pop(&bios))) |
Joe Thornber | 066dbaa | 2015-05-15 15:18:01 +0100 | [diff] [blame] | 2049 | submit_bios ? accounted_request(cache, bio) : bio_io_error(bio); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2050 | } |
| 2051 | |
Joe Thornber | e2e74d6 | 2013-03-20 17:21:27 +0000 | [diff] [blame] | 2052 | static void process_deferred_writethrough_bios(struct cache *cache) |
| 2053 | { |
| 2054 | unsigned long flags; |
| 2055 | struct bio_list bios; |
| 2056 | struct bio *bio; |
| 2057 | |
| 2058 | bio_list_init(&bios); |
| 2059 | |
| 2060 | spin_lock_irqsave(&cache->lock, flags); |
| 2061 | bio_list_merge(&bios, &cache->deferred_writethrough_bios); |
| 2062 | bio_list_init(&cache->deferred_writethrough_bios); |
| 2063 | spin_unlock_irqrestore(&cache->lock, flags); |
| 2064 | |
Joe Thornber | 8c081b5 | 2014-05-13 16:18:38 +0100 | [diff] [blame] | 2065 | /* |
| 2066 | * These bios have already been through inc_ds() |
| 2067 | */ |
Joe Thornber | e2e74d6 | 2013-03-20 17:21:27 +0000 | [diff] [blame] | 2068 | while ((bio = bio_list_pop(&bios))) |
Joe Thornber | 066dbaa | 2015-05-15 15:18:01 +0100 | [diff] [blame] | 2069 | accounted_request(cache, bio); |
Joe Thornber | e2e74d6 | 2013-03-20 17:21:27 +0000 | [diff] [blame] | 2070 | } |
| 2071 | |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2072 | static void writeback_some_dirty_blocks(struct cache *cache) |
| 2073 | { |
Mike Snitzer | 665022d | 2015-07-16 21:48:55 -0400 | [diff] [blame] | 2074 | bool prealloc_used = false; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2075 | dm_oblock_t oblock; |
| 2076 | dm_cblock_t cblock; |
| 2077 | struct prealloc structs; |
| 2078 | struct dm_bio_prison_cell *old_ocell; |
Joe Thornber | 20f6814 | 2015-05-15 15:20:09 +0100 | [diff] [blame] | 2079 | bool busy = !iot_idle_for(&cache->origin_tracker, HZ); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2080 | |
| 2081 | memset(&structs, 0, sizeof(structs)); |
| 2082 | |
| 2083 | while (spare_migration_bandwidth(cache)) { |
Mike Snitzer | e782eff | 2015-07-16 21:26:10 -0400 | [diff] [blame] | 2084 | if (policy_writeback_work(cache->policy, &oblock, &cblock, busy)) |
| 2085 | break; /* no work to do */ |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2086 | |
Mike Snitzer | 795e633 | 2015-07-29 13:48:23 -0400 | [diff] [blame] | 2087 | prealloc_used = true; |
Mike Snitzer | e782eff | 2015-07-16 21:26:10 -0400 | [diff] [blame] | 2088 | if (prealloc_data_structs(cache, &structs) || |
| 2089 | get_cell(cache, oblock, &structs, &old_ocell)) { |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2090 | policy_set_dirty(cache->policy, oblock); |
| 2091 | break; |
| 2092 | } |
| 2093 | |
| 2094 | writeback(cache, &structs, oblock, cblock, old_ocell); |
| 2095 | } |
| 2096 | |
Mike Snitzer | 665022d | 2015-07-16 21:48:55 -0400 | [diff] [blame] | 2097 | if (prealloc_used) |
| 2098 | prealloc_free_structs(cache, &structs); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2099 | } |
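/*
 * The busy flag above is derived from the origin's io_tracker: if the
 * origin has seen IO within the last second (HZ jiffies) the policy is
 * told so, letting it decline background writeback while foreground
 * load is present.
 */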
| 2100 | |
| 2101 | /*---------------------------------------------------------------- |
Joe Thornber | 65790ff | 2013-11-08 16:39:50 +0000 | [diff] [blame] | 2102 | * Invalidations. |
| 2103 | * Dropping something from the cache *without* writing back. |
| 2104 | *--------------------------------------------------------------*/ |
| 2105 | |
| 2106 | static void process_invalidation_request(struct cache *cache, struct invalidation_request *req) |
| 2107 | { |
| 2108 | int r = 0; |
| 2109 | uint64_t begin = from_cblock(req->cblocks->begin); |
| 2110 | uint64_t end = from_cblock(req->cblocks->end); |
| 2111 | |
| 2112 | while (begin != end) { |
| 2113 | r = policy_remove_cblock(cache->policy, to_cblock(begin)); |
| 2114 | if (!r) { |
| 2115 | r = dm_cache_remove_mapping(cache->cmd, to_cblock(begin)); |
Joe Thornber | 028ae9f | 2015-04-22 16:42:35 -0400 | [diff] [blame] | 2116 | if (r) { |
| 2117 | metadata_operation_failed(cache, "dm_cache_remove_mapping", r); |
Joe Thornber | 65790ff | 2013-11-08 16:39:50 +0000 | [diff] [blame] | 2118 | break; |
Joe Thornber | 028ae9f | 2015-04-22 16:42:35 -0400 | [diff] [blame] | 2119 | } |
Joe Thornber | 65790ff | 2013-11-08 16:39:50 +0000 | [diff] [blame] | 2120 | |
| 2121 | } else if (r == -ENODATA) { |
| 2122 | /* harmless, already unmapped */ |
| 2123 | r = 0; |
| 2124 | |
| 2125 | } else { |
Mike Snitzer | b61d950 | 2015-04-22 17:25:56 -0400 | [diff] [blame] | 2126 | DMERR("%s: policy_remove_cblock failed", cache_device_name(cache)); |
Joe Thornber | 65790ff | 2013-11-08 16:39:50 +0000 | [diff] [blame] | 2127 | break; |
| 2128 | } |
| 2129 | |
| 2130 | begin++; |
| 2131 | } |
| 2132 | |
| 2133 | cache->commit_requested = true; |
| 2134 | |
| 2135 | req->err = r; |
| 2136 | atomic_set(&req->complete, 1); |
| 2137 | |
| 2138 | wake_up(&req->result_wait); |
| 2139 | } |
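/*
 * The originator of an invalidation request sleeps on req->result_wait;
 * req->err carries the first hard failure, while -ENODATA from the
 * policy is treated as success since the block was already unmapped.
 */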
| 2140 | |
| 2141 | static void process_invalidation_requests(struct cache *cache) |
| 2142 | { |
| 2143 | struct list_head list; |
| 2144 | struct invalidation_request *req, *tmp; |
| 2145 | |
| 2146 | INIT_LIST_HEAD(&list); |
| 2147 | spin_lock(&cache->invalidation_lock); |
| 2148 | list_splice_init(&cache->invalidation_requests, &list); |
| 2149 | spin_unlock(&cache->invalidation_lock); |
| 2150 | |
| 2151 | list_for_each_entry_safe(req, tmp, &list, list)
| 2152 | process_invalidation_request(cache, req); |
| 2153 | } |
| 2154 | |
| 2155 | /*---------------------------------------------------------------- |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2156 | * Main worker loop |
| 2157 | *--------------------------------------------------------------*/ |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2158 | static bool is_quiescing(struct cache *cache) |
| 2159 | { |
Joe Thornber | 238f836 | 2013-10-30 17:29:30 +0000 | [diff] [blame] | 2160 | return atomic_read(&cache->quiescing); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2161 | } |
| 2162 | |
Joe Thornber | 66cb191 | 2013-10-30 17:11:58 +0000 | [diff] [blame] | 2163 | static void ack_quiescing(struct cache *cache) |
| 2164 | { |
| 2165 | if (is_quiescing(cache)) { |
| 2166 | atomic_inc(&cache->quiescing_ack); |
| 2167 | wake_up(&cache->quiescing_wait); |
| 2168 | } |
| 2169 | } |
| 2170 | |
| 2171 | static void wait_for_quiescing_ack(struct cache *cache) |
| 2172 | { |
| 2173 | wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack)); |
| 2174 | } |
| 2175 | |
| 2176 | static void start_quiescing(struct cache *cache) |
| 2177 | { |
Joe Thornber | 238f836 | 2013-10-30 17:29:30 +0000 | [diff] [blame] | 2178 | atomic_inc(&cache->quiescing); |
Joe Thornber | 66cb191 | 2013-10-30 17:11:58 +0000 | [diff] [blame] | 2179 | wait_for_quiescing_ack(cache); |
| 2180 | } |
| 2181 | |
| 2182 | static void stop_quiescing(struct cache *cache) |
| 2183 | { |
Joe Thornber | 238f836 | 2013-10-30 17:29:30 +0000 | [diff] [blame] | 2184 | atomic_set(&cache->quiescing, 0); |
Joe Thornber | 66cb191 | 2013-10-30 17:11:58 +0000 | [diff] [blame] | 2185 | atomic_set(&cache->quiescing_ack, 0); |
| 2186 | } |
| 2187 | |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2188 | static void wait_for_migrations(struct cache *cache) |
| 2189 | { |
Joe Thornber | a59db67 | 2015-01-23 10:16:16 +0000 | [diff] [blame] | 2190 | wait_event(cache->migration_wait, !atomic_read(&cache->nr_allocated_migrations)); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2191 | } |
| 2192 | |
| 2193 | static void stop_worker(struct cache *cache) |
| 2194 | { |
| 2195 | cancel_delayed_work(&cache->waker); |
| 2196 | flush_workqueue(cache->wq); |
| 2197 | } |
| 2198 | |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 2199 | static void requeue_deferred_cells(struct cache *cache) |
| 2200 | { |
| 2201 | unsigned long flags; |
| 2202 | struct list_head cells; |
| 2203 | struct dm_bio_prison_cell *cell, *tmp; |
| 2204 | |
| 2205 | INIT_LIST_HEAD(&cells); |
| 2206 | spin_lock_irqsave(&cache->lock, flags); |
| 2207 | list_splice_init(&cache->deferred_cells, &cells); |
| 2208 | spin_unlock_irqrestore(&cache->lock, flags); |
| 2209 | |
| 2210 | list_for_each_entry_safe(cell, tmp, &cells, user_list) |
| 2211 | cell_requeue(cache, cell); |
| 2212 | } |
| 2213 | |
| 2214 | static void requeue_deferred_bios(struct cache *cache) |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2215 | { |
| 2216 | struct bio *bio; |
| 2217 | struct bio_list bios; |
| 2218 | |
| 2219 | bio_list_init(&bios); |
| 2220 | bio_list_merge(&bios, &cache->deferred_bios); |
| 2221 | bio_list_init(&cache->deferred_bios); |
| 2222 | |
Christoph Hellwig | 4246a0b | 2015-07-20 15:29:37 +0200 | [diff] [blame] | 2223 | while ((bio = bio_list_pop(&bios))) { |
| 2224 | bio->bi_error = DM_ENDIO_REQUEUE; |
| 2225 | bio_endio(bio); |
| 2226 | } |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2227 | } |
| 2228 | |
| 2229 | static int more_work(struct cache *cache) |
| 2230 | { |
| 2231 | if (is_quiescing(cache)) |
| 2232 | return !list_empty(&cache->quiesced_migrations) || |
| 2233 | !list_empty(&cache->completed_migrations) || |
| 2234 | !list_empty(&cache->need_commit_migrations); |
| 2235 | else |
| 2236 | return !bio_list_empty(&cache->deferred_bios) || |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 2237 | !list_empty(&cache->deferred_cells) || |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2238 | !bio_list_empty(&cache->deferred_flush_bios) || |
Joe Thornber | e2e74d6 | 2013-03-20 17:21:27 +0000 | [diff] [blame] | 2239 | !bio_list_empty(&cache->deferred_writethrough_bios) || |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2240 | !list_empty(&cache->quiesced_migrations) || |
| 2241 | !list_empty(&cache->completed_migrations) || |
Joe Thornber | 65790ff | 2013-11-08 16:39:50 +0000 | [diff] [blame] | 2242 | !list_empty(&cache->need_commit_migrations) || |
| 2243 | cache->invalidate; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2244 | } |
| 2245 | |
| 2246 | static void do_worker(struct work_struct *ws) |
| 2247 | { |
| 2248 | struct cache *cache = container_of(ws, struct cache, worker); |
| 2249 | |
| 2250 | do { |
Joe Thornber | 66cb191 | 2013-10-30 17:11:58 +0000 | [diff] [blame] | 2251 | if (!is_quiescing(cache)) { |
| 2252 | writeback_some_dirty_blocks(cache); |
| 2253 | process_deferred_writethrough_bios(cache); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2254 | process_deferred_bios(cache); |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 2255 | process_deferred_cells(cache); |
Joe Thornber | 65790ff | 2013-11-08 16:39:50 +0000 | [diff] [blame] | 2256 | process_invalidation_requests(cache); |
Joe Thornber | 66cb191 | 2013-10-30 17:11:58 +0000 | [diff] [blame] | 2257 | } |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2258 | |
Joe Thornber | 7ae34e7 | 2014-11-06 10:18:04 +0000 | [diff] [blame] | 2259 | process_migrations(cache, &cache->quiesced_migrations, issue_copy_or_discard); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2260 | process_migrations(cache, &cache->completed_migrations, complete_migration); |
| 2261 | |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2262 | if (commit_if_needed(cache)) { |
| 2263 | process_deferred_flush_bios(cache, false); |
Joe Thornber | 304affa | 2014-06-24 15:36:58 -0400 | [diff] [blame] | 2264 | process_migrations(cache, &cache->need_commit_migrations, migration_failure); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2265 | } else { |
| 2266 | process_deferred_flush_bios(cache, true); |
| 2267 | process_migrations(cache, &cache->need_commit_migrations, |
| 2268 | migration_success_post_commit); |
| 2269 | } |
Joe Thornber | 66cb191 | 2013-10-30 17:11:58 +0000 | [diff] [blame] | 2270 | |
| 2271 | ack_quiescing(cache); |
| 2272 | |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2273 | } while (more_work(cache)); |
| 2274 | } |
| 2275 | |
| 2276 | /* |
| 2277 | * We want to commit periodically so that not too much |
| 2278 | * unwritten metadata builds up. |
| 2279 | */ |
| 2280 | static void do_waker(struct work_struct *ws) |
| 2281 | { |
| 2282 | struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker); |
Joe Thornber | fba1010 | 2015-05-29 10:20:56 +0100 | [diff] [blame] | 2283 | policy_tick(cache->policy, true); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2284 | wake_worker(cache); |
| 2285 | queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD); |
| 2286 | } |
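/*
 * E.g. with a COMMIT_PERIOD of HZ the waker fires roughly once a
 * second: each pass ticks the policy, wakes the worker (which commits
 * via commit_if_needed() if there is outstanding metadata) and then
 * requeues itself, so commits keep happening even when io is only
 * trickling in.
 */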
| 2287 | |
| 2288 | /*----------------------------------------------------------------*/ |
| 2289 | |
| 2290 | static int is_congested(struct dm_dev *dev, int bdi_bits) |
| 2291 | { |
| 2292 | struct request_queue *q = bdev_get_queue(dev->bdev); |
| 2293 | return bdi_congested(&q->backing_dev_info, bdi_bits); |
| 2294 | } |
| 2295 | |
| 2296 | static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits) |
| 2297 | { |
| 2298 | struct cache *cache = container_of(cb, struct cache, callbacks); |
| 2299 | |
| 2300 | return is_congested(cache->origin_dev, bdi_bits) || |
| 2301 | is_congested(cache->cache_dev, bdi_bits); |
| 2302 | } |
| 2303 | |
| 2304 | /*---------------------------------------------------------------- |
| 2305 | * Target methods |
| 2306 | *--------------------------------------------------------------*/ |
| 2307 | |
| 2308 | /* |
| 2309 | * This function gets called on the error paths of the constructor, so we |
| 2310 | * have to cope with a partially initialised struct. |
| 2311 | */ |
| 2312 | static void destroy(struct cache *cache) |
| 2313 | { |
| 2314 | unsigned i; |
| 2315 | |
Julia Lawall | 6f65985 | 2015-09-13 14:15:05 +0200 | [diff] [blame] | 2316 | mempool_destroy(cache->migration_pool); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2317 | |
| 2318 | if (cache->all_io_ds) |
| 2319 | dm_deferred_set_destroy(cache->all_io_ds); |
| 2320 | |
| 2321 | if (cache->prison) |
| 2322 | dm_bio_prison_destroy(cache->prison); |
| 2323 | |
| 2324 | if (cache->wq) |
| 2325 | destroy_workqueue(cache->wq); |
| 2326 | |
| 2327 | if (cache->dirty_bitset) |
| 2328 | free_bitset(cache->dirty_bitset); |
| 2329 | |
| 2330 | if (cache->discard_bitset) |
| 2331 | free_bitset(cache->discard_bitset); |
| 2332 | |
| 2333 | if (cache->copier) |
| 2334 | dm_kcopyd_client_destroy(cache->copier); |
| 2335 | |
| 2336 | if (cache->cmd) |
| 2337 | dm_cache_metadata_close(cache->cmd); |
| 2338 | |
| 2339 | if (cache->metadata_dev) |
| 2340 | dm_put_device(cache->ti, cache->metadata_dev); |
| 2341 | |
| 2342 | if (cache->origin_dev) |
| 2343 | dm_put_device(cache->ti, cache->origin_dev); |
| 2344 | |
| 2345 | if (cache->cache_dev) |
| 2346 | dm_put_device(cache->ti, cache->cache_dev); |
| 2347 | |
| 2348 | if (cache->policy) |
| 2349 | dm_cache_policy_destroy(cache->policy); |
| 2350 | |
 | 2351 | for (i = 0; i < cache->nr_ctr_args; i++) |
| 2352 | kfree(cache->ctr_args[i]); |
| 2353 | kfree(cache->ctr_args); |
| 2354 | |
| 2355 | kfree(cache); |
| 2356 | } |
| 2357 | |
| 2358 | static void cache_dtr(struct dm_target *ti) |
| 2359 | { |
| 2360 | struct cache *cache = ti->private; |
| 2361 | |
| 2362 | destroy(cache); |
| 2363 | } |
| 2364 | |
| 2365 | static sector_t get_dev_size(struct dm_dev *dev) |
| 2366 | { |
| 2367 | return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT; |
| 2368 | } |
| 2369 | |
| 2370 | /*----------------------------------------------------------------*/ |
| 2371 | |
| 2372 | /* |
| 2373 | * Construct a cache device mapping. |
| 2374 | * |
| 2375 | * cache <metadata dev> <cache dev> <origin dev> <block size> |
| 2376 | * <#feature args> [<feature arg>]* |
| 2377 | * <policy> <#policy args> [<policy arg>]* |
| 2378 | * |
| 2379 | * metadata dev : fast device holding the persistent metadata |
| 2380 | * cache dev : fast device holding cached data blocks |
| 2381 | * origin dev : slow device holding original data blocks |
| 2382 | * block size : cache unit size in sectors |
| 2383 | * |
| 2384 | * #feature args : number of feature arguments passed |
 | 2385 | * feature args : writethrough or passthrough. (The default is writeback.) |
| 2386 | * |
| 2387 | * policy : the replacement policy to use |
| 2388 | * #policy args : an even number of policy arguments corresponding |
| 2389 | * to key/value pairs passed to the policy |
| 2390 | * policy args : key/value pairs passed to the policy |
| 2391 | * E.g. 'sequential_threshold 1024' |
| 2392 | * See cache-policies.txt for details. |
| 2393 | * |
 | 2394 | * Optional feature arguments are: |
 | 2395 | * writethrough : write through caching that prohibits cache block |
 | 2396 | * content from being different from origin block content. |
 | 2397 | * Without this argument, the default behaviour is to write |
 | 2398 | * back cache block contents later for performance reasons, |
 | 2399 | * so they may differ from the corresponding origin blocks. |
 | | * passthrough : the cache is bypassed; reads are served from the |
 | | * origin, and a write also invalidates any cached copy of the |
 | | * block. May only be selected when every cache block is clean |
 | | * (see cache_create()). |
 | 2400 | */ |
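/*
 * An illustrative table line (device names and sizes are just an
 * example): a 20GB origin cached with 512 sector (256KB) blocks in
 * writethrough mode, using the default policy with no policy args:
 *
 *   0 41943040 cache /dev/sdb1 /dev/sdb2 /dev/sdc1 512 1 writethrough default 0
 */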
| 2401 | struct cache_args { |
| 2402 | struct dm_target *ti; |
| 2403 | |
| 2404 | struct dm_dev *metadata_dev; |
| 2405 | |
| 2406 | struct dm_dev *cache_dev; |
| 2407 | sector_t cache_sectors; |
| 2408 | |
| 2409 | struct dm_dev *origin_dev; |
| 2410 | sector_t origin_sectors; |
| 2411 | |
| 2412 | uint32_t block_size; |
| 2413 | |
| 2414 | const char *policy_name; |
| 2415 | int policy_argc; |
| 2416 | const char **policy_argv; |
| 2417 | |
| 2418 | struct cache_features features; |
| 2419 | }; |
| 2420 | |
| 2421 | static void destroy_cache_args(struct cache_args *ca) |
| 2422 | { |
| 2423 | if (ca->metadata_dev) |
| 2424 | dm_put_device(ca->ti, ca->metadata_dev); |
| 2425 | |
| 2426 | if (ca->cache_dev) |
| 2427 | dm_put_device(ca->ti, ca->cache_dev); |
| 2428 | |
| 2429 | if (ca->origin_dev) |
| 2430 | dm_put_device(ca->ti, ca->origin_dev); |
| 2431 | |
| 2432 | kfree(ca); |
| 2433 | } |
| 2434 | |
| 2435 | static bool at_least_one_arg(struct dm_arg_set *as, char **error) |
| 2436 | { |
| 2437 | if (!as->argc) { |
| 2438 | *error = "Insufficient args"; |
| 2439 | return false; |
| 2440 | } |
| 2441 | |
| 2442 | return true; |
| 2443 | } |
| 2444 | |
| 2445 | static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as, |
| 2446 | char **error) |
| 2447 | { |
| 2448 | int r; |
| 2449 | sector_t metadata_dev_size; |
| 2450 | char b[BDEVNAME_SIZE]; |
| 2451 | |
| 2452 | if (!at_least_one_arg(as, error)) |
| 2453 | return -EINVAL; |
| 2454 | |
| 2455 | r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE, |
| 2456 | &ca->metadata_dev); |
| 2457 | if (r) { |
| 2458 | *error = "Error opening metadata device"; |
| 2459 | return r; |
| 2460 | } |
| 2461 | |
| 2462 | metadata_dev_size = get_dev_size(ca->metadata_dev); |
| 2463 | if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING) |
| 2464 | DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.", |
 | 2465 | bdevname(ca->metadata_dev->bdev, b), DM_CACHE_METADATA_MAX_SECTORS_WARNING); |
| 2466 | |
| 2467 | return 0; |
| 2468 | } |
| 2469 | |
| 2470 | static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as, |
| 2471 | char **error) |
| 2472 | { |
| 2473 | int r; |
| 2474 | |
| 2475 | if (!at_least_one_arg(as, error)) |
| 2476 | return -EINVAL; |
| 2477 | |
| 2478 | r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE, |
| 2479 | &ca->cache_dev); |
| 2480 | if (r) { |
| 2481 | *error = "Error opening cache device"; |
| 2482 | return r; |
| 2483 | } |
| 2484 | ca->cache_sectors = get_dev_size(ca->cache_dev); |
| 2485 | |
| 2486 | return 0; |
| 2487 | } |
| 2488 | |
| 2489 | static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as, |
| 2490 | char **error) |
| 2491 | { |
| 2492 | int r; |
| 2493 | |
| 2494 | if (!at_least_one_arg(as, error)) |
| 2495 | return -EINVAL; |
| 2496 | |
| 2497 | r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE, |
| 2498 | &ca->origin_dev); |
| 2499 | if (r) { |
| 2500 | *error = "Error opening origin device"; |
| 2501 | return r; |
| 2502 | } |
| 2503 | |
| 2504 | ca->origin_sectors = get_dev_size(ca->origin_dev); |
| 2505 | if (ca->ti->len > ca->origin_sectors) { |
| 2506 | *error = "Device size larger than cached device"; |
| 2507 | return -EINVAL; |
| 2508 | } |
| 2509 | |
| 2510 | return 0; |
| 2511 | } |
| 2512 | |
| 2513 | static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as, |
| 2514 | char **error) |
| 2515 | { |
Mike Snitzer | 0547304 | 2013-08-16 10:54:19 -0400 | [diff] [blame] | 2516 | unsigned long block_size; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2517 | |
| 2518 | if (!at_least_one_arg(as, error)) |
| 2519 | return -EINVAL; |
| 2520 | |
Mike Snitzer | 0547304 | 2013-08-16 10:54:19 -0400 | [diff] [blame] | 2521 | if (kstrtoul(dm_shift_arg(as), 10, &block_size) || !block_size || |
| 2522 | block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS || |
| 2523 | block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS || |
| 2524 | block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) { |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2525 | *error = "Invalid data block size"; |
| 2526 | return -EINVAL; |
| 2527 | } |
| 2528 | |
Mike Snitzer | 0547304 | 2013-08-16 10:54:19 -0400 | [diff] [blame] | 2529 | if (block_size > ca->cache_sectors) { |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2530 | *error = "Data block size is larger than the cache device"; |
| 2531 | return -EINVAL; |
| 2532 | } |
| 2533 | |
Mike Snitzer | 0547304 | 2013-08-16 10:54:19 -0400 | [diff] [blame] | 2534 | ca->block_size = block_size; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2535 | |
| 2536 | return 0; |
| 2537 | } |
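/*
 * E.g. assuming DATA_DEV_BLOCK_SIZE_MIN_SECTORS is 64 (32KB), a block
 * size of 512 sectors passes every test above, whereas 100 sectors
 * fails the alignment check (100 & 63 != 0), and anything below the
 * minimum, above the maximum, or larger than the cache device itself
 * is likewise rejected.
 */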
| 2538 | |
| 2539 | static void init_features(struct cache_features *cf) |
| 2540 | { |
| 2541 | cf->mode = CM_WRITE; |
Joe Thornber | 2ee57d5 | 2013-10-24 14:10:29 -0400 | [diff] [blame] | 2542 | cf->io_mode = CM_IO_WRITEBACK; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2543 | } |
| 2544 | |
| 2545 | static int parse_features(struct cache_args *ca, struct dm_arg_set *as, |
| 2546 | char **error) |
| 2547 | { |
| 2548 | static struct dm_arg _args[] = { |
| 2549 | {0, 1, "Invalid number of cache feature arguments"}, |
| 2550 | }; |
| 2551 | |
| 2552 | int r; |
| 2553 | unsigned argc; |
| 2554 | const char *arg; |
| 2555 | struct cache_features *cf = &ca->features; |
| 2556 | |
| 2557 | init_features(cf); |
| 2558 | |
| 2559 | r = dm_read_arg_group(_args, as, &argc, error); |
| 2560 | if (r) |
| 2561 | return -EINVAL; |
| 2562 | |
| 2563 | while (argc--) { |
| 2564 | arg = dm_shift_arg(as); |
| 2565 | |
| 2566 | if (!strcasecmp(arg, "writeback")) |
Joe Thornber | 2ee57d5 | 2013-10-24 14:10:29 -0400 | [diff] [blame] | 2567 | cf->io_mode = CM_IO_WRITEBACK; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2568 | |
| 2569 | else if (!strcasecmp(arg, "writethrough")) |
Joe Thornber | 2ee57d5 | 2013-10-24 14:10:29 -0400 | [diff] [blame] | 2570 | cf->io_mode = CM_IO_WRITETHROUGH; |
| 2571 | |
| 2572 | else if (!strcasecmp(arg, "passthrough")) |
| 2573 | cf->io_mode = CM_IO_PASSTHROUGH; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2574 | |
| 2575 | else { |
| 2576 | *error = "Unrecognised cache feature requested"; |
| 2577 | return -EINVAL; |
| 2578 | } |
| 2579 | } |
| 2580 | |
| 2581 | return 0; |
| 2582 | } |
| 2583 | |
| 2584 | static int parse_policy(struct cache_args *ca, struct dm_arg_set *as, |
| 2585 | char **error) |
| 2586 | { |
| 2587 | static struct dm_arg _args[] = { |
| 2588 | {0, 1024, "Invalid number of policy arguments"}, |
| 2589 | }; |
| 2590 | |
| 2591 | int r; |
| 2592 | |
| 2593 | if (!at_least_one_arg(as, error)) |
| 2594 | return -EINVAL; |
| 2595 | |
| 2596 | ca->policy_name = dm_shift_arg(as); |
| 2597 | |
| 2598 | r = dm_read_arg_group(_args, as, &ca->policy_argc, error); |
| 2599 | if (r) |
| 2600 | return -EINVAL; |
| 2601 | |
| 2602 | ca->policy_argv = (const char **)as->argv; |
| 2603 | dm_consume_args(as, ca->policy_argc); |
| 2604 | |
| 2605 | return 0; |
| 2606 | } |
| 2607 | |
| 2608 | static int parse_cache_args(struct cache_args *ca, int argc, char **argv, |
| 2609 | char **error) |
| 2610 | { |
| 2611 | int r; |
| 2612 | struct dm_arg_set as; |
| 2613 | |
| 2614 | as.argc = argc; |
| 2615 | as.argv = argv; |
| 2616 | |
| 2617 | r = parse_metadata_dev(ca, &as, error); |
| 2618 | if (r) |
| 2619 | return r; |
| 2620 | |
| 2621 | r = parse_cache_dev(ca, &as, error); |
| 2622 | if (r) |
| 2623 | return r; |
| 2624 | |
| 2625 | r = parse_origin_dev(ca, &as, error); |
| 2626 | if (r) |
| 2627 | return r; |
| 2628 | |
| 2629 | r = parse_block_size(ca, &as, error); |
| 2630 | if (r) |
| 2631 | return r; |
| 2632 | |
| 2633 | r = parse_features(ca, &as, error); |
| 2634 | if (r) |
| 2635 | return r; |
| 2636 | |
| 2637 | r = parse_policy(ca, &as, error); |
| 2638 | if (r) |
| 2639 | return r; |
| 2640 | |
| 2641 | return 0; |
| 2642 | } |
| 2643 | |
| 2644 | /*----------------------------------------------------------------*/ |
| 2645 | |
| 2646 | static struct kmem_cache *migration_cache; |
| 2647 | |
Alasdair G Kergon | 2c73c47 | 2013-05-10 14:37:21 +0100 | [diff] [blame] | 2648 | #define NOT_CORE_OPTION 1 |
| 2649 | |
Joe Thornber | 2f14f4b | 2013-05-10 14:37:21 +0100 | [diff] [blame] | 2650 | static int process_config_option(struct cache *cache, const char *key, const char *value) |
Alasdair G Kergon | 2c73c47 | 2013-05-10 14:37:21 +0100 | [diff] [blame] | 2651 | { |
| 2652 | unsigned long tmp; |
| 2653 | |
Joe Thornber | 2f14f4b | 2013-05-10 14:37:21 +0100 | [diff] [blame] | 2654 | if (!strcasecmp(key, "migration_threshold")) { |
| 2655 | if (kstrtoul(value, 10, &tmp)) |
Alasdair G Kergon | 2c73c47 | 2013-05-10 14:37:21 +0100 | [diff] [blame] | 2656 | return -EINVAL; |
| 2657 | |
| 2658 | cache->migration_threshold = tmp; |
| 2659 | return 0; |
| 2660 | } |
| 2661 | |
| 2662 | return NOT_CORE_OPTION; |
| 2663 | } |
| 2664 | |
Joe Thornber | 2f14f4b | 2013-05-10 14:37:21 +0100 | [diff] [blame] | 2665 | static int set_config_value(struct cache *cache, const char *key, const char *value) |
| 2666 | { |
| 2667 | int r = process_config_option(cache, key, value); |
| 2668 | |
| 2669 | if (r == NOT_CORE_OPTION) |
| 2670 | r = policy_set_config_value(cache->policy, key, value); |
| 2671 | |
| 2672 | if (r) |
| 2673 | DMWARN("bad config value for %s: %s", key, value); |
| 2674 | |
| 2675 | return r; |
| 2676 | } |
| 2677 | |
| 2678 | static int set_config_values(struct cache *cache, int argc, const char **argv) |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2679 | { |
| 2680 | int r = 0; |
| 2681 | |
| 2682 | if (argc & 1) { |
| 2683 | DMWARN("Odd number of policy arguments given but they should be <key> <value> pairs."); |
| 2684 | return -EINVAL; |
| 2685 | } |
| 2686 | |
| 2687 | while (argc) { |
Joe Thornber | 2f14f4b | 2013-05-10 14:37:21 +0100 | [diff] [blame] | 2688 | r = set_config_value(cache, argv[0], argv[1]); |
| 2689 | if (r) |
| 2690 | break; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2691 | |
| 2692 | argc -= 2; |
| 2693 | argv += 2; |
| 2694 | } |
| 2695 | |
| 2696 | return r; |
| 2697 | } |
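/*
 * E.g. the policy argument list "migration_threshold 4096
 * sequential_threshold 1024" is walked two entries at a time:
 * migration_threshold is consumed by process_config_option() in core,
 * while sequential_threshold returns NOT_CORE_OPTION and is handed to
 * the policy via policy_set_config_value().
 */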
| 2698 | |
| 2699 | static int create_cache_policy(struct cache *cache, struct cache_args *ca, |
| 2700 | char **error) |
| 2701 | { |
Mikulas Patocka | 4cb3e1d | 2013-10-01 18:35:39 -0400 | [diff] [blame] | 2702 | struct dm_cache_policy *p = dm_cache_policy_create(ca->policy_name, |
| 2703 | cache->cache_size, |
| 2704 | cache->origin_sectors, |
| 2705 | cache->sectors_per_block); |
| 2706 | if (IS_ERR(p)) { |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2707 | *error = "Error creating cache's policy"; |
Mikulas Patocka | 4cb3e1d | 2013-10-01 18:35:39 -0400 | [diff] [blame] | 2708 | return PTR_ERR(p); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2709 | } |
Mikulas Patocka | 4cb3e1d | 2013-10-01 18:35:39 -0400 | [diff] [blame] | 2710 | cache->policy = p; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2711 | |
Joe Thornber | 2f14f4b | 2013-05-10 14:37:21 +0100 | [diff] [blame] | 2712 | return 0; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2713 | } |
| 2714 | |
Joe Thornber | 08b1845 | 2014-11-06 14:38:01 +0000 | [diff] [blame] | 2715 | /* |
Joe Thornber | 2bb812d | 2014-11-26 16:07:50 +0000 | [diff] [blame] | 2716 | * We want the discard block size to be at least the cache block size, |
 | 2717 | * with no more than 2^14 discard blocks across the origin. |
Joe Thornber | 08b1845 | 2014-11-06 14:38:01 +0000 | [diff] [blame] | 2718 | */ |
| 2719 | #define MAX_DISCARD_BLOCKS (1 << 14) |
| 2720 | |
| 2721 | static bool too_many_discard_blocks(sector_t discard_block_size, |
| 2722 | sector_t origin_size) |
| 2723 | { |
| 2724 | (void) sector_div(origin_size, discard_block_size); |
| 2725 | |
| 2726 | return origin_size > MAX_DISCARD_BLOCKS; |
| 2727 | } |
| 2728 | |
| 2729 | static sector_t calculate_discard_block_size(sector_t cache_block_size, |
| 2730 | sector_t origin_size) |
| 2731 | { |
Joe Thornber | 2bb812d | 2014-11-26 16:07:50 +0000 | [diff] [blame] | 2732 | sector_t discard_block_size = cache_block_size; |
Joe Thornber | 08b1845 | 2014-11-06 14:38:01 +0000 | [diff] [blame] | 2733 | |
| 2734 | if (origin_size) |
| 2735 | while (too_many_discard_blocks(discard_block_size, origin_size)) |
| 2736 | discard_block_size *= 2; |
| 2737 | |
| 2738 | return discard_block_size; |
| 2739 | } |
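/*
 * E.g. a 1TB origin (2^31 sectors) with 512 sector cache blocks would
 * need 2^22 discard blocks at the initial size, so the size doubles
 * until it reaches 2^17 sectors (64MB), the first value that brings
 * the count down to the 2^14 limit.
 */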
| 2740 | |
Joe Thornber | d1d9220 | 2014-11-11 11:58:32 +0000 | [diff] [blame] | 2741 | static void set_cache_size(struct cache *cache, dm_cblock_t size) |
| 2742 | { |
| 2743 | dm_block_t nr_blocks = from_cblock(size); |
| 2744 | |
| 2745 | if (nr_blocks > (1 << 20) && cache->cache_size != size) |
| 2746 | DMWARN_LIMIT("You have created a cache device with a lot of individual cache blocks (%llu)\n" |
| 2747 | "All these mappings can consume a lot of kernel memory, and take some time to read/write.\n" |
| 2748 | "Please consider increasing the cache block size to reduce the overall cache block count.", |
| 2749 | (unsigned long long) nr_blocks); |
| 2750 | |
| 2751 | cache->cache_size = size; |
| 2752 | } |
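/*
 * E.g. a 1TB cache device carved into 32KB blocks gives 2^25 cache
 * blocks and triggers the warning above; the same device with 1MB
 * blocks gives 2^20 blocks, keeping the in-core mapping structures
 * (and their load time) much more manageable.
 */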
| 2753 | |
Joe Thornber | f8350da | 2013-05-10 14:37:16 +0100 | [diff] [blame] | 2754 | #define DEFAULT_MIGRATION_THRESHOLD 2048 |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2755 | |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2756 | static int cache_create(struct cache_args *ca, struct cache **result) |
| 2757 | { |
| 2758 | int r = 0; |
| 2759 | char **error = &ca->ti->error; |
| 2760 | struct cache *cache; |
| 2761 | struct dm_target *ti = ca->ti; |
| 2762 | dm_block_t origin_blocks; |
| 2763 | struct dm_cache_metadata *cmd; |
| 2764 | bool may_format = ca->features.mode == CM_WRITE; |
| 2765 | |
| 2766 | cache = kzalloc(sizeof(*cache), GFP_KERNEL); |
| 2767 | if (!cache) |
| 2768 | return -ENOMEM; |
| 2769 | |
| 2770 | cache->ti = ca->ti; |
| 2771 | ti->private = cache; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2772 | ti->num_flush_bios = 2; |
| 2773 | ti->flush_supported = true; |
| 2774 | |
| 2775 | ti->num_discard_bios = 1; |
| 2776 | ti->discards_supported = true; |
| 2777 | ti->discard_zeroes_data_unsupported = true; |
Joe Thornber | 2572629 | 2014-11-24 14:05:16 +0000 | [diff] [blame] | 2778 | ti->split_discard_bios = false; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2779 | |
Joe Thornber | 8c5008f | 2013-05-10 14:37:18 +0100 | [diff] [blame] | 2780 | cache->features = ca->features; |
Mike Snitzer | 30187e1 | 2016-01-31 13:28:26 -0500 | [diff] [blame] | 2781 | ti->per_io_data_size = get_per_bio_data_size(cache); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2782 | |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2783 | cache->callbacks.congested_fn = cache_is_congested; |
| 2784 | dm_table_add_target_callbacks(ti->table, &cache->callbacks); |
| 2785 | |
| 2786 | cache->metadata_dev = ca->metadata_dev; |
| 2787 | cache->origin_dev = ca->origin_dev; |
| 2788 | cache->cache_dev = ca->cache_dev; |
| 2789 | |
| 2790 | ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL; |
| 2791 | |
| 2792 | /* FIXME: factor out this whole section */ |
| 2793 | origin_blocks = cache->origin_sectors = ca->origin_sectors; |
Joe Thornber | 414dd67 | 2013-03-20 17:21:25 +0000 | [diff] [blame] | 2794 | origin_blocks = block_div(origin_blocks, ca->block_size); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2795 | cache->origin_blocks = to_oblock(origin_blocks); |
| 2796 | |
| 2797 | cache->sectors_per_block = ca->block_size; |
| 2798 | if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) { |
| 2799 | r = -EINVAL; |
| 2800 | goto bad; |
| 2801 | } |
| 2802 | |
| 2803 | if (ca->block_size & (ca->block_size - 1)) { |
| 2804 | dm_block_t cache_size = ca->cache_sectors; |
| 2805 | |
| 2806 | cache->sectors_per_block_shift = -1; |
Joe Thornber | 414dd67 | 2013-03-20 17:21:25 +0000 | [diff] [blame] | 2807 | cache_size = block_div(cache_size, ca->block_size); |
Joe Thornber | d1d9220 | 2014-11-11 11:58:32 +0000 | [diff] [blame] | 2808 | set_cache_size(cache, to_cblock(cache_size)); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2809 | } else { |
| 2810 | cache->sectors_per_block_shift = __ffs(ca->block_size); |
Joe Thornber | d1d9220 | 2014-11-11 11:58:32 +0000 | [diff] [blame] | 2811 | set_cache_size(cache, to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift)); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2812 | } |
| 2813 | |
| 2814 | r = create_cache_policy(cache, ca, error); |
| 2815 | if (r) |
| 2816 | goto bad; |
Joe Thornber | 2f14f4b | 2013-05-10 14:37:21 +0100 | [diff] [blame] | 2817 | |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2818 | cache->policy_nr_args = ca->policy_argc; |
Joe Thornber | 2f14f4b | 2013-05-10 14:37:21 +0100 | [diff] [blame] | 2819 | cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD; |
| 2820 | |
| 2821 | r = set_config_values(cache, ca->policy_argc, ca->policy_argv); |
| 2822 | if (r) { |
| 2823 | *error = "Error setting cache policy's config values"; |
| 2824 | goto bad; |
| 2825 | } |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2826 | |
| 2827 | cmd = dm_cache_metadata_open(cache->metadata_dev->bdev, |
| 2828 | ca->block_size, may_format, |
| 2829 | dm_cache_policy_get_hint_size(cache->policy)); |
| 2830 | if (IS_ERR(cmd)) { |
| 2831 | *error = "Error creating metadata object"; |
| 2832 | r = PTR_ERR(cmd); |
| 2833 | goto bad; |
| 2834 | } |
| 2835 | cache->cmd = cmd; |
Joe Thornber | 028ae9f | 2015-04-22 16:42:35 -0400 | [diff] [blame] | 2836 | set_cache_mode(cache, CM_WRITE); |
| 2837 | if (get_cache_mode(cache) != CM_WRITE) { |
| 2838 | *error = "Unable to get write access to metadata, please check/repair metadata."; |
| 2839 | r = -EINVAL; |
| 2840 | goto bad; |
| 2841 | } |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2842 | |
Joe Thornber | 2ee57d5 | 2013-10-24 14:10:29 -0400 | [diff] [blame] | 2843 | if (passthrough_mode(&cache->features)) { |
| 2844 | bool all_clean; |
| 2845 | |
| 2846 | r = dm_cache_metadata_all_clean(cache->cmd, &all_clean); |
| 2847 | if (r) { |
| 2848 | *error = "dm_cache_metadata_all_clean() failed"; |
| 2849 | goto bad; |
| 2850 | } |
| 2851 | |
| 2852 | if (!all_clean) { |
| 2853 | *error = "Cannot enter passthrough mode unless all blocks are clean"; |
| 2854 | r = -EINVAL; |
| 2855 | goto bad; |
| 2856 | } |
| 2857 | } |
| 2858 | |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2859 | spin_lock_init(&cache->lock); |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 2860 | INIT_LIST_HEAD(&cache->deferred_cells); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2861 | bio_list_init(&cache->deferred_bios); |
| 2862 | bio_list_init(&cache->deferred_flush_bios); |
Joe Thornber | e2e74d6 | 2013-03-20 17:21:27 +0000 | [diff] [blame] | 2863 | bio_list_init(&cache->deferred_writethrough_bios); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2864 | INIT_LIST_HEAD(&cache->quiesced_migrations); |
| 2865 | INIT_LIST_HEAD(&cache->completed_migrations); |
| 2866 | INIT_LIST_HEAD(&cache->need_commit_migrations); |
Joe Thornber | a59db67 | 2015-01-23 10:16:16 +0000 | [diff] [blame] | 2867 | atomic_set(&cache->nr_allocated_migrations, 0); |
| 2868 | atomic_set(&cache->nr_io_migrations, 0); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2869 | init_waitqueue_head(&cache->migration_wait); |
| 2870 | |
Joe Thornber | 66cb191 | 2013-10-30 17:11:58 +0000 | [diff] [blame] | 2871 | init_waitqueue_head(&cache->quiescing_wait); |
Joe Thornber | 238f836 | 2013-10-30 17:29:30 +0000 | [diff] [blame] | 2872 | atomic_set(&cache->quiescing, 0); |
Joe Thornber | 66cb191 | 2013-10-30 17:11:58 +0000 | [diff] [blame] | 2873 | atomic_set(&cache->quiescing_ack, 0); |
| 2874 | |
Wei Yongjun | fa4d683 | 2013-05-10 14:37:14 +0100 | [diff] [blame] | 2875 | r = -ENOMEM; |
Anssi Hannula | 44fa816 | 2014-08-01 11:55:47 -0400 | [diff] [blame] | 2876 | atomic_set(&cache->nr_dirty, 0); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2877 | cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size)); |
| 2878 | if (!cache->dirty_bitset) { |
| 2879 | *error = "could not allocate dirty bitset"; |
| 2880 | goto bad; |
| 2881 | } |
| 2882 | clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size)); |
| 2883 | |
Joe Thornber | 08b1845 | 2014-11-06 14:38:01 +0000 | [diff] [blame] | 2884 | cache->discard_block_size = |
| 2885 | calculate_discard_block_size(cache->sectors_per_block, |
| 2886 | cache->origin_sectors); |
Joe Thornber | 2572629 | 2014-11-24 14:05:16 +0000 | [diff] [blame] | 2887 | cache->discard_nr_blocks = to_dblock(dm_sector_div_up(cache->origin_sectors, |
| 2888 | cache->discard_block_size)); |
Joe Thornber | 1bad9bc | 2014-11-07 14:47:07 +0000 | [diff] [blame] | 2889 | cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks)); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2890 | if (!cache->discard_bitset) { |
| 2891 | *error = "could not allocate discard bitset"; |
| 2892 | goto bad; |
| 2893 | } |
Joe Thornber | 1bad9bc | 2014-11-07 14:47:07 +0000 | [diff] [blame] | 2894 | clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks)); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2895 | |
| 2896 | cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle); |
| 2897 | if (IS_ERR(cache->copier)) { |
| 2898 | *error = "could not create kcopyd client"; |
| 2899 | r = PTR_ERR(cache->copier); |
| 2900 | goto bad; |
| 2901 | } |
| 2902 | |
| 2903 | cache->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM); |
| 2904 | if (!cache->wq) { |
| 2905 | *error = "could not create workqueue for metadata object"; |
| 2906 | goto bad; |
| 2907 | } |
| 2908 | INIT_WORK(&cache->worker, do_worker); |
| 2909 | INIT_DELAYED_WORK(&cache->waker, do_waker); |
| 2910 | cache->last_commit_jiffies = jiffies; |
| 2911 | |
Joe Thornber | a195db2 | 2014-10-06 16:30:06 -0400 | [diff] [blame] | 2912 | cache->prison = dm_bio_prison_create(); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2913 | if (!cache->prison) { |
| 2914 | *error = "could not create bio prison"; |
| 2915 | goto bad; |
| 2916 | } |
| 2917 | |
| 2918 | cache->all_io_ds = dm_deferred_set_create(); |
| 2919 | if (!cache->all_io_ds) { |
| 2920 | *error = "could not create all_io deferred set"; |
| 2921 | goto bad; |
| 2922 | } |
| 2923 | |
| 2924 | cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE, |
| 2925 | migration_cache); |
| 2926 | if (!cache->migration_pool) { |
| 2927 | *error = "Error creating cache's migration mempool"; |
| 2928 | goto bad; |
| 2929 | } |
| 2930 | |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2931 | cache->need_tick_bio = true; |
| 2932 | cache->sized = false; |
Joe Thornber | 65790ff | 2013-11-08 16:39:50 +0000 | [diff] [blame] | 2933 | cache->invalidate = false; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2934 | cache->commit_requested = false; |
| 2935 | cache->loaded_mappings = false; |
| 2936 | cache->loaded_discards = false; |
| 2937 | |
| 2938 | load_stats(cache); |
| 2939 | |
| 2940 | atomic_set(&cache->stats.demotion, 0); |
| 2941 | atomic_set(&cache->stats.promotion, 0); |
| 2942 | atomic_set(&cache->stats.copies_avoided, 0); |
| 2943 | atomic_set(&cache->stats.cache_cell_clash, 0); |
| 2944 | atomic_set(&cache->stats.commit_count, 0); |
| 2945 | atomic_set(&cache->stats.discard_count, 0); |
| 2946 | |
Joe Thornber | 65790ff | 2013-11-08 16:39:50 +0000 | [diff] [blame] | 2947 | spin_lock_init(&cache->invalidation_lock); |
| 2948 | INIT_LIST_HEAD(&cache->invalidation_requests); |
| 2949 | |
Joe Thornber | 066dbaa | 2015-05-15 15:18:01 +0100 | [diff] [blame] | 2950 | iot_init(&cache->origin_tracker); |
| 2951 | |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 2952 | *result = cache; |
| 2953 | return 0; |
| 2954 | |
| 2955 | bad: |
| 2956 | destroy(cache); |
| 2957 | return r; |
| 2958 | } |
| 2959 | |
| 2960 | static int copy_ctr_args(struct cache *cache, int argc, const char **argv) |
| 2961 | { |
| 2962 | unsigned i; |
| 2963 | const char **copy; |
| 2964 | |
| 2965 | copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL); |
| 2966 | if (!copy) |
| 2967 | return -ENOMEM; |
| 2968 | for (i = 0; i < argc; i++) { |
| 2969 | copy[i] = kstrdup(argv[i], GFP_KERNEL); |
| 2970 | if (!copy[i]) { |
| 2971 | while (i--) |
| 2972 | kfree(copy[i]); |
| 2973 | kfree(copy); |
| 2974 | return -ENOMEM; |
| 2975 | } |
| 2976 | } |
| 2977 | |
| 2978 | cache->nr_ctr_args = argc; |
| 2979 | cache->ctr_args = copy; |
| 2980 | |
| 2981 | return 0; |
| 2982 | } |
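/*
 * Note the unwind above: if a kstrdup() fails part way through,
 * 'while (i--)' frees exactly the strings duplicated so far before
 * freeing the array itself, so the partial copy doesn't leak.
 */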
| 2983 | |
| 2984 | static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv) |
| 2985 | { |
| 2986 | int r = -EINVAL; |
| 2987 | struct cache_args *ca; |
| 2988 | struct cache *cache = NULL; |
| 2989 | |
| 2990 | ca = kzalloc(sizeof(*ca), GFP_KERNEL); |
| 2991 | if (!ca) { |
| 2992 | ti->error = "Error allocating memory for cache"; |
| 2993 | return -ENOMEM; |
| 2994 | } |
| 2995 | ca->ti = ti; |
| 2996 | |
| 2997 | r = parse_cache_args(ca, argc, argv, &ti->error); |
| 2998 | if (r) |
| 2999 | goto out; |
| 3000 | |
| 3001 | r = cache_create(ca, &cache); |
Heinz Mauelshagen | 617a0b8 | 2013-03-20 17:21:26 +0000 | [diff] [blame] | 3002 | if (r) |
| 3003 | goto out; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3004 | |
| 3005 | r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3); |
| 3006 | if (r) { |
| 3007 | destroy(cache); |
| 3008 | goto out; |
| 3009 | } |
| 3010 | |
| 3011 | ti->private = cache; |
| 3012 | |
| 3013 | out: |
| 3014 | destroy_cache_args(ca); |
| 3015 | return r; |
| 3016 | } |
| 3017 | |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 3018 | /*----------------------------------------------------------------*/ |
| 3019 | |
| 3020 | static int cache_map(struct dm_target *ti, struct bio *bio) |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3021 | { |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 3022 | struct cache *cache = ti->private; |
| 3023 | |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3024 | int r; |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 3025 | struct dm_bio_prison_cell *cell = NULL; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3026 | dm_oblock_t block = get_bio_block(cache, bio); |
Mike Snitzer | 19b0092 | 2013-04-05 15:36:34 +0100 | [diff] [blame] | 3027 | size_t pb_data_size = get_per_bio_data_size(cache); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3028 | bool can_migrate = false; |
Joe Thornber | 4077525 | 2015-05-15 15:29:58 +0100 | [diff] [blame] | 3029 | bool fast_promotion; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3030 | struct policy_result lookup_result; |
Heinz Mauelshagen | e893fba | 2014-03-12 16:13:39 +0100 | [diff] [blame] | 3031 | struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size); |
Joe Thornber | fb4100a | 2015-05-20 10:30:32 +0100 | [diff] [blame] | 3032 | struct old_oblock_lock ool; |
| 3033 | |
| 3034 | ool.locker.fn = null_locker; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3035 | |
Heinz Mauelshagen | e893fba | 2014-03-12 16:13:39 +0100 | [diff] [blame] | 3036 | if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) { |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3037 | /* |
| 3038 | * This can only occur if the io goes to a partial block at |
| 3039 | * the end of the origin device. We don't cache these. |
| 3040 | * Just remap to the origin and carry on. |
| 3041 | */ |
Heinz Mauelshagen | e893fba | 2014-03-12 16:13:39 +0100 | [diff] [blame] | 3042 | remap_to_origin(cache, bio); |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 3043 | accounted_begin(cache, bio); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3044 | return DM_MAPIO_REMAPPED; |
| 3045 | } |
| 3046 | |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 3047 | if (discard_or_flush(bio)) { |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3048 | defer_bio(cache, bio); |
| 3049 | return DM_MAPIO_SUBMITTED; |
| 3050 | } |
| 3051 | |
| 3052 | /* |
| 3053 | * Check to see if that block is currently migrating. |
| 3054 | */ |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 3055 | cell = alloc_prison_cell(cache); |
| 3056 | if (!cell) { |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3057 | defer_bio(cache, bio); |
| 3058 | return DM_MAPIO_SUBMITTED; |
| 3059 | } |
| 3060 | |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 3061 | r = bio_detain(cache, block, bio, cell, |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3062 | (cell_free_fn) free_prison_cell, |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 3063 | cache, &cell); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3064 | if (r) { |
| 3065 | if (r < 0) |
| 3066 | defer_bio(cache, bio); |
| 3067 | |
| 3068 | return DM_MAPIO_SUBMITTED; |
| 3069 | } |
| 3070 | |
Joe Thornber | 4077525 | 2015-05-15 15:29:58 +0100 | [diff] [blame] | 3071 | fast_promotion = is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3072 | |
Joe Thornber | 4077525 | 2015-05-15 15:29:58 +0100 | [diff] [blame] | 3073 | r = policy_map(cache->policy, block, false, can_migrate, fast_promotion, |
Joe Thornber | fb4100a | 2015-05-20 10:30:32 +0100 | [diff] [blame] | 3074 | bio, &ool.locker, &lookup_result); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3075 | if (r == -EWOULDBLOCK) { |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 3076 | cell_defer(cache, cell, true); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3077 | return DM_MAPIO_SUBMITTED; |
| 3078 | |
| 3079 | } else if (r) { |
Mike Snitzer | b61d950 | 2015-04-22 17:25:56 -0400 | [diff] [blame] | 3080 | DMERR_LIMIT("%s: Unexpected return from cache replacement policy: %d", |
| 3081 | cache_device_name(cache), r); |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 3082 | cell_defer(cache, cell, false); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3083 | bio_io_error(bio); |
| 3084 | return DM_MAPIO_SUBMITTED; |
| 3085 | } |
| 3086 | |
Joe Thornber | 2ee57d5 | 2013-10-24 14:10:29 -0400 | [diff] [blame] | 3087 | r = DM_MAPIO_REMAPPED; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3088 | switch (lookup_result.op) { |
| 3089 | case POLICY_HIT: |
Joe Thornber | 2ee57d5 | 2013-10-24 14:10:29 -0400 | [diff] [blame] | 3090 | if (passthrough_mode(&cache->features)) { |
| 3091 | if (bio_data_dir(bio) == WRITE) { |
| 3092 | /* |
| 3093 | * We need to invalidate this block, so |
| 3094 | * defer for the worker thread. |
| 3095 | */ |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 3096 | cell_defer(cache, cell, true); |
Joe Thornber | 2ee57d5 | 2013-10-24 14:10:29 -0400 | [diff] [blame] | 3097 | r = DM_MAPIO_SUBMITTED; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3098 | |
Joe Thornber | 2ee57d5 | 2013-10-24 14:10:29 -0400 | [diff] [blame] | 3099 | } else { |
Joe Thornber | 2ee57d5 | 2013-10-24 14:10:29 -0400 | [diff] [blame] | 3100 | inc_miss_counter(cache, bio); |
| 3101 | remap_to_origin_clear_discard(cache, bio, block); |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 3102 | accounted_begin(cache, bio); |
| 3103 | inc_ds(cache, bio, cell); |
 | 3104 | /* FIXME: we want to remap hits or misses straight |
 | 3105 |  * away rather than passing over to the worker. */ |
| 3106 | cell_defer(cache, cell, false); |
Joe Thornber | 2ee57d5 | 2013-10-24 14:10:29 -0400 | [diff] [blame] | 3107 | } |
| 3108 | |
| 3109 | } else { |
| 3110 | inc_hit_counter(cache, bio); |
Joe Thornber | 2ee57d5 | 2013-10-24 14:10:29 -0400 | [diff] [blame] | 3111 | if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) && |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 3112 | !is_dirty(cache, lookup_result.cblock)) { |
Joe Thornber | 2ee57d5 | 2013-10-24 14:10:29 -0400 | [diff] [blame] | 3113 | remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 3114 | accounted_begin(cache, bio); |
| 3115 | inc_ds(cache, bio, cell); |
| 3116 | cell_defer(cache, cell, false); |
| 3117 | |
| 3118 | } else |
| 3119 | remap_cell_to_cache_dirty(cache, cell, block, lookup_result.cblock, false); |
Joe Thornber | 2ee57d5 | 2013-10-24 14:10:29 -0400 | [diff] [blame] | 3120 | } |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3121 | break; |
| 3122 | |
| 3123 | case POLICY_MISS: |
| 3124 | inc_miss_counter(cache, bio); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3125 | if (pb->req_nr != 0) { |
| 3126 | /* |
| 3127 | * This is a duplicate writethrough io that is no |
| 3128 | * longer needed because the block has been demoted. |
| 3129 | */ |
Christoph Hellwig | 4246a0b | 2015-07-20 15:29:37 +0200 | [diff] [blame] | 3130 | bio_endio(bio); |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 3131 | /* FIXME: remap everything as a miss */ |
| 3132 | cell_defer(cache, cell, false); |
Joe Thornber | 8c081b5 | 2014-05-13 16:18:38 +0100 | [diff] [blame] | 3133 | r = DM_MAPIO_SUBMITTED; |
| 3134 | |
| 3135 | } else |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 3136 | remap_cell_to_origin_clear_discard(cache, cell, block, false); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3137 | break; |
| 3138 | |
| 3139 | default: |
Mike Snitzer | b61d950 | 2015-04-22 17:25:56 -0400 | [diff] [blame] | 3140 | DMERR_LIMIT("%s: %s: erroring bio: unknown policy op: %u", |
| 3141 | cache_device_name(cache), __func__, |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3142 | (unsigned) lookup_result.op); |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 3143 | cell_defer(cache, cell, false); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3144 | bio_io_error(bio); |
Joe Thornber | 2ee57d5 | 2013-10-24 14:10:29 -0400 | [diff] [blame] | 3145 | r = DM_MAPIO_SUBMITTED; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3146 | } |
| 3147 | |
Joe Thornber | 2ee57d5 | 2013-10-24 14:10:29 -0400 | [diff] [blame] | 3148 | return r; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3149 | } |
| 3150 | |
| 3151 | static int cache_end_io(struct dm_target *ti, struct bio *bio, int error) |
| 3152 | { |
| 3153 | struct cache *cache = ti->private; |
| 3154 | unsigned long flags; |
Mike Snitzer | 19b0092 | 2013-04-05 15:36:34 +0100 | [diff] [blame] | 3155 | size_t pb_data_size = get_per_bio_data_size(cache); |
| 3156 | struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3157 | |
| 3158 | if (pb->tick) { |
Joe Thornber | fba1010 | 2015-05-29 10:20:56 +0100 | [diff] [blame] | 3159 | policy_tick(cache->policy, false); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3160 | |
| 3161 | spin_lock_irqsave(&cache->lock, flags); |
| 3162 | cache->need_tick_bio = true; |
| 3163 | spin_unlock_irqrestore(&cache->lock, flags); |
| 3164 | } |
| 3165 | |
| 3166 | check_for_quiesced_migrations(cache, pb); |
Joe Thornber | 066dbaa | 2015-05-15 15:18:01 +0100 | [diff] [blame] | 3167 | accounted_complete(cache, bio); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3168 | |
| 3169 | return 0; |
| 3170 | } |
| 3171 | |
| 3172 | static int write_dirty_bitset(struct cache *cache) |
| 3173 | { |
| 3174 | unsigned i, r; |
| 3175 | |
Joe Thornber | 028ae9f | 2015-04-22 16:42:35 -0400 | [diff] [blame] | 3176 | if (get_cache_mode(cache) >= CM_READ_ONLY) |
| 3177 | return -EINVAL; |
| 3178 | |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3179 | for (i = 0; i < from_cblock(cache->cache_size); i++) { |
| 3180 | r = dm_cache_set_dirty(cache->cmd, to_cblock(i), |
| 3181 | is_dirty(cache, to_cblock(i))); |
Joe Thornber | 028ae9f | 2015-04-22 16:42:35 -0400 | [diff] [blame] | 3182 | if (r) { |
| 3183 | metadata_operation_failed(cache, "dm_cache_set_dirty", r); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3184 | return r; |
Joe Thornber | 028ae9f | 2015-04-22 16:42:35 -0400 | [diff] [blame] | 3185 | } |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3186 | } |
| 3187 | |
| 3188 | return 0; |
| 3189 | } |
| 3190 | |
| 3191 | static int write_discard_bitset(struct cache *cache) |
| 3192 | { |
| 3193 | unsigned i, r; |
| 3194 | |
Joe Thornber | 028ae9f | 2015-04-22 16:42:35 -0400 | [diff] [blame] | 3195 | if (get_cache_mode(cache) >= CM_READ_ONLY) |
| 3196 | return -EINVAL; |
| 3197 | |
Joe Thornber | 1bad9bc | 2014-11-07 14:47:07 +0000 | [diff] [blame] | 3198 | r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size, |
| 3199 | cache->discard_nr_blocks); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3200 | if (r) { |
Mike Snitzer | b61d950 | 2015-04-22 17:25:56 -0400 | [diff] [blame] | 3201 | DMERR("%s: could not resize on-disk discard bitset", cache_device_name(cache)); |
Joe Thornber | 028ae9f | 2015-04-22 16:42:35 -0400 | [diff] [blame] | 3202 | metadata_operation_failed(cache, "dm_cache_discard_bitset_resize", r); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3203 | return r; |
| 3204 | } |
| 3205 | |
Joe Thornber | 1bad9bc | 2014-11-07 14:47:07 +0000 | [diff] [blame] | 3206 | for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) { |
| 3207 | r = dm_cache_set_discard(cache->cmd, to_dblock(i), |
| 3208 | is_discarded(cache, to_dblock(i))); |
Joe Thornber | 028ae9f | 2015-04-22 16:42:35 -0400 | [diff] [blame] | 3209 | if (r) { |
| 3210 | metadata_operation_failed(cache, "dm_cache_set_discard", r); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3211 | return r; |
Joe Thornber | 028ae9f | 2015-04-22 16:42:35 -0400 | [diff] [blame] | 3212 | } |
| 3213 | } |
| 3214 | |
| 3215 | return 0; |
| 3216 | } |
| 3217 | |
| 3218 | static int write_hints(struct cache *cache) |
| 3219 | { |
| 3220 | int r; |
| 3221 | |
| 3222 | if (get_cache_mode(cache) >= CM_READ_ONLY) |
| 3223 | return -EINVAL; |
| 3224 | |
| 3225 | r = dm_cache_write_hints(cache->cmd, cache->policy); |
| 3226 | if (r) { |
| 3227 | metadata_operation_failed(cache, "dm_cache_write_hints", r); |
| 3228 | return r; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3229 | } |
| 3230 | |
| 3231 | return 0; |
| 3232 | } |
| 3233 | |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3234 | /* |
| 3235 | * returns true on success |
| 3236 | */ |
| 3237 | static bool sync_metadata(struct cache *cache) |
| 3238 | { |
| 3239 | int r1, r2, r3, r4; |
| 3240 | |
| 3241 | r1 = write_dirty_bitset(cache); |
| 3242 | if (r1) |
Mike Snitzer | b61d950 | 2015-04-22 17:25:56 -0400 | [diff] [blame] | 3243 | DMERR("%s: could not write dirty bitset", cache_device_name(cache)); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3244 | |
| 3245 | r2 = write_discard_bitset(cache); |
| 3246 | if (r2) |
Mike Snitzer | b61d950 | 2015-04-22 17:25:56 -0400 | [diff] [blame] | 3247 | DMERR("%s: could not write discard bitset", cache_device_name(cache)); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3248 | |
| 3249 | save_stats(cache); |
| 3250 | |
Joe Thornber | 028ae9f | 2015-04-22 16:42:35 -0400 | [diff] [blame] | 3251 | r3 = write_hints(cache); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3252 | if (r3) |
Mike Snitzer | b61d950 | 2015-04-22 17:25:56 -0400 | [diff] [blame] | 3253 | DMERR("%s: could not write hints", cache_device_name(cache)); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3254 | |
| 3255 | /* |
| 3256 | * If writing the above metadata failed, we still commit, but don't |
| 3257 | * set the clean shutdown flag. This will effectively force every |
| 3258 | * dirty bit to be set on reload. |
| 3259 | */ |
Joe Thornber | 028ae9f | 2015-04-22 16:42:35 -0400 | [diff] [blame] | 3260 | r4 = commit(cache, !r1 && !r2 && !r3); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3261 | if (r4) |
Mike Snitzer | b61d950 | 2015-04-22 17:25:56 -0400 | [diff] [blame] | 3262 | DMERR("%s: could not write cache metadata", cache_device_name(cache)); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3263 | |
| 3264 | return !r1 && !r2 && !r3 && !r4; |
| 3265 | } |
| 3266 | |
| 3267 | static void cache_postsuspend(struct dm_target *ti) |
| 3268 | { |
| 3269 | struct cache *cache = ti->private; |
| 3270 | |
| 3271 | start_quiescing(cache); |
| 3272 | wait_for_migrations(cache); |
| 3273 | stop_worker(cache); |
Joe Thornber | 651f5fa | 2015-05-15 15:26:08 +0100 | [diff] [blame] | 3274 | requeue_deferred_bios(cache); |
| 3275 | requeue_deferred_cells(cache); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3276 | stop_quiescing(cache); |
| 3277 | |
Joe Thornber | 028ae9f | 2015-04-22 16:42:35 -0400 | [diff] [blame] | 3278 | if (get_cache_mode(cache) == CM_WRITE) |
| 3279 | (void) sync_metadata(cache); |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3280 | } |
| 3281 | |
| 3282 | static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock, |
| 3283 | bool dirty, uint32_t hint, bool hint_valid) |
| 3284 | { |
| 3285 | int r; |
| 3286 | struct cache *cache = context; |
| 3287 | |
| 3288 | r = policy_load_mapping(cache->policy, oblock, cblock, hint, hint_valid); |
| 3289 | if (r) |
| 3290 | return r; |
| 3291 | |
| 3292 | if (dirty) |
| 3293 | set_dirty(cache, oblock, cblock); |
| 3294 | else |
| 3295 | clear_dirty(cache, oblock, cblock); |
| 3296 | |
| 3297 | return 0; |
| 3298 | } |
| 3299 | |
Joe Thornber | 3e2e1c3 | 2014-11-24 14:06:22 +0000 | [diff] [blame] | 3300 | /* |
 | 3301 | * The discard block size in the on-disk metadata is not |
 | 3302 | * necessarily the same as the one we're currently using. So we have to |
| 3303 | * be careful to only set the discarded attribute if we know it |
| 3304 | * covers a complete block of the new size. |
| 3305 | */ |
| 3306 | struct discard_load_info { |
| 3307 | struct cache *cache; |
| 3308 | |
| 3309 | /* |
 | 3310 | * These blocks are sized using the on-disk dblock size, rather |
| 3311 | * than the current one. |
| 3312 | */ |
| 3313 | dm_block_t block_size; |
| 3314 | dm_block_t discard_begin, discard_end; |
| 3315 | }; |
| 3316 | |
| 3317 | static void discard_load_info_init(struct cache *cache, |
| 3318 | struct discard_load_info *li) |
| 3319 | { |
| 3320 | li->cache = cache; |
| 3321 | li->discard_begin = li->discard_end = 0; |
| 3322 | } |
| 3323 | |
| 3324 | static void set_discard_range(struct discard_load_info *li) |
| 3325 | { |
| 3326 | sector_t b, e; |
| 3327 | |
| 3328 | if (li->discard_begin == li->discard_end) |
| 3329 | return; |
| 3330 | |
| 3331 | /* |
| 3332 | * Convert to sectors. |
| 3333 | */ |
| 3334 | b = li->discard_begin * li->block_size; |
| 3335 | e = li->discard_end * li->block_size; |
| 3336 | |
| 3337 | /* |
| 3338 | * Then convert back to the current dblock size. |
| 3339 | */ |
| 3340 | b = dm_sector_div_up(b, li->cache->discard_block_size); |
| 3341 | sector_div(e, li->cache->discard_block_size); |
| 3342 | |
| 3343 | /* |
| 3344 | * The origin may have shrunk, so we need to check we're still in |
| 3345 | * bounds. |
| 3346 | */ |
| 3347 | if (e > from_dblock(li->cache->discard_nr_blocks)) |
| 3348 | e = from_dblock(li->cache->discard_nr_blocks); |
| 3349 | |
| 3350 | for (; b < e; b++) |
| 3351 | set_discard(li->cache, to_dblock(b)); |
| 3352 | } |
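/*
 * E.g. if the metadata was written with 128 sector dblocks and we now
 * use 512 sector ones, an on-disk run of dblocks [8, 16) spans sectors
 * [1024, 2048); rounding the start up and the end down gives current
 * dblocks [2, 4), both completely covered, so both are set.  A run of
 * [10, 13) spans sectors [1280, 1664), which rounds to the empty range
 * [3, 3): no current dblock is fully covered, so nothing is set.
 */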
| 3353 | |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3354 | static int load_discard(void *context, sector_t discard_block_size, |
Joe Thornber | 1bad9bc | 2014-11-07 14:47:07 +0000 | [diff] [blame] | 3355 | dm_dblock_t dblock, bool discard) |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3356 | { |
Joe Thornber | 3e2e1c3 | 2014-11-24 14:06:22 +0000 | [diff] [blame] | 3357 | struct discard_load_info *li = context; |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3358 | |
Joe Thornber | 3e2e1c3 | 2014-11-24 14:06:22 +0000 | [diff] [blame] | 3359 | li->block_size = discard_block_size; |
Joe Thornber | 1bad9bc | 2014-11-07 14:47:07 +0000 | [diff] [blame] | 3360 | |
Joe Thornber | 3e2e1c3 | 2014-11-24 14:06:22 +0000 | [diff] [blame] | 3361 | if (discard) { |
| 3362 | if (from_dblock(dblock) == li->discard_end) |
| 3363 | /* |
| 3364 | * We're already in a discard range, just extend it. |
| 3365 | */ |
| 3366 | li->discard_end = li->discard_end + 1ULL; |
| 3367 | |
| 3368 | else { |
| 3369 | /* |
| 3370 | * Emit the old range and start a new one. |
| 3371 | */ |
| 3372 | set_discard_range(li); |
| 3373 | li->discard_begin = from_dblock(dblock); |
| 3374 | li->discard_end = li->discard_begin + 1ULL; |
| 3375 | } |
| 3376 | } else { |
| 3377 | set_discard_range(li); |
| 3378 | li->discard_begin = li->discard_end = 0; |
| 3379 | } |
Joe Thornber | c6b4fcb | 2013-03-01 22:45:51 +0000 | [diff] [blame] | 3380 | |
| 3381 | return 0; |
| 3382 | } |
| 3383 | |
Joe Thornber | f494a9c | 2013-10-31 13:55:49 -0400 | [diff] [blame] | 3384 | static dm_cblock_t get_cache_dev_size(struct cache *cache) |
| 3385 | { |
| 3386 | sector_t size = get_dev_size(cache->cache_dev); |
| 3387 | (void) sector_div(size, cache->sectors_per_block); |
| 3388 | return to_cblock(size); |
| 3389 | } |
| 3390 | |
| 3391 | static bool can_resize(struct cache *cache, dm_cblock_t new_size) |
| 3392 | { |
| 3393 | if (from_cblock(new_size) > from_cblock(cache->cache_size)) |
| 3394 | return true; |
| 3395 | |
| 3396 | /* |
| 3397 | * We can't drop a dirty block when shrinking the cache. |
| 3398 | */ |
 | 3399 | while (from_cblock(new_size) < from_cblock(cache->cache_size)) { |
 | 3400 | if (is_dirty(cache, new_size)) { |
Mike Snitzer | b61d950 | 2015-04-22 17:25:56 -0400 | [diff] [blame] | 3401 | DMERR("%s: unable to shrink cache; cache block %llu is dirty", |
 | 3402 | cache_device_name(cache), |
Joe Thornber | f494a9c | 2013-10-31 13:55:49 -0400 | [diff] [blame] | 3403 | (unsigned long long) from_cblock(new_size)); |
 | 3404 | return false; |
 | 3405 | } |
 | 3406 | new_size = to_cblock(from_cblock(new_size) + 1); |
 | 3407 | } |
| 3408 | |
| 3409 | return true; |
| 3410 | } |
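/*
 * E.g. shrinking from 2048 to 1024 cache blocks only succeeds if every
 * block in [1024, 2048) is clean; a single dirty block among those to
 * be dropped would lose data, so the resize is refused.
 */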

static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
{
	int r;

	r = dm_cache_resize(cache->cmd, new_size);
	if (r) {
		DMERR("%s: could not resize cache metadata", cache_device_name(cache));
		metadata_operation_failed(cache, "dm_cache_resize", r);
		return r;
	}

	set_cache_size(cache, new_size);

	return 0;
}

static int cache_preresume(struct dm_target *ti)
{
	int r = 0;
	struct cache *cache = ti->private;
	dm_cblock_t csize = get_cache_dev_size(cache);

	/*
	 * Check to see if the cache has resized.
	 */
	if (!cache->sized) {
		r = resize_cache_dev(cache, csize);
		if (r)
			return r;

		cache->sized = true;

	} else if (csize != cache->cache_size) {
		if (!can_resize(cache, csize))
			return -EINVAL;

		r = resize_cache_dev(cache, csize);
		if (r)
			return r;
	}

	if (!cache->loaded_mappings) {
		r = dm_cache_load_mappings(cache->cmd, cache->policy,
					   load_mapping, cache);
		if (r) {
			DMERR("%s: could not load cache mappings", cache_device_name(cache));
			metadata_operation_failed(cache, "dm_cache_load_mappings", r);
			return r;
		}

		cache->loaded_mappings = true;
	}

	if (!cache->loaded_discards) {
		struct discard_load_info li;

		/*
		 * The discard bitset could have been resized, or the
		 * discard block size changed. To be safe we start by
		 * setting every dblock to not discarded.
		 */
		clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));

		discard_load_info_init(cache, &li);
		r = dm_cache_load_discards(cache->cmd, load_discard, &li);
		if (r) {
			DMERR("%s: could not load origin discards", cache_device_name(cache));
			metadata_operation_failed(cache, "dm_cache_load_discards", r);
			return r;
		}
		set_discard_range(&li);

		cache->loaded_discards = true;
	}

	return r;
}

static void cache_resume(struct dm_target *ti)
{
	struct cache *cache = ti->private;

	cache->need_tick_bio = true;
	do_waker(&cache->waker.work);
}

/*
 * Status format:
 *
 * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
 * <cache block size> <#used cache blocks>/<#total cache blocks>
 * <#read hits> <#read misses> <#write hits> <#write misses>
 * <#demotions> <#promotions> <#dirty>
 * <#features> <features>*
 * <#core args> <core args>
 * <policy name> <#policy args> <policy args>* <cache metadata mode> <needs_check>
 */
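
/*
 * Example STATUSTYPE_INFO output (all values illustrative; the policy
 * name/argument portion is hypothetical and varies with the policy in use):
 *
 *   8 72/1024 512 140/16384 4231 617 8994 304 12 140 0
 *   1 writeback 2 migration_threshold 2048 smq 0 rw -
 */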
static void cache_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	int r = 0;
	unsigned i;
	ssize_t sz = 0;
	dm_block_t nr_free_blocks_metadata = 0;
	dm_block_t nr_blocks_metadata = 0;
	char buf[BDEVNAME_SIZE];
	struct cache *cache = ti->private;
	dm_cblock_t residency;
	bool needs_check;

	switch (type) {
	case STATUSTYPE_INFO:
		if (get_cache_mode(cache) == CM_FAIL) {
			DMEMIT("Fail");
			break;
		}

		/* Commit to ensure statistics aren't out-of-date */
		if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
			(void) commit(cache, false);

		r = dm_cache_get_free_metadata_block_count(cache->cmd, &nr_free_blocks_metadata);
		if (r) {
			DMERR("%s: dm_cache_get_free_metadata_block_count returned %d",
			      cache_device_name(cache), r);
			goto err;
		}

		r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
		if (r) {
			DMERR("%s: dm_cache_get_metadata_dev_size returned %d",
			      cache_device_name(cache), r);
			goto err;
		}

		residency = policy_residency(cache->policy);

		DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %lu ",
		       (unsigned) DM_CACHE_METADATA_BLOCK_SIZE,
		       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
		       (unsigned long long)nr_blocks_metadata,
		       cache->sectors_per_block,
		       (unsigned long long) from_cblock(residency),
		       (unsigned long long) from_cblock(cache->cache_size),
		       (unsigned) atomic_read(&cache->stats.read_hit),
		       (unsigned) atomic_read(&cache->stats.read_miss),
		       (unsigned) atomic_read(&cache->stats.write_hit),
		       (unsigned) atomic_read(&cache->stats.write_miss),
		       (unsigned) atomic_read(&cache->stats.demotion),
		       (unsigned) atomic_read(&cache->stats.promotion),
		       (unsigned long) atomic_read(&cache->nr_dirty));

		if (writethrough_mode(&cache->features))
			DMEMIT("1 writethrough ");

		else if (passthrough_mode(&cache->features))
			DMEMIT("1 passthrough ");

		else if (writeback_mode(&cache->features))
			DMEMIT("1 writeback ");

		else {
			DMERR("%s: internal error: unknown io mode: %d",
			      cache_device_name(cache), (int) cache->features.io_mode);
			goto err;
		}

		DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);

		DMEMIT("%s ", dm_cache_policy_get_name(cache->policy));
		if (sz < maxlen) {
			r = policy_emit_config_values(cache->policy, result, maxlen, &sz);
			if (r)
				DMERR("%s: policy_emit_config_values returned %d",
				      cache_device_name(cache), r);
		}

		if (get_cache_mode(cache) == CM_READ_ONLY)
			DMEMIT("ro ");
		else
			DMEMIT("rw ");

		r = dm_cache_metadata_needs_check(cache->cmd, &needs_check);

		if (r || needs_check)
			DMEMIT("needs_check ");
		else
			DMEMIT("- ");

		break;

	case STATUSTYPE_TABLE:
		format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);
		format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);
		format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
		DMEMIT("%s", buf);

		/* i + 1 avoids unsigned wrap-around if there are no ctr args */
		for (i = 0; i + 1 < cache->nr_ctr_args; i++)
			DMEMIT(" %s", cache->ctr_args[i]);
		if (cache->nr_ctr_args)
			DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]);
	}

	return;

err:
	DMEMIT("Error");
}

/*
 * A cache block range can take two forms:
 *
 * i) A single cblock, eg. '3456'
 * ii) A begin and end cblock with a dash between, eg. 123-234
 */
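
/*
 * For example (illustrative values): '3456' names the single cblock 3456,
 * while '123-234' names cblocks 123 up to, but not including, 234 (the end
 * bound is exclusive).
 */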
static int parse_cblock_range(struct cache *cache, const char *str,
			      struct cblock_range *result)
{
	char dummy;
	uint64_t b, e;
	int r;

	/*
	 * Try and parse form (ii) first.
	 */
	r = sscanf(str, "%llu-%llu%c", &b, &e, &dummy);
	if (r < 0)
		return r;

	if (r == 2) {
		result->begin = to_cblock(b);
		result->end = to_cblock(e);
		return 0;
	}

	/*
	 * That didn't work, try form (i).
	 */
	r = sscanf(str, "%llu%c", &b, &dummy);
	if (r < 0)
		return r;

	if (r == 1) {
		result->begin = to_cblock(b);
		result->end = to_cblock(from_cblock(result->begin) + 1u);
		return 0;
	}

	DMERR("%s: invalid cblock range '%s'", cache_device_name(cache), str);
	return -EINVAL;
}

static int validate_cblock_range(struct cache *cache, struct cblock_range *range)
{
	uint64_t b = from_cblock(range->begin);
	uint64_t e = from_cblock(range->end);
	uint64_t n = from_cblock(cache->cache_size);

	if (b >= n) {
		DMERR("%s: begin cblock out of range: %llu >= %llu",
		      cache_device_name(cache), b, n);
		return -EINVAL;
	}

	if (e > n) {
		DMERR("%s: end cblock out of range: %llu > %llu",
		      cache_device_name(cache), e, n);
		return -EINVAL;
	}

	if (b >= e) {
		DMERR("%s: invalid cblock range: %llu >= %llu",
		      cache_device_name(cache), b, e);
		return -EINVAL;
	}

	return 0;
}
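
/*
 * E.g. with a 1024-cblock cache (illustrative size): '1000-1024' passes
 * validation (end is exclusive, so e == n is allowed), while '1000-1025'
 * and the single cblock '1024' are both rejected as out of range.
 */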

static int request_invalidation(struct cache *cache, struct cblock_range *range)
{
	struct invalidation_request req;

	INIT_LIST_HEAD(&req.list);
	req.cblocks = range;
	atomic_set(&req.complete, 0);
	req.err = 0;
	init_waitqueue_head(&req.result_wait);

	spin_lock(&cache->invalidation_lock);
	list_add(&req.list, &cache->invalidation_requests);
	spin_unlock(&cache->invalidation_lock);
	wake_worker(cache);

	wait_event(req.result_wait, atomic_read(&req.complete));
	return req.err;
}

static int process_invalidate_cblocks_message(struct cache *cache, unsigned count,
					      const char **cblock_ranges)
{
	int r = 0;
	unsigned i;
	struct cblock_range range;

	if (!passthrough_mode(&cache->features)) {
		DMERR("%s: cache has to be in passthrough mode for invalidation",
		      cache_device_name(cache));
		return -EPERM;
	}

	for (i = 0; i < count; i++) {
		r = parse_cblock_range(cache, cblock_ranges[i], &range);
		if (r)
			break;

		r = validate_cblock_range(cache, &range);
		if (r)
			break;

		/*
		 * Pass the begin and end cache blocks to the worker and wake it.
		 */
		r = request_invalidation(cache, &range);
		if (r)
			break;
	}

	return r;
}

/*
 * Supports
 *	"<key> <value>"
 * and
 *	"invalidate_cblocks [(<begin>)|(<begin>-<end>)]*"
 *
 * The key migration_threshold is supported by the cache target core.
 */
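
/*
 * Example userspace invocations (the device name 'my-cache' is hypothetical):
 *
 *   dmsetup message my-cache 0 migration_threshold 2048
 *   dmsetup message my-cache 0 invalidate_cblocks 123-234 3456
 *
 * The second form requires the cache to be in passthrough mode.
 */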
static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct cache *cache = ti->private;

	if (!argc)
		return -EINVAL;

	if (get_cache_mode(cache) >= CM_READ_ONLY) {
		DMERR("%s: unable to service cache target messages in READ_ONLY or FAIL mode",
		      cache_device_name(cache));
		return -EOPNOTSUPP;
	}

	if (!strcasecmp(argv[0], "invalidate_cblocks"))
		return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1);

	if (argc != 2)
		return -EINVAL;

	return set_config_value(cache, argv[0], argv[1]);
}

static int cache_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	int r = 0;
	struct cache *cache = ti->private;

	r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
	if (!r)
		r = fn(ti, cache->origin_dev, 0, ti->len, data);

	return r;
}

static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
{
	/*
	 * FIXME: these limits may be incompatible with the cache device
	 */
	limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024,
					    cache->origin_sectors);
	limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
}
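
/*
 * Worked example (illustrative): with a 1 MiB discard block
 * (discard_block_size == 2048 sectors), discard_granularity becomes
 * 2048 << 9 == 1 MiB, and max_discard_sectors is capped at
 * min(2048 * 1024, origin_sectors), i.e. 1 GiB for a large origin.
 */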

static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct cache *cache = ti->private;
	uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

	/*
	 * If the system-determined stacked limits are compatible with the
	 * cache's blocksize (the blocksize is a factor of io_opt) do not
	 * override them.
	 */
	if (io_opt_sectors < cache->sectors_per_block ||
	    do_div(io_opt_sectors, cache->sectors_per_block)) {
		blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT);
		blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
	}
	set_discard_limits(cache, limits);
}
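
/*
 * E.g. (illustrative): with 256 KiB cache blocks (sectors_per_block == 512),
 * a stacked io_opt of 1 MiB (2048 sectors) is kept since 2048 % 512 == 0,
 * whereas an io_opt of 192 KiB (384 sectors) would be overridden with
 * io_min == io_opt == 256 KiB.
 */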

/*----------------------------------------------------------------*/

static struct target_type cache_target = {
	.name = "cache",
	.version = {1, 9, 0},
	.module = THIS_MODULE,
	.ctr = cache_ctr,
	.dtr = cache_dtr,
	.map = cache_map,
	.end_io = cache_end_io,
	.postsuspend = cache_postsuspend,
	.preresume = cache_preresume,
	.resume = cache_resume,
	.status = cache_status,
	.message = cache_message,
	.iterate_devices = cache_iterate_devices,
	.io_hints = cache_io_hints,
};

static int __init dm_cache_init(void)
{
	int r;

	r = dm_register_target(&cache_target);
	if (r) {
		DMERR("cache target registration failed: %d", r);
		return r;
	}

	migration_cache = KMEM_CACHE(dm_cache_migration, 0);
	if (!migration_cache) {
		dm_unregister_target(&cache_target);
		return -ENOMEM;
	}

	return 0;
}

static void __exit dm_cache_exit(void)
{
	dm_unregister_target(&cache_target);
	kmem_cache_destroy(migration_cache);
}

module_init(dm_cache_init);
module_exit(dm_cache_exit);

MODULE_DESCRIPTION(DM_NAME " cache target");
MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
MODULE_LICENSE("GPL");