/*
 * Copyright (C) 2011-2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#ifndef DM_BIO_PRISON_H
#define DM_BIO_PRISON_H

#include "persistent-data/dm-block-manager.h" /* FIXME: for dm_block_t */
#include "dm-thin-metadata.h" /* FIXME: for dm_thin_id */

#include <linux/bio.h>
#include <linux/rbtree.h>

/*----------------------------------------------------------------*/

/*
 * Sometimes we can't deal with a bio straight away, so we put it in
 * prison where it can't cause any mischief.  Bios are put in a cell
 * identified by a key; multiple bios can be in the same cell.  When the
 * cell is subsequently unlocked the bios become available.
 */
struct dm_bio_prison;
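
/*
 * A rough lifecycle sketch (hypothetical client code; build_key() and the
 * locals are placeholders, not part of this API).  The preallocated cell
 * is only consumed if a new cell has to be created:
 *
 *	prealloc = dm_bio_prison_alloc_cell(prison, GFP_NOIO);
 *	build_key(&key);
 *	if (dm_bio_detain(prison, &key, bio, prealloc, &cell)) {
 *		dm_bio_prison_free_cell(prison, prealloc);
 *		return;
 *	}
 *
 * On the early return above the bio waits in the cell; otherwise the bio
 * is the holder, so do the work and then unlock:
 *
 *	bio_list_init(&bios);
 *	dm_cell_release(prison, cell, &bios);
 *	dm_bio_prison_free_cell(prison, prealloc);
 */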

/*
 * Keys define a range of blocks within either a virtual or physical
 * device.
 */
struct dm_cell_key {
	int virtual;
	dm_thin_id dev;
	dm_block_t block_begin, block_end;
};

/*
 * Treat this as opaque; it's only in the header so that callers can
 * manage allocation themselves.
 */
struct dm_bio_prison_cell {
	struct list_head user_list;	/* for client use */
	struct rb_node node;

	struct dm_cell_key key;
	struct bio *holder;
	struct bio_list bios;
};

struct dm_bio_prison *dm_bio_prison_create(void);
void dm_bio_prison_destroy(struct dm_bio_prison *prison);

/*
 * These two functions just wrap a mempool.  This is a transitory step:
 * eventually all bio prison clients should manage their own cell memory.
 *
 * Like mempool_alloc(), dm_bio_prison_alloc_cell() can only fail if called
 * in interrupt context or passed GFP_NOWAIT.
 */
struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison,
						    gfp_t gfp);
void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
			     struct dm_bio_prison_cell *cell);
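
/*
 * For example, a GFP_NOIO allocation blocks rather than fails, so its
 * result needs no NULL check, whereas a GFP_NOWAIT allocation must be
 * checked (a sketch, not taken from a real client):
 *
 *	cell = dm_bio_prison_alloc_cell(prison, GFP_NOWAIT);
 *	if (!cell)
 *		return -ENOMEM;
 */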

/*
 * Creates, or retrieves, a cell that overlaps the given key.
 *
 * Returns 1 if a pre-existing cell was returned, 0 if a new cell was
 * created using @cell_prealloc.
 */
int dm_get_cell(struct dm_bio_prison *prison,
		struct dm_cell_key *key,
		struct dm_bio_prison_cell *cell_prealloc,
		struct dm_bio_prison_cell **cell_result);
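
/*
 * A sketch of the calling convention (hypothetical client code): when a
 * pre-existing cell is returned the preallocated cell was not consumed,
 * so the caller may free or reuse it:
 *
 *	if (dm_get_cell(prison, &key, prealloc, &cell))
 *		dm_bio_prison_free_cell(prison, prealloc);
 */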

/*
 * An atomic op that combines retrieving or creating a cell with adding a
 * bio to it.
 *
 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
 */
int dm_bio_detain(struct dm_bio_prison *prison,
		  struct dm_cell_key *key,
		  struct bio *inmate,
		  struct dm_bio_prison_cell *cell_prealloc,
		  struct dm_bio_prison_cell **cell_result);
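
/*
 * A typical wrapper, modelled on how the thin-pool target uses this (see
 * the lifecycle sketch above): the preallocated cell is consumed only
 * when a new cell is created, i.e. when 0 is returned:
 *
 *	r = dm_bio_detain(prison, &key, bio, prealloc, &cell);
 *	if (r)
 *		dm_bio_prison_free_cell(prison, prealloc);
 */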
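/*
 * dm_cell_release() unlocks a cell, transferring the holder and all
 * inmates to @bios.  dm_cell_release_no_holder() transfers only the
 * inmates, leaving the holder to the caller.  dm_cell_error() ends every
 * bio in the cell with @error.
 */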
void dm_cell_release(struct dm_bio_prison *prison,
		     struct dm_bio_prison_cell *cell,
		     struct bio_list *bios);
void dm_cell_release_no_holder(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell,
			       struct bio_list *inmates);
void dm_cell_error(struct dm_bio_prison *prison,
		   struct dm_bio_prison_cell *cell, int error);

/*
 * Visits the cell and then releases it.  Guarantees that no new inmates
 * are inserted between the visit and the release.
 */
void dm_cell_visit_release(struct dm_bio_prison *prison,
			   void (*visit_fn)(void *, struct dm_bio_prison_cell *),
			   void *context, struct dm_bio_prison_cell *cell);
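
/*
 * For example, a visitor that steals a cell's inmates before the release
 * (a sketch; collect_bios() is hypothetical):
 *
 *	static void collect_bios(void *context, struct dm_bio_prison_cell *cell)
 *	{
 *		struct bio_list *bios = context;
 *
 *		bio_list_merge(bios, &cell->bios);
 *		bio_list_init(&cell->bios);
 *	}
 *
 *	dm_cell_visit_release(prison, collect_bios, &bios, cell);
 */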

/*
 * Rather than always releasing the prisoners in a cell, the client may
 * want to promote one of them to be the new holder.  There is a race,
 * though, between releasing an empty cell and other threads adding new
 * inmates, so this function makes the decision with its lock held.
 *
 * This function can have two outcomes:
 * i) An inmate is promoted to be the holder of the cell (return value of 0).
 * ii) The cell has no inmate for promotion and is released (return value of 1).
 */
int dm_cell_promote_or_release(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell);
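
/*
 * E.g. (hypothetical client code, issue() being a placeholder): on a
 * return of 0 the promoted inmate is now cell->holder and must be
 * serviced by the caller; on a return of 1 the cell is gone:
 *
 *	if (!dm_cell_promote_or_release(prison, cell))
 *		issue(cell->holder);
 */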

/*----------------------------------------------------------------*/

/*
 * We use the deferred set to keep track of pending reads to shared blocks.
 * We do this to ensure the new mapping caused by a write isn't performed
 * until these prior reads have completed.  Otherwise the insertion of the
 * new mapping could free the old block that the read bios are mapped to.
 */

struct dm_deferred_set;
struct dm_deferred_entry;

struct dm_deferred_set *dm_deferred_set_create(void);
void dm_deferred_set_destroy(struct dm_deferred_set *ds);

struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds);
void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head);
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work);
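
/*
 * A sketch of the intended pattern, modelled loosely on the thin-pool
 * target (ds, entry and m are placeholders).  When issuing a read to a
 * shared block:
 *
 *	entry = dm_deferred_entry_inc(ds);
 *
 * When that read completes, collect and process any work items that have
 * become runnable:
 *
 *	struct list_head work;
 *
 *	INIT_LIST_HEAD(&work);
 *	dm_deferred_entry_dec(entry, &work);
 *
 * Before inserting a new mapping; a return of 0 is assumed to mean no
 * reads are outstanding, so the deferred work can be done immediately
 * rather than queued:
 *
 *	if (!dm_deferred_set_add_work(ds, &m->list))
 *		do_the_work_now(m);
 */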

/*----------------------------------------------------------------*/

#endif