/*
 * Copyright (C) 2011-2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#ifndef DM_BIO_PRISON_H
#define DM_BIO_PRISON_H

#include "persistent-data/dm-block-manager.h" /* FIXME: for dm_block_t */
#include "dm-thin-metadata.h" /* FIXME: for dm_thin_id */

#include <linux/list.h>
#include <linux/bio.h>
| 15 | |
/*----------------------------------------------------------------*/

/*
 * Sometimes we can't deal with a bio straight away.  We put them in prison
 * where they can't cause any mischief.  Bios are put in a cell identified
 * by a key; multiple bios can be in the same cell.  When the cell is
 * subsequently unlocked the bios become available.
 */
struct dm_bio_prison;
struct dm_bio_prison_cell;
| 27 | /* FIXME: this needs to be more abstract */ |
| 28 | struct dm_cell_key { |
| 29 | int virtual; |
| 30 | dm_thin_id dev; |
| 31 | dm_block_t block; |
| 32 | }; |
| 33 | |
/*
 * Allocate/free a prison.  @nr_cells sizes the prison; cells themselves
 * are represented by the opaque struct dm_bio_prison_cell.
 */
struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells);
void dm_bio_prison_destroy(struct dm_bio_prison *prison);

/*
 * Lock the cell identified by @key, detaining @inmate in it.
 *
 * This may block if a new cell needs allocating.  You must ensure that
 * cells will be unlocked even if the calling thread is blocked.
 *
 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
 * On return *@ref points at the cell (for later release/error calls).
 */
int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key,
		  struct bio *inmate, struct dm_bio_prison_cell **ref);

/*
 * Unlock @cell, handing its detained bios back to the caller on @bios
 * for reissue.
 */
void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios);

/*
 * Release a cell expected to contain a single bio.
 * NOTE(review): the bio argument looks redundant given the cell already
 * tracks its holder — see the FIXME below.
 */
void dm_cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio); // FIXME: bio arg not needed
/*
 * As dm_cell_release(), but the holder bio is NOT placed on @inmates;
 * only the other detained bios are handed back.
 */
void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates);

/*
 * Unlock @cell, failing its detained bios with an error instead of
 * handing them back.
 */
void dm_cell_error(struct dm_bio_prison_cell *cell);
| 50 | |
/*----------------------------------------------------------------*/

/*
 * We use the deferred set to keep track of pending reads to shared blocks.
 * We do this to ensure the new mapping caused by a write isn't performed
 * until these prior reads have completed.  Otherwise the insertion of the
 * new mapping could free the old block that the read bios are mapped to.
 */

struct dm_deferred_set;
struct dm_deferred_entry;
| 62 | |
/*
 * Allocate/free a deferred set.
 */
struct dm_deferred_set *dm_deferred_set_create(void);
void dm_deferred_set_destroy(struct dm_deferred_set *ds);

/*
 * Register an in-flight operation (e.g. a read of a shared block) with
 * the set; the returned entry is the token to pass to _entry_dec() when
 * the operation completes.
 */
struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds);

/*
 * Complete @entry.  Any deferred work whose prerequisites have now all
 * completed is moved onto @head for the caller to process.
 */
void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head);

/*
 * Queue @work to run once the currently outstanding entries complete.
 * NOTE(review): return-value semantics aren't visible from this header —
 * presumably non-zero means the work list was added/must be deferred;
 * confirm against the implementation before relying on it.
 */
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work);
| 69 | |
/*----------------------------------------------------------------*/

#endif /* DM_BIO_PRISON_H */