/*
 * 2.5 block I/O model
 *
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
#include <linux/bug.h>

#ifdef CONFIG_BLOCK

#include <asm/io.h>

/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON	BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES		256
#define BIO_MAX_SIZE		(BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
#define BIO_MAX_SECTORS		(BIO_MAX_SIZE >> 9)

/*
 * upper 16 bits of bi_rw define the io priority of this bio
 */
#define BIO_PRIO_SHIFT	(8 * sizeof(unsigned long) - IOPRIO_BITS)
#define bio_prio(bio)	((bio)->bi_rw >> BIO_PRIO_SHIFT)
#define bio_prio_valid(bio)	ioprio_valid(bio_prio(bio))

#define bio_set_prio(bio, prio)		do {				\
	WARN_ON(prio >= (1 << IOPRIO_BITS));				\
	(bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1);			\
	(bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT);	\
} while (0)
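
/*
 * Illustrative sketch (not part of the original header): tagging a bio
 * with an I/O priority before submission.  IOPRIO_PRIO_VALUE() and
 * IOPRIO_CLASS_BE come from <linux/ioprio.h>; the bio itself is assumed
 * to have been set up by the caller.
 *
 *	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4));
 *	if (bio_prio_valid(bio))
 *		pr_debug("bio ioprio %lu\n", bio_prio(bio));
 */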

/*
 * various member access, note that bio_data should of course not be used
 * on highmem page vectors
 */
#define bio_iovec_idx(bio, idx)	(&((bio)->bi_io_vec[(idx)]))
#define bio_iovec(bio)		bio_iovec_idx((bio), (bio)->bi_idx)
#define bio_page(bio)		bio_iovec((bio))->bv_page
#define bio_offset(bio)		bio_iovec((bio))->bv_offset
#define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
#define bio_sectors(bio)	((bio)->bi_size >> 9)

static inline unsigned int bio_cur_bytes(struct bio *bio)
{
	if (bio->bi_vcnt)
		return bio_iovec(bio)->bv_len;
	else /* dataless requests such as discard */
		return bio->bi_size;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio->bi_vcnt)
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

static inline int bio_has_allocated_vec(struct bio *bio)
{
	return bio->bi_io_vec && bio->bi_io_vec != bio->bi_inline_vecs;
}

/*
 * will die
 */
#define bio_to_phys(bio)	(page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
#define bvec_to_phys(bv)	(page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)

/*
 * queues that have highmem support enabled may still need to revert to
 * PIO transfers occasionally and thus map high pages temporarily. For
 * permanent PIO fall back, user is probably better off disabling highmem
 * I/O completely on that queue (see ide-dma for example)
 */
#define __bio_kmap_atomic(bio, idx, kmtype)				\
	(kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page) +		\
		bio_iovec_idx((bio), (idx))->bv_offset)

#define __bio_kunmap_atomic(addr, kmtype) kunmap_atomic(addr)
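
/*
 * Illustrative sketch (not part of the original header): a PIO-style copy
 * out of the current segment.  "buf" is a hypothetical driver buffer, and
 * the kmtype argument is unused by the macros since the kmap_atomic()
 * rework, so 0 is passed here.
 *
 *	char *addr = __bio_kmap_atomic(bio, bio->bi_idx, 0);
 *
 *	memcpy(buf, addr, bio_cur_bytes(bio));
 *	__bio_kunmap_atomic(addr, 0);
 */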

/*
 * merge helpers etc
 */

#define __BVEC_END(bio)		bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
#define __BVEC_START(bio)	bio_iovec_idx((bio), (bio)->bi_idx)

/* Default implementation of BIOVEC_PHYS_MERGEABLE */
#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))

/*
 * allow arch override, for eg virtualized architectures (put in asm/io.h)
 */
#ifndef BIOVEC_PHYS_MERGEABLE
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	__BIOVEC_PHYS_MERGEABLE(vec1, vec2)
#endif

#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
#define BIO_SEG_BOUNDARY(q, b1, b2) \
	BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))

#define bio_io_error(bio) bio_endio((bio), -EIO)

/*
 * drivers should not use the __ version unless they _really_ want to
 * run through the entire bio and not just pending pieces
 */
#define __bio_for_each_segment(bvl, bio, i, start_idx)			\
	for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx);	\
	     i < (bio)->bi_vcnt;					\
	     bvl++, i++)

#define bio_for_each_segment(bvl, bio, i)				\
	__bio_for_each_segment(bvl, bio, i, (bio)->bi_idx)
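
/*
 * Illustrative sketch (not part of the original header): walking the
 * unprocessed segments of a bio to total up their length.
 *
 *	struct bio_vec *bvec;
 *	unsigned int bytes = 0;
 *	int i;
 *
 *	bio_for_each_segment(bvec, bio, i)
 *		bytes += bvec->bv_len;
 */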

/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(rw, bio);
 * if (bio->bi_flags ...)
 *	do_something
 * bio_put(bio);
 *
 * without the bio_get(), the I/O could complete before submit_bio()
 * returns, and the bio would already have been freed by the time the
 * if (bio->bi_flags ...) check runs
 */
#define bio_get(bio)	atomic_inc(&(bio)->bi_cnt)

#if defined(CONFIG_BLK_DEV_INTEGRITY)
/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	sector_t		bip_sector;	/* virtual start sector */

	void			*bip_buf;	/* generated integrity data */
	bio_end_io_t		*bip_end_io;	/* saved I/O completion fn */

	unsigned int		bip_size;

	unsigned short		bip_slab;	/* slab the bip came from */
	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_idx;	/* current bip_vec index */

	struct work_struct	bip_work;	/* I/O completion */
	struct bio_vec		bip_vec[0];	/* embedded bvec array */
};
#endif /* CONFIG_BLK_DEV_INTEGRITY */

/*
 * A bio_pair is used when we need to split a bio.
 * This can only happen for a bio that refers to just one
 * page of data, and in the unusual situation when the
 * page crosses a chunk/device boundary
 *
 * The address of the master bio is stored in bio1.bi_private
 * The address of the pool the pair was allocated from is stored
 * in bio2.bi_private
 */
struct bio_pair {
	struct bio			bio1, bio2;
	struct bio_vec			bv1, bv2;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	struct bio_integrity_payload	bip1, bip2;
	struct bio_vec			iv1, iv2;
#endif
	atomic_t			cnt;
	int				error;
};
extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
extern void bio_pair_release(struct bio_pair *dbio);
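
/*
 * Illustrative sketch (not part of the original header): splitting a
 * single-page bio that straddles a chunk boundary, roughly as the MD/raid0
 * remapping path does.  "split_sectors" is assumed to be the number of
 * sectors that fit before the boundary.
 *
 *	struct bio_pair *bp = bio_split(bio, split_sectors);
 *
 *	generic_make_request(&bp->bio1);
 *	generic_make_request(&bp->bio2);
 *	bio_pair_release(bp);
 */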

extern struct bio_set *bioset_create(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);

extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);

extern void __bio_clone(struct bio *, struct bio *);
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);

extern struct bio_set *fs_bio_set;

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
}

static inline struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, fs_bio_set);
}

static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}

static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, NULL);
}

extern void bio_endio(struct bio *, int);
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);

extern void bio_init(struct bio *);
extern void bio_reset(struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
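
/*
 * Illustrative sketch (not part of the original header): building and
 * submitting a one-page read.  bdev, sector, page, and the hypothetical
 * my_end_io()/ctx completion hookup are assumed to come from the caller.
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_sector = sector;
 *	bio->bi_end_io = my_end_io;
 *	bio->bi_private = ctx;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio(READ, bio);
 */
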
extern int bio_get_nr_vecs(struct block_device *);
extern sector_t bio_sector_offset(struct bio *, unsigned short, unsigned int);
extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
				unsigned long, unsigned int, int, gfp_t);
struct sg_iovec;
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
				    struct block_device *,
				    struct sg_iovec *, int, int, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
				gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
				 gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void bio_flush_dcache_pages(struct bio *bi);
#else
static inline void bio_flush_dcache_pages(struct bio *bi)
{
}
#endif

extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
				 unsigned long, unsigned int, int, gfp_t);
extern struct bio *bio_copy_user_iov(struct request_queue *,
				     struct rq_map_data *, struct sg_iovec *,
				     int, int, gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *);
extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);

#ifdef CONFIG_BLK_CGROUP
int bio_associate_current(struct bio *bio);
void bio_disassociate_task(struct bio *bio);
#else	/* CONFIG_BLK_CGROUP */
static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
static inline void bio_disassociate_task(struct bio *bio) { }
#endif	/* CONFIG_BLK_CGROUP */

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2
#define BIOVEC_NR_POOLS 6
#define BIOVEC_MAX_IDX	(BIOVEC_NR_POOLS - 1)

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

	mempool_t *bio_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t *bio_integrity_pool;
#endif
	mempool_t *bvec_pool;
};
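
/*
 * Illustrative sketch (not part of the original header): a driver-private
 * pool created with bioset_create() and used for allocations that must not
 * dip into the global fs_bio_set.  The pool size and front_pad of 0 are
 * arbitrary example values.
 *
 *	struct bio_set *bs = bioset_create(BIO_POOL_SIZE, 0);
 *
 *	if (!bs)
 *		return -ENOMEM;
 *	bio = bio_alloc_bioset(GFP_NOIO, 1, bs);
 *	...
 *	bioset_free(bs);
 */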

struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
};

/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 2

#ifdef CONFIG_HIGHMEM
/*
 * remember never ever reenable interrupts between a bvec_kmap_irq and
 * bvec_kunmap_irq!
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr);
	local_irq_restore(*flags);
}

#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	*flags = 0;
}
#endif

static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
				   unsigned long *flags)
{
	return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
}
#define __bio_kunmap_irq(buf, flags)	bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
	__bio_kmap_irq((bio), (bio)->bi_idx, (flags))
#define bio_kunmap_irq(buf, flags) __bio_kunmap_irq(buf, flags)
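
/*
 * Illustrative sketch (not part of the original header): zeroing one
 * segment through a temporary mapping.  Interrupts stay disabled between
 * the map and unmap, as the comment above bvec_kmap_irq() demands.
 *
 *	unsigned long flags;
 *	char *data = bvec_kmap_irq(bvec, &flags);
 *
 *	memset(data, 0, bvec->bv_len);
 *	bvec_kunmap_irq(data, &flags);
 */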

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio && bio->bi_vcnt)
		return true;

	return false;
}

static inline bool bio_is_rw(struct bio *bio)
{
	if (!bio_has_data(bio))
		return false;

	if (bio->bi_rw & REQ_WRITE_SAME)
		return false;

	return true;
}

static inline bool bio_mergeable(struct bio *bio)
{
	if (bio->bi_rw & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}
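
/*
 * Illustrative sketch (not part of the original header): deferring bios to
 * a worker by queueing them on a bio_list and draining it later.  The
 * process_one() helper is hypothetical.
 *
 *	struct bio_list list;
 *	struct bio *bio;
 *
 *	bio_list_init(&list);
 *	bio_list_add(&list, bio1);
 *	bio_list_add(&list, bio2);
 *
 *	while ((bio = bio_list_pop(&list)))
 *		process_one(bio);
 */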

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_vec_idx(bip, idx)	(&(bip->bip_vec[(idx)]))
#define bip_vec(bip)		bip_vec_idx(bip, 0)

#define __bip_for_each_vec(bvl, bip, i, start_idx)			\
	for (bvl = bip_vec_idx((bip), (start_idx)), i = (start_idx);	\
	     i < (bip)->bip_vcnt;					\
	     bvl++, i++)

#define bip_for_each_vec(bvl, bip, i)					\
	__bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

#define bio_integrity(bio) (bio->bi_integrity != NULL)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern void bio_integrity_free(struct bio *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_get_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_prep(struct bio *);
extern void bio_integrity_endio(struct bio *, int);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);
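
/*
 * Illustrative sketch (not part of the original header): how a submission
 * path might attach protection information before dispatch, mirroring the
 * check done in the block layer core.
 *
 *	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
 *		bio_endio(bio, -EIO);
 *		return;
 *	}
 */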

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline int bio_integrity(struct bio *bio)
{
	return 0;
}

static inline int bio_integrity_enabled(struct bio *bio)
{
	return 0;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
	return;
}

static inline int bio_integrity_prep(struct bio *bio)
{
	return 0;
}

static inline void bio_integrity_free(struct bio *bio)
{
	return;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask)
{
	return 0;
}

static inline void bio_integrity_split(struct bio *bio, struct bio_pair *bp,
				       int sectors)
{
	return;
}

static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
	return;
}

static inline void bio_integrity_trim(struct bio *bio, unsigned int offset,
				      unsigned int sectors)
{
	return;
}

static inline void bio_integrity_init(void)
{
	return;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */