/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
#include <linux/bug.h>

#ifdef CONFIG_BLOCK

#include <asm/io.h>

/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON	BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES		256

#define bio_prio(bio)			(bio)->bi_ioprio
#define bio_set_prio(bio, prio)		((bio)->bi_ioprio = prio)

#define bio_iter_iovec(bio, iter)				\
	bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)				\
	bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)					\
	bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)				\
	bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)

#define bio_multiple_segments(bio)				\
	((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
#define bio_sectors(bio)	((bio)->bi_iter.bi_size >> 9)
#define bio_end_sector(bio)	((bio)->bi_iter.bi_sector + bio_sectors((bio)))
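/*
 * Worked example: a bio with bi_iter.bi_size == 4096 and
 * bi_iter.bi_sector == 2048 covers bio_sectors(bio) == 8 sectors
 * (4096 >> 9), so bio_end_sector(bio) evaluates to 2056.
 */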

/*
 * Return the data direction, READ or WRITE.
 */
#define bio_data_dir(bio) \
	(op_is_write(bio_op(bio)) ? WRITE : READ)

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio &&
	    bio->bi_iter.bi_size &&
	    bio_op(bio) != REQ_OP_DISCARD &&
	    bio_op(bio) != REQ_OP_SECURE_ERASE &&
	    bio_op(bio) != REQ_OP_WRITE_ZEROES)
		return true;

	return false;
}

static inline bool bio_no_advance_iter(struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD ||
	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
	       bio_op(bio) == REQ_OP_WRITE_SAME ||
	       bio_op(bio) == REQ_OP_WRITE_ZEROES;
}

static inline bool bio_mergeable(struct bio *bio)
{
	if (bio->bi_opf & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline unsigned int bio_cur_bytes(struct bio *bio)
{
	if (bio_has_data(bio))
		return bio_iovec(bio).bv_len;
	else /* dataless requests such as discard */
		return bio->bi_iter.bi_size;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio_has_data(bio))
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}
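
/*
 * Illustrative sketch: peeking at the start of a bio's payload. bio_data()
 * assumes the first segment's page is directly addressable, so gate it on
 * bio_has_data(); do_something() is a placeholder for the caller's own
 * handling.
 *
 *	if (bio_has_data(bio)) {
 *		char *buf = bio_data(bio);
 *		unsigned int len = bio_cur_bytes(bio);
 *
 *		do_something(buf, len);
 *	}
 */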

/*
 * will die
 */
#define bio_to_phys(bio)	(page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
#define bvec_to_phys(bv)	(page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)

/*
 * queues that have highmem support enabled may still need to revert to
 * PIO transfers occasionally and thus map high pages temporarily. For
 * permanent PIO fall back, user is probably better off disabling highmem
 * I/O completely on that queue (see ide-dma for example)
 */
#define __bio_kmap_atomic(bio, iter)				\
	(kmap_atomic(bio_iter_iovec((bio), (iter)).bv_page) +	\
	 bio_iter_iovec((bio), (iter)).bv_offset)

#define __bio_kunmap_atomic(addr)	kunmap_atomic(addr)
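
/*
 * Illustrative sketch: copy the current segment of a bio out through the
 * atomic kmap helpers. The mapping is atomic, so no sleeping is allowed
 * between the two calls; @buf is assumed to be a caller-provided buffer.
 *
 *	char *addr = __bio_kmap_atomic(bio, bio->bi_iter);
 *
 *	memcpy(buf, addr, bio_cur_bytes(bio));
 *	__bio_kunmap_atomic(addr);
 */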

/*
 * merge helpers etc
 */

/* Default implementation of BIOVEC_PHYS_MERGEABLE */
#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))

/*
 * allow arch override, for eg virtualized architectures (put in asm/io.h)
 */
#ifndef BIOVEC_PHYS_MERGEABLE
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	__BIOVEC_PHYS_MERGEABLE(vec1, vec2)
#endif

#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))

/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, i)				\
	for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++)

static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
				    unsigned bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
}

#define __bio_for_each_segment(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
	     bio_advance_iter((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)				\
	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
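
/*
 * Typical iteration, as an illustrative sketch: walk every segment of a bio
 * and add up its length. Each bio_vec is handed to the loop body by value,
 * so modifying @bvl does not touch the bio itself.
 *
 *	struct bio_vec bvl;
 *	struct bvec_iter iter;
 *	unsigned int bytes = 0;
 *
 *	bio_for_each_segment(bvl, bio, iter)
 *		bytes += bvl.bv_len;
 */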

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)

static inline unsigned bio_segments(struct bio *bio)
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	/*
	 * We special case discard/write same/write zeroes, because they
	 * interpret bi_size differently:
	 */

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	default:
		break;
	}

	bio_for_each_segment(bv, bio, iter)
		segs++;

	return segs;
}

/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(bio);
 * if (bio->bi_flags ...)
 *	do_something
 * bio_put(bio);
 *
 * without the bio_get(), the I/O could complete before submit_bio() returns,
 * and the bio would already have been freed by the time the
 * if (bio->bi_flags ...) check runs.
 */
static inline void bio_get(struct bio *bio)
{
	bio->bi_flags |= (1 << BIO_REFFED);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_cnt);
}

static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
	if (count != 1) {
		bio->bi_flags |= (1 << BIO_REFFED);
		smp_mb__before_atomic();
	}
	atomic_set(&bio->__bi_cnt, count);
}

static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
	return (bio->bi_flags & (1U << bit)) != 0;
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags &= ~(1U << bit);
}

static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
	*bv = bio_iovec(bio);
}

static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
{
	struct bvec_iter iter = bio->bi_iter;
	int idx;

	if (unlikely(!bio_multiple_segments(bio))) {
		*bv = bio_iovec(bio);
		return;
	}

	bio_advance_iter(bio, &iter, iter.bi_size);

	if (!iter.bi_bvec_done)
		idx = iter.bi_idx - 1;
	else	/* in the middle of bvec */
		idx = iter.bi_idx;

	*bv = bio->bi_io_vec[idx];

	/*
	 * iter.bi_bvec_done records actual length of the last bvec
	 * if this bio ends in the middle of one io vector
	 */
	if (iter.bi_bvec_done)
		bv->bv_len = iter.bi_bvec_done;
}

enum bip_flags {
	BIP_BLOCK_INTEGRITY	= 1 << 0, /* block layer owns integrity data */
	BIP_MAPPED_INTEGRITY	= 1 << 1, /* ref tag has been remapped */
	BIP_CTRL_NOCHECK	= 1 << 2, /* disable HBA integrity checking */
	BIP_DISK_NOCHECK	= 1 << 3, /* disable disk integrity checking */
	BIP_IP_CHECKSUM		= 1 << 4, /* IP checksum */
};

/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	struct bvec_iter	bip_iter;

	bio_end_io_t		*bip_end_io;	/* saved I/O completion fn */

	unsigned short		bip_slab;	/* slab the bip came from */
	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
	unsigned short		bip_flags;	/* control flags */

	struct work_struct	bip_work;	/* I/O completion */

	struct bio_vec		*bip_vec;
	struct bio_vec		bip_inline_vecs[0];/* embedded bvec array */
};

#if defined(CONFIG_BLK_DEV_INTEGRITY)

static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
	if (bio->bi_opf & REQ_INTEGRITY)
		return bio->bi_integrity;

	return NULL;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip)
		return bip->bip_flags & flag;

	return false;
}

static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
{
	return bip->bip_iter.bi_sector;
}

static inline void bip_set_seed(struct bio_integrity_payload *bip,
				sector_t seed)
{
	bip->bip_iter.bi_sector = seed;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

extern void bio_trim(struct bio *bio, int offset, int size);
extern struct bio *bio_split(struct bio *bio, int sectors,
			     gfp_t gfp, struct bio_set *bs);

/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Returns a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
					 gfp_t gfp, struct bio_set *bs)
{
	if (sectors >= bio_sectors(bio))
		return bio;

	return bio_split(bio, sectors, gfp, bs);
}
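
/*
 * Illustrative sketch of the usual stacking-driver pattern around
 * bio_next_split(): carve @sectors off the front, chain the split to the
 * remainder so the original bio completes only once both parts finish,
 * resubmit the remainder, and carry on with @split (at most @sectors long).
 * @sectors and @bs are assumed to come from the caller.
 *
 *	split = bio_next_split(bio, sectors, GFP_NOIO, bs);
 *	if (split != bio) {
 *		bio_chain(split, bio);
 *		generic_make_request(bio);
 *	}
 */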

extern struct bio_set *bioset_create(unsigned int, unsigned int, int flags);
enum {
	BIOSET_NEED_BVECS = BIT(0),
};
extern void bioset_free(struct bio_set *);
extern mempool_t *biovec_create_pool(int pool_entries);

extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
extern void bio_put(struct bio *);

extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);

extern struct bio_set *fs_bio_set;

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
}

static inline struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, fs_bio_set);
}

static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}

static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, NULL);
}

extern blk_qc_t submit_bio(struct bio *);

extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
	bio->bi_status = BLK_STS_IOERR;
	bio_endio(bio);
}

struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);

extern int submit_bio_wait(struct bio *bio);
extern void bio_advance(struct bio *, unsigned);

extern void bio_init(struct bio *bio, struct bio_vec *table,
		     unsigned short max_vecs);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
				    const struct iov_iter *, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
				gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
				 gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
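
/*
 * Illustrative sketch of a simple synchronous read built from the helpers
 * above; error handling is omitted, and @bdev, @sector and @page are assumed
 * to come from the caller. bi_bdev and bio_set_op_attrs() come from
 * blk_types.h.
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_iter.bi_sector = sector;
 *	bio_set_op_attrs(bio, REQ_OP_READ, 0);
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *
 *	ret = submit_bio_wait(bio);
 *	bio_put(bio);
 */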

void generic_start_io_acct(int rw, unsigned long sectors,
			   struct hd_struct *part);
void generic_end_io_acct(int rw, struct hd_struct *part,
			 unsigned long start_time);

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void bio_flush_dcache_pages(struct bio *bi);
#else
static inline void bio_flush_dcache_pages(struct bio *bi)
{
}
#endif

extern void bio_copy_data(struct bio *dst, struct bio *src);
extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);
extern void bio_free_pages(struct bio *bio);

extern struct bio *bio_copy_user_iov(struct request_queue *,
				     struct rq_map_data *,
				     const struct iov_iter *,
				     gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);

#ifdef CONFIG_BLK_CGROUP
int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
int bio_associate_current(struct bio *bio);
void bio_disassociate_task(struct bio *bio);
void bio_clone_blkcg_association(struct bio *dst, struct bio *src);
#else	/* CONFIG_BLK_CGROUP */
static inline int bio_associate_blkcg(struct bio *bio,
			struct cgroup_subsys_state *blkcg_css) { return 0; }
static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
static inline void bio_disassociate_task(struct bio *bio) { }
static inline void bio_clone_blkcg_association(struct bio *dst,
			struct bio *src) { }
#endif	/* CONFIG_BLK_CGROUP */

#ifdef CONFIG_HIGHMEM
/*
 * remember never ever reenable interrupts between a bvec_kmap_irq and
 * bvec_kunmap_irq!
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr);
	local_irq_restore(*flags);
}

#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	*flags = 0;
}
#endif

static inline char *__bio_kmap_irq(struct bio *bio, struct bvec_iter iter,
				   unsigned long *flags)
{
	return bvec_kmap_irq(&bio_iter_iovec(bio, iter), flags);
}
#define __bio_kunmap_irq(buf, flags)	bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
	__bio_kmap_irq((bio), (bio)->bi_iter, (flags))
#define bio_kunmap_irq(buf, flags) __bio_kunmap_irq(buf, flags)
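
/*
 * Illustrative sketch: zero the current segment of a bio through the
 * irq-safe mapping helpers. As noted above bvec_kmap_irq(), interrupts
 * must stay disabled between the map and unmap calls.
 *
 *	unsigned long flags;
 *	char *buf = bio_kmap_irq(bio, &flags);
 *
 *	memset(buf, 0, bio_cur_bytes(bio));
 *	bio_kunmap_irq(buf, &flags);
 */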

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio. The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST	{ NULL, NULL }

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}
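
/*
 * Illustrative sketch of typical bio_list usage in a remapping driver:
 * queue up bios, then drain the list and hand each one on. bio1, bio2 and
 * process_one() are placeholders for the driver's own bios and handler.
 *
 *	struct bio_list list;
 *	struct bio *bio;
 *
 *	bio_list_init(&list);
 *	bio_list_add(&list, bio1);
 *	bio_list_add(&list, bio2);
 *
 *	while ((bio = bio_list_pop(&list)))
 *		process_one(bio);
 */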

/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
	bio_set_flag(bio, BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

	mempool_t *bio_pool;
	mempool_t *bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t *bio_integrity_pool;
	mempool_t *bvec_integrity_pool;
#endif

	/*
	 * Deadlock avoidance for stacking block drivers: see comments in
	 * bio_alloc_bioset() for details
	 */
	spinlock_t		rescue_lock;
	struct bio_list		rescue_list;
	struct work_struct	rescue_work;
	struct workqueue_struct	*rescue_workqueue;
};

struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
};

/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 2

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_for_each_vec(bvl, bip, iter)				\
	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern void bio_integrity_free(struct bio *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_prep(struct bio *);
extern void bio_integrity_endio(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline void *bio_integrity(struct bio *bio)
{
	return NULL;
}

static inline bool bio_integrity_enabled(struct bio *bio)
{
	return false;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
	return;
}

static inline int bio_integrity_prep(struct bio *bio)
{
	return 0;
}

static inline void bio_integrity_free(struct bio *bio)
{
	return;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask)
{
	return 0;
}

static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
	return;
}

static inline void bio_integrity_trim(struct bio *bio, unsigned int offset,
				      unsigned int sectors)
{
	return;
}

static inline void bio_integrity_init(void)
{
	return;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	return false;
}

static inline void *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
					unsigned int nr)
{
	return ERR_PTR(-EINVAL);
}

static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
					 unsigned int len, unsigned int offset)
{
	return 0;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */