/*
 * Block data types and constants. Directly include this file only to
 * break include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
typedef void (bio_destructor_t) (struct bio *);

#ifdef CONFIG_BLOCK
/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	int			bi_error;
	unsigned int		bi_opf;		/* bottom bits req flags,
						 * top bits REQ_OP. Use
						 * accessors.
						 */
	unsigned short		bi_flags;	/* status, command, etc */
	unsigned short		bi_ioprio;

	struct bvec_iter	bi_iter;

	/* Number of segments in this BIO after
	 * physical address coalescing is performed.
	 */
	unsigned int		bi_phys_segments;

	/*
	 * To keep track of the max segment size, we account for the
	 * sizes of the first and last mergeable segments in this bio.
	 */
	unsigned int		bi_seg_front_size;
	unsigned int		bi_seg_back_size;

	atomic_t		__bi_remaining;

	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Optional ioc and css associated with this bio. Put on bio
	 * release. Read comment on top of bio_associate_current().
	 */
	struct io_context	*bi_ioc;
	struct cgroup_subsys_state *bi_css;
#endif
	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[0];
};
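
/*
 * Illustrative sketch (not part of the original header): a completion path
 * typically records a negative errno in bi_error and completes the bio via
 * bio_endio() from <linux/bio.h>, which invokes the registered bi_end_io:
 *
 *	bio->bi_error = err;	// err: hypothetical negative errno
 *	bio_endio(bio);
 */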

#define BIO_OP_SHIFT	(8 * FIELD_SIZEOF(struct bio, bi_opf) - REQ_OP_BITS)
#define bio_op(bio)	((bio)->bi_opf >> BIO_OP_SHIFT)

#define bio_set_op_attrs(bio, op, op_flags) do {			\
	WARN_ON(op >= (1 << REQ_OP_BITS));				\
	(bio)->bi_opf &= ((1 << BIO_OP_SHIFT) - 1);			\
	(bio)->bi_opf |= ((unsigned int) (op) << BIO_OP_SHIFT);	\
	(bio)->bi_opf |= op_flags;					\
} while (0)
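
/*
 * Example (illustrative, not from the original header): prepare a sync
 * write and dispatch on its op later; handle_write() is hypothetical:
 *
 *	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC);
 *	...
 *	if (bio_op(bio) == REQ_OP_WRITE)
 *		handle_write(bio);
 */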

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
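
/*
 * Sketch of the intended use (bio_reset() lives in block/bio.c): everything
 * before bi_max_vecs is cleared, the tail of the struct survives, roughly:
 *
 *	memset(bio, 0, BIO_RESET_BYTES);
 */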

/*
 * bio flags
 */
#define BIO_SEG_VALID	1	/* bi_phys_segments valid */
#define BIO_CLONED	2	/* doesn't own data */
#define BIO_BOUNCED	3	/* bio is a bounce bio */
#define BIO_USER_MAPPED 4	/* contains user pages */
#define BIO_NULL_MAPPED 5	/* contains invalid user pages */
#define BIO_QUIET	6	/* Make BIO Quiet */
#define BIO_CHAIN	7	/* chained bio, ->bi_remaining in effect */
#define BIO_REFFED	8	/* bio has elevated ->bi_cnt */

/*
 * Flags starting here get preserved by bio_reset() - this includes
 * BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS	10

/*
 * We support 6 different bvec pools; the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR		6
#define BVEC_POOL_MAX		(BVEC_POOL_NR - 1)

/*
 * Top 4 bits of bio flags indicate the pool the bvecs came from. We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS		(4)
#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)
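
/*
 * Example (illustrative): a free path releases bvecs only when a pool index
 * is encoded; this mirrors what bio_free() in block/bio.c is expected to do:
 *
 *	if (BVEC_POOL_IDX(bio))
 *		bvec_free(bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));
 */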

#endif /* CONFIG_BLOCK */

/*
 * Request flags. For use in the cmd_flags field of struct request, and in
 * bi_opf of struct bio. Note that some flags are valid only in one of the
 * two.
 */
enum rq_flag_bits {
	/* common flags */
	__REQ_FAILFAST_DEV,	/* no driver retries of device errors */
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */

	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */

	__REQ_NOIDLE,		/* don't anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */

	/* bio only flags */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_THROTTLED,	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */

	/* request only flags */
	__REQ_SORTED,		/* elevator knows about this request */
	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_STARTED,		/* drive already may have started this one */
	__REQ_DONTPREP,		/* don't call prep for this one */
	__REQ_QUEUED,		/* uses queueing */
	__REQ_ELVPRIV,		/* elevator private data attached */
	__REQ_FAILED,		/* set if the request failed */
	__REQ_QUIET,		/* don't worry about errors */
	__REQ_PREEMPT,		/* set for "ide_preempt" requests and also
				   for requests for which the SCSI "quiesce"
				   state must be ignored. */
	__REQ_ALLOCED,		/* request came from our alloc pool */
	__REQ_COPY_USER,	/* contains copies of user pages */
	__REQ_FLUSH_SEQ,	/* request for flush sequence */
	__REQ_IO_STAT,		/* account I/O stat */
	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
	__REQ_PM,		/* runtime pm request */
	__REQ_HASHED,		/* on IO scheduler merge hash */
	__REQ_MQ_INFLIGHT,	/* track inflight for MQ */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOIDLE		(1ULL << __REQ_NOIDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
#define REQ_COMMON_MASK \
	(REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | REQ_NOIDLE | \
	 REQ_PREFLUSH | REQ_FUA | REQ_INTEGRITY | REQ_NOMERGE)
#define REQ_CLONE_MASK		REQ_COMMON_MASK

/* This mask is used for both bio and request merge checking */
#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_PREFLUSH | REQ_FUA | REQ_FLUSH_SEQ)
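
/*
 * Illustrative sketch of how the merge code is expected to consult this
 * mask (cf. blk_rq_merge_ok() in block/blk-merge.c), roughly:
 *
 *	if ((rq->cmd_flags | bio->bi_opf) & REQ_NOMERGE_FLAGS)
 *		return false;	// never merge these
 */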

#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_THROTTLED		(1ULL << __REQ_THROTTLED)

#define REQ_SORTED		(1ULL << __REQ_SORTED)
#define REQ_SOFTBARRIER		(1ULL << __REQ_SOFTBARRIER)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_STARTED		(1ULL << __REQ_STARTED)
#define REQ_DONTPREP		(1ULL << __REQ_DONTPREP)
#define REQ_QUEUED		(1ULL << __REQ_QUEUED)
#define REQ_ELVPRIV		(1ULL << __REQ_ELVPRIV)
#define REQ_FAILED		(1ULL << __REQ_FAILED)
#define REQ_QUIET		(1ULL << __REQ_QUIET)
#define REQ_PREEMPT		(1ULL << __REQ_PREEMPT)
#define REQ_ALLOCED		(1ULL << __REQ_ALLOCED)
#define REQ_COPY_USER		(1ULL << __REQ_COPY_USER)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_FLUSH_SEQ		(1ULL << __REQ_FLUSH_SEQ)
#define REQ_IO_STAT		(1ULL << __REQ_IO_STAT)
#define REQ_MIXED_MERGE		(1ULL << __REQ_MIXED_MERGE)
#define REQ_PM			(1ULL << __REQ_PM)
#define REQ_HASHED		(1ULL << __REQ_HASHED)
#define REQ_MQ_INFLIGHT		(1ULL << __REQ_MQ_INFLIGHT)

enum req_op {
	REQ_OP_READ,
	REQ_OP_WRITE,
	REQ_OP_DISCARD,		/* request to discard sectors */
	REQ_OP_SECURE_ERASE,	/* request to securely erase sectors */
	REQ_OP_WRITE_SAME,	/* write same block many times */
	REQ_OP_FLUSH,		/* request for cache flush */
};
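
/*
 * Example (illustrative): drivers commonly switch on the op decoded from
 * bi_opf; do_discard()/do_rw() are hypothetical helpers:
 *
 *	switch (bio_op(bio)) {
 *	case REQ_OP_DISCARD:
 *	case REQ_OP_SECURE_ERASE:
 *		return do_discard(bio);
 *	case REQ_OP_READ:
 *	case REQ_OP_WRITE:
 *		return do_rw(bio);
 *	}
 */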

#define REQ_OP_BITS	3

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE	-1U
#define BLK_QC_T_SHIFT	16

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num)
{
	return tag | (queue_num << BLK_QC_T_SHIFT);
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return cookie >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}
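
/*
 * The cookie packs (queue_num, tag) into one word; the helpers above invert
 * each other for any tag below (1U << BLK_QC_T_SHIFT):
 *
 *	blk_qc_t c = blk_tag_to_qc_t(tag, qnum);
 *	// blk_qc_t_to_tag(c) == tag, blk_qc_t_to_queue_num(c) == qnum
 */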

#endif /* __LINUX_BLK_TYPES_H */